
yeah, it's failing because naming of items / stuff changed:Jorgo wrote:i tried with gentoo-sources-2.6.35 but the second patch part fails ...
so restored the original file.
--- mm/vmscan.c 2010-07-20 11:21:08.000000000 +0800
+++ mm/vmscan.c 2010-08-01 16:47:52.000000000 +0800
@@ -1337,14 +1378,8 @@
nr_reclaimed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
- /*
- * If we are direct reclaiming for contiguous pages and we do
- * not reclaim everything in the list, try again and wait
- * for IO to complete. This will stall high-order allocations
- * but that should be acceptable to the caller
- */
- if (nr_reclaimed < nr_taken && !current_is_kswapd() &&
- sc->lumpy_reclaim_mode) {
+ /* Check if we should syncronously wait for writeback */
+ if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
congestion_wait(BLK_RW_ASYNC, HZ/10);
/*
--- mm/vmscan.c 2010-07-20 11:21:08.000000000 +0800
+++ mm/vmscan.c 2010-08-01 16:47:52.000000000 +0800
@@ -1244,14 +1356,8 @@
nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
- /*
- * If we are direct reclaiming for contiguous pages and we do
- * not reclaim everything in the list, try again and wait
- * for IO to complete. This will stall high-order allocations
- * but that should be acceptable to the caller
- */
- if (nr_freed < nr_taken && !current_is_kswapd() &&
- sc->lumpy_reclaim_mode) {
+ /* Check if we should syncronously wait for writeback */
+ if (should_reclaim_stall(nr_taken, nr_freed, priority, sc)) {
congestion_wait(BLK_RW_ASYNC, HZ/10);
/*

Code: Select all
--- /usr/src/sources/kernel/zen-upstream/mm/vmscan.c 2010-07-21 17:01:20.911512995 +0200
+++ mm/vmscan.c 2010-08-04 22:11:43.663379966 +0200
@@ -1113,6 +1113,47 @@
}
/*
+ * Returns true if the caller should wait to clean dirty/writeback pages.
+ *
+ * If we are direct reclaiming for contiguous pages and we do not reclaim
+ * everything in the list, try again and wait for writeback IO to complete.
+ * This will stall high-order allocations noticeably. Only do that when really
+ * need to free the pages under high memory pressure.
+ */
+static inline bool should_reclaim_stall(unsigned long nr_taken,
+ unsigned long nr_freed,
+ int priority,
+ struct scan_control *sc)
+{
+ int lumpy_stall_priority;
+
+ /* kswapd should not stall on sync IO */
+ if (current_is_kswapd())
+ return false;
+
+ /* Only stall on lumpy reclaim */
+ if (!sc->lumpy_reclaim_mode)
+ return false;
+
+ /* If we have relaimed everything on the isolated list, no stall */
+ if (nr_freed == nr_taken)
+ return false;
+
+ /*
+ * For high-order allocations, there are two stall thresholds.
+ * High-cost allocations stall immediately where as lower
+ * order allocations such as stacks require the scanning
+ * priority to be much higher before stalling.
+ */
+ if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+ lumpy_stall_priority = DEF_PRIORITY;
+ else
+ lumpy_stall_priority = DEF_PRIORITY / 3;
+
+ return priority <= lumpy_stall_priority;
+}
+
+/*
* shrink_inactive_list() is a helper for shrink_zone(). It returns the number
* of reclaimed pages
*/
@@ -1202,15 +1243,8 @@
nr_scanned += nr_scan;
nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
- /*
- * If we are direct reclaiming for contiguous pages and we do
- * not reclaim everything in the list, try again and wait
- * for IO to complete. This will stall high-order allocations
- * but that should be acceptable to the caller
- */
- if (nr_freed < nr_taken && !current_is_kswapd() &&
- sc->lumpy_reclaim_mode) {
- congestion_wait(BLK_RW_ASYNC, HZ/10);
+ /* Check if we should syncronously wait for writeback */
+ if (should_reclaim_stall(nr_taken, nr_freed, priority, sc)) {
/*
* The attempt at page out may have made some
Code: Select all
mm/vmscan.c | 51 ++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 43 insertions(+), 8 deletions(-)
--- mmotm.orig/mm/vmscan.c 2010-07-20 11:21:08.000000000 +0800
+++ mmotm/mm/vmscan.c 2010-08-01 16:47:52.000000000 +0800
@@ -1113,6 +1113,47 @@ static noinline_for_stack void update_is
}
/*
+ * Returns true if the caller should wait to clean dirty/writeback pages.
+ *
+ * If we are direct reclaiming for contiguous pages and we do not reclaim
+ * everything in the list, try again and wait for writeback IO to complete.
+ * This will stall high-order allocations noticeably. Only do that when really
+ * need to free the pages under high memory pressure.
+ */
+static inline bool should_reclaim_stall(unsigned long nr_taken,
+ unsigned long nr_freed,
+ int priority,
+ struct scan_control *sc)
+{
+ int lumpy_stall_priority;
+
+ /* kswapd should not stall on sync IO */
+ if (current_is_kswapd())
+ return false;
+
+ /* Only stall on lumpy reclaim */
+ if (!sc->lumpy_reclaim_mode)
+ return false;
+
+ /* If we have relaimed everything on the isolated list, no stall */
+ if (nr_freed == nr_taken)
+ return false;
+
+ /*
+ * For high-order allocations, there are two stall thresholds.
+ * High-cost allocations stall immediately where as lower
+ * order allocations such as stacks require the scanning
+ * priority to be much higher before stalling.
+ */
+ if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+ lumpy_stall_priority = DEF_PRIORITY;
+ else
+ lumpy_stall_priority = DEF_PRIORITY / 3;
+
+ return priority <= lumpy_stall_priority;
+}
+
+/*
* shrink_inactive_list() is a helper for shrink_zone(). It returns the number
* of reclaimed pages
*/
@@ -1202,15 +1243,8 @@
nr_scanned += nr_scan;
nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
- /*
- * If we are direct reclaiming for contiguous pages and we do
- * not reclaim everything in the list, try again and wait
- * for IO to complete. This will stall high-order allocations
- * but that should be acceptable to the caller
- */
- if (nr_freed < nr_taken && !current_is_kswapd() &&
- sc->lumpy_reclaim_mode) {
- congestion_wait(BLK_RW_ASYNC, HZ/10);
+ /* Check if we should syncronously wait for writeback */
+ if (should_reclaim_stall(nr_taken, nr_freed, priority, sc)) {
/*
* The attempt at page out may have made some
Code: Select all
patching file mm/vmscan.c
Hunk #1 succeeded at 1194 (offset 81 lines).
patch unexpectedly ends in middle of line
Hunk #2 FAILED at 1243.
1 out of 2 hunks FAILED -- saving rejects to file mm/vmscan.c.rej
Code: Select all
--- mm/vmscan.c 2010-07-20 11:21:08.000000000 +0800
+++ mm/vmscan.c 2010-08-01 16:47:52.000000000 +0800
@@ -1243,15 +1284,8 @@
nr_scanned += nr_scan;
nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
- /*
- * If we are direct reclaiming for contiguous pages and we do
- * not reclaim everything in the list, try again and wait
- * for IO to complete. This will stall high-order allocations
- * but that should be acceptable to the caller
- */
- if (nr_freed < nr_taken && !current_is_kswapd() &&
- sc->lumpy_reclaim_mode) {
- congestion_wait(BLK_RW_ASYNC, HZ/10);
+ /* Check if we should syncronously wait for writeback */
+ if (should_reclaim_stall(nr_taken, nr_freed, priority, sc)) {
/*
mm/vmscan.c.rej lines 1-20/20 (END)


Code: Select all
wget -O desktop-responsiveness_2.6.35_fix.patch http://paste.pocoo.org/raw/r59CezZil6xEbcKH5laK/
cd /usr/src/linux
patch -p6 < $OLDPWD/desktop-responsiveness_2.6.35_fix.patch
Anyway, I am recompiling my kernel now. I did not know that there was a problem, but I am happy to have it fixed.
# patch -p6 < /root/desktop-responsiveness_2.6.35_fix.patch
patching file mm/vmscan.c
Hunk #1 succeeded at 1112 (offset -1 lines).
patch unexpectedly ends in middle of line
Hunk #2 succeeded at 1242 with fuzz 1 (offset -1 lines).

Orly? 'piotr' at phoronix is me. And I only said, that with this patch I have lags when doing much disk activity. I haven't said that without it is better or worse, I see no improvement.Shining Arcanine wrote:Someone at Phoronix claimed dd if=/dev/zero of=test bs=1M count=5024 && rm test -f would bring unpatched systems to a crawl, but I have run it on my system in both a patched and unpatched state and I am having difficulty seeing an improvement.


Sorry, I misread your post at Phoronix.SlashBeast wrote:Orly? 'piotr' at phoronix is me. And I only said, that with this patch I have lags when doing much disk activity. I haven't said that without it is better or worst, I see no improvement.Shining Arcanine wrote:Someone at Phoronix claimed dd if=/dev/zero of=test bs=1M count=5024 && rm test -f would bring unpatched systems to a crawl, but I have run it on my system in both a patched and unpatched state and I am having difficulty seeing an improvement.

thisNaterGator wrote:I patched a few minute ago, and copying large (1.3GB) video files across two disks definitely resulted in a GUI slowdown/near halt before the patch. With the patch things are markedly improved, but not perfect.
Looking at IO wait in top still shows some significant stalling, but apparently the patch did make a difference. FWIW I noticed the issue the most when performing IO operations with particularly slow devices, like old USB sticks.
Code: Select all
CC mm/vmscan.o
CC kernel/sysctl.o
mm/vmscan.c:1163: Fehler: Redefinition von »should_reclaim_stall«
mm/vmscan.c:1122: Anmerkung: Vorherige Definition von »should_reclaim_stall« war hier
mm/vmscan.c:1204: Fehler: Redefinition von »should_reclaim_stall«
mm/vmscan.c:1163: Anmerkung: Vorherige Definition von »should_reclaim_stall« war hier
make[1]: *** [mm/vmscan.o] Fehler 1
make: *** [mm] Fehler 2

that's the one from the zen-kernel devs:Jorgo wrote:This patch for 2.6.35 applies without error but when i compile:
https://bugzilla.kernel.org/attachment.cgi?id=27314
Code: Select all
CC mm/vmscan.o CC kernel/sysctl.o mm/vmscan.c:1163: Fehler: Redefinition von »should_reclaim_stall« mm/vmscan.c:1122: Anmerkung: Vorherige Definition von »should_reclaim_stall« war hier mm/vmscan.c:1204: Fehler: Redefinition von »should_reclaim_stall« mm/vmscan.c:1163: Anmerkung: Vorherige Definition von »should_reclaim_stall« war hier make[1]: *** [mm/vmscan.o] Fehler 1 make: *** [mm] Fehler 2

there you go !lagalopex wrote:While the original patch on lkml did not remove the "congestion_wait" call, it is removed in all the posted patches in here.
And the patch was not for the mainline 2.6, its for the "-mm tree of the moment"-kernel.


it doesyoshi314 wrote:i'm guessing zen-sources is the way to go atm. not sure if it contains the complete patch.


2.6.36 in that regard is much betterdarklegion wrote:It seems that using the deadline i/o scheduler is still useful with some systems. I'm using 2.6.35-zen2 (which includes the vmscan patches) and certain large file copies results in the dreaded multiple second pauses with cfq and bfq. Using ionice helps with bfq and cfq, but at the cost of greatly reduced throughput. With deadline the pauses are gone, and throughput seems to be fine. I realise that this may not help with all systems, but it certainly helped in my case.
EDIT: Never mind, the pauses are still there. Reduced somewhat, but still there.
Does 2.6.36 include the fix from Wu Fengguang?kernelOfTruth wrote:2.6.36 in that regard is much betterdarklegion wrote:It seems that using the deadline i/o scheduler is still useful with some systems. I'm using 2.6.35-zen2 (which includes the vmscan patches) and certain large file copies results in the dreaded multiple second pauses with cfq and bfq. Using ionice helps with bfq and cfq, but at the cost of greatly reduced throughput. With deadline the pauses are gone, and throughput seems to be fine. I realise that this may not help with all systems, but it certainly helped in my case.
EDIT: Never mind, the pauses are still there. Reduced somewhat, but still there.

you mean the posted 2 patches which also were written about on phoronix.com ?devsk wrote:Does 2.6.36 include the fix from Wu Fengguang?kernelOfTruth wrote:2.6.36 in that regard is much betterdarklegion wrote:It seems that using the deadline i/o scheduler is still useful with some systems. I'm using 2.6.35-zen2 (which includes the vmscan patches) and certain large file copies results in the dreaded multiple second pauses with cfq and bfq. Using ionice helps with bfq and cfq, but at the cost of greatly reduced throughput. With deadline the pauses are gone, and throughput seems to be fine. I realise that this may not help with all systems, but it certainly helped in my case.
EDIT: Never mind, the pauses are still there. Reduced somewhat, but still there.