mm: vmscan: decide whether to compact the pgdat based on reclaim progress

In the past, kswapd made a decision on whether to compact memory after
the pgdat was considered balanced.  This more or less worked, but it is
late to make such a decision and it does not fit well now that kswapd
makes a decision on whether to exit the zone scanning loop depending on
reclaim progress.

This patch will compact a pgdat if at least the requested number of
pages were reclaimed from unbalanced zones for a given priority.  If any
zone is currently balanced, kswapd will not call compaction as it is
expected the necessary pages are already available.
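
For illustration only, the per-priority decision can be modelled as a
small standalone C sketch.  struct zone_model, should_compact_pgdat()
and their parameters are stand-ins invented for this example; the real
logic operates on struct zone and struct scan_control in mm/vmscan.c,
as the diff below shows.

#include <stdbool.h>
#include <stdio.h>

struct zone_model {
	bool populated;		/* stands in for populated_zone(zone) */
	bool low_wmark_ok;	/* stands in for zone_watermark_ok() at the low watermark */
};

/*
 * One pass of the priority loop: compact only for order > 0 requests,
 * only if no populated zone already meets its low watermark, and only
 * if reclaim made at least the progress it attempted.
 */
static bool should_compact_pgdat(const struct zone_model *zones, int nr_zones,
				 int order, unsigned long nr_reclaimed,
				 unsigned long nr_attempted)
{
	bool pgdat_needs_compaction = order > 0;

	for (int i = 0; i < nr_zones; i++) {
		if (!zones[i].populated)
			continue;
		/*
		 * If any zone is currently balanced, the necessary pages
		 * are expected to be available already, so skip compaction.
		 */
		if (pgdat_needs_compaction && zones[i].low_wmark_ok)
			pgdat_needs_compaction = false;
	}

	return pgdat_needs_compaction && nr_reclaimed > nr_attempted;
}

int main(void)
{
	struct zone_model zones[] = {
		{ .populated = true, .low_wmark_ok = false },
		{ .populated = true, .low_wmark_ok = false },
	};

	/* Order-3 request with more pages reclaimed than attempted: compact. */
	printf("%d\n", should_compact_pgdat(zones, 2, 3, 4096, 1024));
	/* Order-0 request never triggers compaction from this path. */
	printf("%d\n", should_compact_pgdat(zones, 2, 0, 4096, 1024));
	return 0;
}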

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Tested-by: Zlatko Calusic <zcalusic@bitsync.net>
Cc: dormando <dormando@rydia.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Git-commit: 2ab44f434586b8ccb11f781b4c2730492e6628f5
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Change-Id: Ie490e6df9576de1de1bc0c3c1b634618394dcf8e
[vinmenon@codeaurora.org: resolve trivial merge conflicts]
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Author: Mel Gorman, 2013-07-03 15:01:47 -07:00 (committed by Vinayak Menon)
parent 54715ebd2e
commit 89e36de3c4
1 changed file with 30 additions and 29 deletions


@@ -2777,7 +2777,8 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
  */
 static bool kswapd_shrink_zone(struct zone *zone,
 			       struct scan_control *sc,
-			       unsigned long lru_pages)
+			       unsigned long lru_pages,
+			       unsigned long *nr_attempted)
 {
 	unsigned long nr_slab;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
@@ -2793,6 +2794,9 @@ static bool kswapd_shrink_zone(struct zone *zone,
 	nr_slab = shrink_slab(&shrink, sc->nr_scanned, lru_pages);
 	sc->nr_reclaimed += reclaim_state->reclaimed_slab;
 
+	/* Account for the number of pages attempted to reclaim */
+	*nr_attempted += sc->nr_to_reclaim;
+
 	return sc->nr_scanned >= sc->nr_to_reclaim;
 }
@@ -2837,7 +2841,9 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 	do {
 		unsigned long lru_pages = 0;
+		unsigned long nr_attempted = 0;
 		bool raise_priority = true;
+		bool pgdat_needs_compaction = (order > 0);
 
 		sc.nr_reclaimed = 0;
@@ -2887,7 +2893,21 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 		for (i = 0; i <= end_zone; i++) {
 			struct zone *zone = pgdat->node_zones + i;
 
 			if (!populated_zone(zone))
 				continue;
 
 			lru_pages += zone_reclaimable_pages(zone);
+
+			/*
+			 * If any zone is currently balanced then kswapd will
+			 * not call compaction as it is expected that the
+			 * necessary pages are already available.
+			 */
+			if (pgdat_needs_compaction &&
+					zone_watermark_ok(zone, order,
+						low_wmark_pages(zone),
+						*classzone_idx, 0))
+				pgdat_needs_compaction = false;
 		}
 
 		/*
@@ -2956,7 +2976,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 				 * already being scanned that high
 				 * watermark would be met at 100% efficiency.
 				 */
-				if (kswapd_shrink_zone(zone, &sc, lru_pages))
+				if (kswapd_shrink_zone(zone, &sc, lru_pages,
+						       &nr_attempted))
 					raise_priority = false;
 			}
@@ -3008,6 +3029,13 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 		if (try_to_freeze() || kthread_should_stop())
 			break;
 
+		/*
+		 * Compact if necessary and kswapd is reclaiming at least the
+		 * high watermark number of pages as requested
+		 */
+		if (pgdat_needs_compaction && sc.nr_reclaimed > nr_attempted)
+			compact_pgdat(pgdat, order);
+
 		/*
 		 * Raise priority if scanning rate is too low or there was no
 		 * progress in reclaiming pages
@@ -3017,33 +3045,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 	} while (sc.priority >= 0 &&
 		 !pgdat_balanced(pgdat, order, *classzone_idx));
 
-	/*
-	 * If kswapd was reclaiming at a higher order, it has the option of
-	 * sleeping without all zones being balanced. Before it does, it must
-	 * ensure that the watermarks for order-0 on *all* zones are met and
-	 * that the congestion flags are cleared. The congestion flag must
-	 * be cleared as kswapd is the only mechanism that clears the flag
-	 * and it is potentially going to sleep here.
-	 */
-	if (order) {
-		int zones_need_compaction = 1;
-
-		for (i = 0; i <= end_zone; i++) {
-			struct zone *zone = pgdat->node_zones + i;
-
-			if (!populated_zone(zone))
-				continue;
-
-			/* Check if the memory needs to be defragmented. */
-			if (zone_watermark_ok(zone, order,
-				low_wmark_pages(zone), *classzone_idx, 0))
-				zones_need_compaction = 0;
-		}
-
-		if (zones_need_compaction)
-			compact_pgdat(pgdat, order);
-	}
-
 out:
 	/*
 	 * Return the order we were reclaiming at so prepare_kswapd_sleep()