mirror of
https://github.com/followmsi/android_kernel_google_msm.git
synced 2024-11-06 23:17:41 +00:00
[PATCH] shrink_all_memory(): fix lru_pages handling
At the end of shrink_all_memory() we forget to recalculate lru_pages: it can be zero. Fix that up, and add a helper function for this operation too. Also, recalculate lru_pages each time around the inner loop to get the balancing correct. Cc: "Rafael J. Wysocki" <rjw@sisk.pl> Cc: Pavel Machek <pavel@ucw.cz> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
d63b70902b
commit
76395d3761
1 changed file with 16 additions and 17 deletions
33
mm/vmscan.c
33
mm/vmscan.c
|
@ -1406,6 +1406,16 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static unsigned long count_lru_pages(void)
|
||||||
|
{
|
||||||
|
struct zone *zone;
|
||||||
|
unsigned long ret = 0;
|
||||||
|
|
||||||
|
for_each_zone(zone)
|
||||||
|
ret += zone->nr_active + zone->nr_inactive;
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Try to free `nr_pages' of memory, system-wide, and return the number of
|
* Try to free `nr_pages' of memory, system-wide, and return the number of
|
||||||
* freed pages.
|
* freed pages.
|
||||||
|
@ -1420,7 +1430,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
|
||||||
unsigned long ret = 0;
|
unsigned long ret = 0;
|
||||||
int pass;
|
int pass;
|
||||||
struct reclaim_state reclaim_state;
|
struct reclaim_state reclaim_state;
|
||||||
struct zone *zone;
|
|
||||||
struct scan_control sc = {
|
struct scan_control sc = {
|
||||||
.gfp_mask = GFP_KERNEL,
|
.gfp_mask = GFP_KERNEL,
|
||||||
.may_swap = 0,
|
.may_swap = 0,
|
||||||
|
@ -1431,10 +1440,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
|
||||||
|
|
||||||
current->reclaim_state = &reclaim_state;
|
current->reclaim_state = &reclaim_state;
|
||||||
|
|
||||||
lru_pages = 0;
|
lru_pages = count_lru_pages();
|
||||||
for_each_zone(zone)
|
|
||||||
lru_pages += zone->nr_active + zone->nr_inactive;
|
|
||||||
|
|
||||||
nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
|
nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
|
||||||
/* If slab caches are huge, it's better to hit them first */
|
/* If slab caches are huge, it's better to hit them first */
|
||||||
while (nr_slab >= lru_pages) {
|
while (nr_slab >= lru_pages) {
|
||||||
|
@ -1461,13 +1467,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
|
||||||
for (pass = 0; pass < 5; pass++) {
|
for (pass = 0; pass < 5; pass++) {
|
||||||
int prio;
|
int prio;
|
||||||
|
|
||||||
/* Needed for shrinking slab caches later on */
|
|
||||||
if (!lru_pages)
|
|
||||||
for_each_zone(zone) {
|
|
||||||
lru_pages += zone->nr_active;
|
|
||||||
lru_pages += zone->nr_inactive;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Force reclaiming mapped pages in the passes #3 and #4 */
|
/* Force reclaiming mapped pages in the passes #3 and #4 */
|
||||||
if (pass > 2) {
|
if (pass > 2) {
|
||||||
sc.may_swap = 1;
|
sc.may_swap = 1;
|
||||||
|
@ -1483,7 +1482,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
reclaim_state.reclaimed_slab = 0;
|
reclaim_state.reclaimed_slab = 0;
|
||||||
shrink_slab(sc.nr_scanned, sc.gfp_mask, lru_pages);
|
shrink_slab(sc.nr_scanned, sc.gfp_mask,
|
||||||
|
count_lru_pages());
|
||||||
ret += reclaim_state.reclaimed_slab;
|
ret += reclaim_state.reclaimed_slab;
|
||||||
if (ret >= nr_pages)
|
if (ret >= nr_pages)
|
||||||
goto out;
|
goto out;
|
||||||
|
@ -1491,20 +1491,19 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
|
||||||
if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
|
if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
|
||||||
congestion_wait(WRITE, HZ / 10);
|
congestion_wait(WRITE, HZ / 10);
|
||||||
}
|
}
|
||||||
|
|
||||||
lru_pages = 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If ret = 0, we could not shrink LRUs, but there may be something
|
* If ret = 0, we could not shrink LRUs, but there may be something
|
||||||
* in slab caches
|
* in slab caches
|
||||||
*/
|
*/
|
||||||
if (!ret)
|
if (!ret) {
|
||||||
do {
|
do {
|
||||||
reclaim_state.reclaimed_slab = 0;
|
reclaim_state.reclaimed_slab = 0;
|
||||||
shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
|
shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
|
||||||
ret += reclaim_state.reclaimed_slab;
|
ret += reclaim_state.reclaimed_slab;
|
||||||
} while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
|
} while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
|
||||||
|
}
|
||||||
|
|
||||||
out:
|
out:
|
||||||
current->reclaim_state = NULL;
|
current->reclaim_state = NULL;
|
||||||
|
|
Loading…
Reference in a new issue