Revert "android/lowmemorykiller: Selectively count free CMA pages"

This reverts commit 06e8520b10.
This commit is contained in:
Thierry Strudel 2015-12-22 10:10:08 -08:00 committed by syphyr
parent 0675816e5b
commit 6b1475d492
3 changed files with 14 additions and 85 deletions

View File

@@ -96,47 +96,16 @@ static int test_task_flag(struct task_struct *p, int flag)
 
 static DEFINE_MUTEX(scan_mutex);
 
-int can_use_cma_pages(gfp_t gfp_mask)
-{
-	int can_use = 0;
-	int mtype = allocflags_to_migratetype(gfp_mask);
-	int i = 0;
-	int *mtype_fallbacks = get_migratetype_fallbacks(mtype);
-
-	if (is_migrate_cma(mtype)) {
-		can_use = 1;
-	} else {
-		for (i = 0;; i++) {
-			int fallbacktype = mtype_fallbacks[i];
-
-			if (is_migrate_cma(fallbacktype)) {
-				can_use = 1;
-				break;
-			}
-
-			if (fallbacktype == MIGRATE_RESERVE)
-				break;
-		}
-	}
-	return can_use;
-}
-
 void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx,
-			 int *other_free, int *other_file,
-			 int use_cma_pages)
+			 int *other_free, int *other_file)
 {
 	struct zone *zone;
 	struct zoneref *zoneref;
 	int zone_idx;
 
 	for_each_zone_zonelist(zone, zoneref, zonelist, MAX_NR_ZONES) {
-		zone_idx = zonelist_zone_idx(zoneref);
-		if (zone_idx == ZONE_MOVABLE) {
-			if (!use_cma_pages)
-				*other_free -=
-				    zone_page_state(zone, NR_FREE_CMA_PAGES);
+		if ((zone_idx = zonelist_zone_idx(zoneref)) == ZONE_MOVABLE)
 			continue;
-		}
 
 		if (zone_idx > classzone_idx) {
 			if (other_free != NULL)
@@ -147,22 +116,12 @@ void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx,
 						      NR_FILE_PAGES)
 					- zone_page_state(zone, NR_SHMEM);
 		} else if (zone_idx < classzone_idx) {
-			if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0)) {
-				if (!use_cma_pages) {
-					*other_free -= min(
-					  zone->lowmem_reserve[classzone_idx] +
-					  zone_page_state(
-					    zone, NR_FREE_CMA_PAGES),
-					  zone_page_state(
-					    zone, NR_FREE_PAGES));
-				} else {
-					*other_free -=
-					  zone->lowmem_reserve[classzone_idx];
-				}
-			} else {
+			if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0))
 				*other_free -=
-				  zone_page_state(zone, NR_FREE_PAGES);
-			}
+					zone->lowmem_reserve[classzone_idx];
+			else
+				*other_free -=
+					zone_page_state(zone, NR_FREE_PAGES);
 		}
 	}
 }
@@ -174,14 +133,12 @@ void tune_lmk_param(int *other_free, int *other_file, struct shrink_control *sc)
 	struct zonelist *zonelist;
 	enum zone_type high_zoneidx, classzone_idx;
 	unsigned long balance_gap;
-	int use_cma_pages;
 
 	gfp_mask = sc->gfp_mask;
 	zonelist = node_zonelist(0, gfp_mask);
 	high_zoneidx = gfp_zone(gfp_mask);
 	first_zones_zonelist(zonelist, high_zoneidx, NULL, &preferred_zone);
 	classzone_idx = zone_idx(preferred_zone);
-	use_cma_pages = can_use_cma_pages(gfp_mask);
 
 	balance_gap = min(low_wmark_pages(preferred_zone),
 			  (preferred_zone->present_pages +
@@ -193,38 +150,23 @@ void tune_lmk_param(int *other_free, int *other_file, struct shrink_control *sc)
 					  balance_gap, 0, 0))) {
 		if (lmk_fast_run)
 			tune_lmk_zone_param(zonelist, classzone_idx, other_free,
-				       other_file, use_cma_pages);
+				       other_file);
 		else
 			tune_lmk_zone_param(zonelist, classzone_idx, other_free,
-				       NULL, use_cma_pages);
+				       NULL);
 
-		if (zone_watermark_ok(preferred_zone, 0, 0, _ZONE, 0)) {
-			if (!use_cma_pages) {
-				*other_free -= min(
-				  preferred_zone->lowmem_reserve[_ZONE]
-				  + zone_page_state(
-				    preferred_zone, NR_FREE_CMA_PAGES),
-				  zone_page_state(
-				    preferred_zone, NR_FREE_PAGES));
-			} else {
-				*other_free -=
-				  preferred_zone->lowmem_reserve[_ZONE];
-			}
-		} else {
+		if (zone_watermark_ok(preferred_zone, 0, 0, _ZONE, 0))
+			*other_free -=
+				preferred_zone->lowmem_reserve[_ZONE];
+		else
 			*other_free -= zone_page_state(preferred_zone,
 						       NR_FREE_PAGES);
-		}
 
 		lowmem_print(4, "lowmem_shrink of kswapd tunning for highmem "
 			     "ofree %d, %d\n", *other_free, *other_file);
 	} else {
 		tune_lmk_zone_param(zonelist, classzone_idx, other_free,
-			       other_file, use_cma_pages);
-
-		if (!use_cma_pages) {
-			*other_free -=
-			  zone_page_state(preferred_zone, NR_FREE_CMA_PAGES);
-		}
+			       other_file);
 
 		lowmem_print(4, "lowmem_shrink tunning for others ofree %d, "
 			     "%d\n", *other_free, *other_file);

View File

@@ -63,14 +63,6 @@ enum {
 	MIGRATE_TYPES
 };
 
-/*
- * Returns a list which contains the migrate types on to which
- * an allocation falls back when the free list for the migrate
- * type mtype is depleted.
- * The end of the list is delimited by the type MIGRATE_RESERVE.
- */
-extern int *get_migratetype_fallbacks(int mtype);
-
 #ifdef CONFIG_CMA
 bool is_cma_pageblock(struct page *page);
 # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)

View File

@@ -1002,11 +1002,6 @@ static int fallbacks[MIGRATE_TYPES][4] = {
 #endif
 };
 
-int *get_migratetype_fallbacks(int mtype)
-{
-	return fallbacks[mtype];
-}
-
 /*
  * Move the free pages in a range to the free lists of the requested type.
  * Note that start_page and end_pages are not aligned on a pageblock