mirror of https://github.com/followmsi/android_kernel_google_msm.git
cma: fix watermark checking
* Add ALLOC_CMA alloc flag and pass it to [__]zone_watermark_ok()
  (from Minchan Kim).

* During the watermark check, decrease the number of available free
  pages by the number of free CMA pages when necessary (unmovable
  allocations cannot use pages from CMA areas).

CRs-Fixed: 446321
Change-Id: Ibd069b028eb80b70701c1b81cb28a503d8265be0
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[lauraa@codeaurora.org: context fixups]
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
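The core of the fix, in plain terms: free pages sitting in CMA areas can only serve movable allocations, so a watermark check for an allocation without ALLOC_CMA must not count them. Below is a minimal, self-contained sketch of that rule in plain C; the names (zone_stub, watermark_ok) and the numbers are illustrative stand-ins, not the kernel's types or values.

    /*
     * Minimal sketch of the idea behind this commit (simplified,
     * standalone C). An allocation that cannot use CMA must not
     * count free CMA pages toward the watermark; a movable
     * allocation (ALLOC_CMA set) may count them.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define ALLOC_CMA 0x80 /* same bit value the patch introduces */

    struct zone_stub {
            long free_pages;     /* total free pages in the zone   */
            long free_cma_pages; /* subset of the above inside CMA */
            long watermark;      /* pages that must stay free      */
    };

    static bool watermark_ok(const struct zone_stub *z, int alloc_flags)
    {
            long free_pages = z->free_pages;

            /* If the allocation can't use CMA areas, ignore free CMA pages */
            if (!(alloc_flags & ALLOC_CMA))
                    free_pages -= z->free_cma_pages;

            return free_pages > z->watermark;
    }

    int main(void)
    {
            struct zone_stub z = { .free_pages = 1000,
                                   .free_cma_pages = 600,
                                   .watermark = 500 };

            /* Movable allocation: all 1000 free pages count, check passes. */
            printf("movable:   %s\n", watermark_ok(&z, ALLOC_CMA) ? "ok" : "fail");

            /* Unmovable allocation: only 400 usable pages, check fails. */
            printf("unmovable: %s\n", watermark_ok(&z, 0) ? "ok" : "fail");
            return 0;
    }

With these numbers the same zone passes the watermark for a movable request and fails it for an unmovable one; that asymmetry is exactly what the unpatched check ignored.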
parent a21d55687a
commit deba57a4a7

3 changed files with 36 additions and 17 deletions
mm/compaction.c:

@@ -750,6 +750,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 	struct zoneref *z;
 	struct zone *zone;
 	int rc = COMPACT_SKIPPED;
+	int alloc_flags = 0;
 
 	/*
 	 * Check whether it is worth even starting compaction. The order check is
@@ -761,6 +762,10 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 
 	count_vm_event(COMPACTSTALL);
 
+#ifdef CONFIG_CMA
+	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+		alloc_flags |= ALLOC_CMA;
+#endif
 	/* Compact each zone in the list */
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
 								nodemask) {
@@ -770,7 +775,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 		rc = max(status, rc);
 
 		/* If a normal allocation would succeed, stop compacting */
-		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
+		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
+				      alloc_flags))
 			break;
 	}
 
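The same #ifdef CONFIG_CMA guard reappears below in gfp_to_alloc_flags() and __alloc_pages_nodemask(): only MIGRATE_MOVABLE requests earn ALLOC_CMA. A standalone sketch of that derivation, using stub migrate-type values rather than the kernel's gfp machinery:

    #include <stdio.h>

    #define ALLOC_CMA 0x80

    /* Illustrative stand-ins for the kernel's migrate types. */
    enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE };

    /* Mirrors the pattern the patch adds at each call site. */
    static int cma_alloc_flags(enum migratetype mt)
    {
            int alloc_flags = 0;
            if (mt == MIGRATE_MOVABLE)
                    alloc_flags |= ALLOC_CMA;
            return alloc_flags;
    }

    int main(void)
    {
            printf("movable:   0x%x\n", cma_alloc_flags(MIGRATE_MOVABLE));   /* 0x80 */
            printf("unmovable: 0x%x\n", cma_alloc_flags(MIGRATE_UNMOVABLE)); /* 0x0  */
            return 0;
    }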
mm/internal.h:

@@ -342,3 +342,17 @@ extern u64 hwpoison_filter_flags_mask;
 extern u64 hwpoison_filter_flags_value;
 extern u64 hwpoison_filter_memcg;
 extern u32 hwpoison_filter_enable;
+
+/* The ALLOC_WMARK bits are used as an index to zone->watermark */
+#define ALLOC_WMARK_MIN		WMARK_MIN
+#define ALLOC_WMARK_LOW		WMARK_LOW
+#define ALLOC_WMARK_HIGH	WMARK_HIGH
+#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
+
+/* Mask to get the watermark bits */
+#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
+
+#define ALLOC_HARDER		0x10 /* try to alloc harder */
+#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
+#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
+#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
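A side note on the flag values being moved here: the low two bits of alloc_flags double as an index into zone->watermark[], which is why ALLOC_WMARK_MASK is (ALLOC_NO_WATERMARKS-1), i.e. 0x03, and why the boolean flags start at 0x04. A standalone illustration in plain C, mirroring the kernel's enum zone_watermarks ordering (the watermark array contents are made up):

    #include <stdio.h>

    enum { WMARK_MIN, WMARK_LOW, WMARK_HIGH, NR_WMARK };

    #define ALLOC_WMARK_LOW     WMARK_LOW
    #define ALLOC_NO_WATERMARKS 0x04
    #define ALLOC_WMARK_MASK    (ALLOC_NO_WATERMARKS - 1) /* == 0x03 */
    #define ALLOC_CPUSET        0x40
    #define ALLOC_CMA           0x80

    int main(void)
    {
            unsigned long watermark[NR_WMARK] = { 128, 160, 192 };
            int alloc_flags = ALLOC_WMARK_LOW | ALLOC_CPUSET | ALLOC_CMA;

            /* Masking off the high flag bits recovers the index (here 1). */
            unsigned long mark = watermark[alloc_flags & ALLOC_WMARK_MASK];

            printf("mark = %lu\n", mark); /* prints 160 */
            return 0;
    }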
mm/page_alloc.c:

@@ -1524,19 +1524,6 @@ failed:
 	return NULL;
 }
 
-/* The ALLOC_WMARK bits are used as an index to zone->watermark */
-#define ALLOC_WMARK_MIN		WMARK_MIN
-#define ALLOC_WMARK_LOW		WMARK_LOW
-#define ALLOC_WMARK_HIGH	WMARK_HIGH
-#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
-
-/* Mask to get the watermark bits */
-#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
-
-#define ALLOC_HARDER		0x10 /* try to alloc harder */
-#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
-#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
-
 #ifdef CONFIG_FAIL_PAGE_ALLOC
 
 static struct {
@@ -1631,7 +1618,11 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		min -= min / 2;
 	if (alloc_flags & ALLOC_HARDER)
 		min -= min / 4;
-
+#ifdef CONFIG_CMA
+	/* If allocation can't use CMA areas don't use free CMA pages */
+	if (!(alloc_flags & ALLOC_CMA))
+		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
+#endif
 	if (free_pages <= min + lowmem_reserve)
 		return false;
 	for (o = 0; o < order; o++) {
@@ -2300,7 +2291,10 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 			unlikely(test_thread_flag(TIF_MEMDIE))))
 			alloc_flags |= ALLOC_NO_WATERMARKS;
 	}
-
+#ifdef CONFIG_CMA
+	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+		alloc_flags |= ALLOC_CMA;
+#endif
 	return alloc_flags;
 }
 
@@ -2509,6 +2503,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	struct page *page = NULL;
 	int migratetype = allocflags_to_migratetype(gfp_mask);
 	unsigned int cpuset_mems_cookie;
+	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET;
 
 	gfp_mask &= gfp_allowed_mask;
 
@@ -2537,9 +2532,13 @@ retry_cpuset:
 	if (!preferred_zone)
 		goto out;
 
+#ifdef CONFIG_CMA
+	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+		alloc_flags |= ALLOC_CMA;
+#endif
 	/* First allocation attempt */
 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
-			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
+			zonelist, high_zoneidx, alloc_flags,
 			preferred_zone, migratetype);
 	if (unlikely(!page))
 		page = __alloc_pages_slowpath(gfp_mask, order,