Mirror of https://github.com/followmsi/android_kernel_google_msm.git
mm: cma: on movable allocations try MIGRATE_CMA first
It has been observed that the system tends to keep a lot of free CMA pages even under very high memory pressure. The CMA fallback for movable pages is used very rarely, only when the system is completely drained of free MOVABLE pages. As a result, out-of-memory can be triggered for unmovable allocations even while many CMA pages are still available. This problem was not observed previously, because movable pages used to serve as a fallback for unmovable allocations.

To avoid this situation, this commit changes the allocation order so that movable allocations use MIGRATE_CMA pageblocks first. With this change, MIGRATE_CMA can be removed from the fallback path of the MIGRATE_MOVABLE type, which means __rmqueue_fallback() never deals with CMA pages and all the MIGRATE_CMA checks can be dropped from that function.

Change-Id: Ie13312d62a6af12d7aa78b4283ed25535a6d49fd
CRs-Fixed: 435287
Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
Reported-by: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
parent 00cc84cfac
commit a1357d5f1b
1 changed file with 26 additions and 31 deletions
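Before the diff itself, the effect of the new ordering can be sketched in plain C. The snippet below is only an illustration of the idea in the commit message, not the patched kernel code: free pages are modeled as simple per-type counters, and rmqueue(), take_from() and take_fallback() are made-up stand-ins for __rmqueue(), __rmqueue_smallest() and __rmqueue_fallback(). It shows a MOVABLE request draining the CMA pool first, then ordinary MOVABLE pages, and only then consulting the fallback table, which no longer lists MIGRATE_CMA.

/*
 * Toy user-space model of the allocation ordering introduced by this patch.
 * Illustration only: counters instead of buddy lists, invented helper names.
 */
#include <stdio.h>

enum { UNMOVABLE, RECLAIMABLE, MOVABLE, CMA, RESERVE, NR_TYPES };

static const char *type_name[NR_TYPES] = {
    "UNMOVABLE", "RECLAIMABLE", "MOVABLE", "CMA", "RESERVE"
};

/* Free pages per migrate type in a toy zone. */
static int free_pages[NR_TYPES] = { 4, 4, 2, 3, 1 };

/* Fallback table after the patch: the MOVABLE row no longer mentions CMA. */
static const int fallbacks[NR_TYPES][3] = {
    [UNMOVABLE]   = { RECLAIMABLE, MOVABLE,   RESERVE },
    [RECLAIMABLE] = { UNMOVABLE,   MOVABLE,   RESERVE },
    [MOVABLE]     = { RECLAIMABLE, UNMOVABLE, RESERVE },
    [CMA]         = { RESERVE },  /* never used */
    [RESERVE]     = { RESERVE },  /* never used */
};

static int take_from(int type)
{
    if (free_pages[type] == 0)
        return -1;
    free_pages[type]--;
    return type;
}

static int take_fallback(int type)
{
    for (int i = 0; i < 3; i++) {
        int got = take_from(fallbacks[type][i]);
        if (got >= 0)
            return got;
    }
    return -1;
}

/* Mirrors the shape of the reworked __rmqueue() loop in the diff below. */
static int rmqueue(int migratetype)
{
    if (migratetype == MOVABLE)        /* CONFIG_CMA: aim at CMA first */
        migratetype = CMA;

    for (;;) {
        int got = take_from(migratetype);
        if (got >= 0 || migratetype == RESERVE)
            return got;

        if (migratetype == CMA) {      /* CMA empty: retry as plain MOVABLE */
            migratetype = MOVABLE;
            continue;
        }

        got = take_fallback(migratetype);
        if (got >= 0)
            return got;

        /* Use RESERVE rather than fail the allocation. */
        migratetype = RESERVE;
    }
}

int main(void)
{
    for (int i = 0; i < 6; i++) {
        int got = rmqueue(MOVABLE);
        printf("request %d served from %s\n", i,
               got >= 0 ? type_name[got] : "nowhere");
    }
    return 0;
}

Compiled with any C99 compiler, the six requests are served from CMA, CMA, CMA, MOVABLE, MOVABLE and finally RECLAIMABLE: CMA pageblocks are consumed before the regular movable pool is exhausted, which is exactly the behaviour the patch is after.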
mm/page_alloc.c

@@ -913,14 +913,12 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
  */
-static int fallbacks[MIGRATE_TYPES][4] = {
+static int fallbacks[MIGRATE_TYPES][3] = {
     [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
     [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
-#ifdef CONFIG_CMA
-    [MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
-    [MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
-#else
     [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
+#ifdef CONFIG_CMA
+    [MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
 #endif
     [MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
     [MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */

@@ -1043,17 +1041,10 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
              * pages to the preferred allocation list. If falling
              * back for a reclaimable kernel allocation, be more
              * aggressive about taking ownership of free pages
-             *
-             * On the other hand, never change migration
-             * type of MIGRATE_CMA pageblocks nor move CMA
-             * pages on different free lists. We don't
-             * want unmovable pages to be allocated from
-             * MIGRATE_CMA areas.
              */
-            if (!is_migrate_cma(migratetype) &&
-                (unlikely(current_order >= pageblock_order / 2) ||
-                 start_migratetype == MIGRATE_RECLAIMABLE ||
-                 page_group_by_mobility_disabled)) {
+            if (unlikely(current_order >= pageblock_order / 2) ||
+                start_migratetype == MIGRATE_RECLAIMABLE ||
+                page_group_by_mobility_disabled) {
                 int pages;
                 pages = move_freepages_block(zone, page,
                             start_migratetype);

@@ -1072,14 +1063,12 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
             rmv_page_order(page);

             /* Take ownership for orders >= pageblock_order */
-            if (current_order >= pageblock_order &&
-                !is_migrate_cma(migratetype))
+            if (current_order >= pageblock_order)
                 change_pageblock_range(page, current_order,
                             start_migratetype);

             expand(zone, page, order, current_order, area,
-                   is_migrate_cma(migratetype)
-                 ? migratetype : start_migratetype);
+                   start_migratetype);

             trace_mm_page_alloc_extfrag(page, order, current_order,
                             start_migratetype, migratetype);

@@ -1100,21 +1089,27 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
 {
     struct page *page;

-retry_reserve:
-    page = __rmqueue_smallest(zone, order, migratetype);
+#ifdef CONFIG_CMA
+    if (migratetype == MIGRATE_MOVABLE)
+        migratetype = MIGRATE_CMA;
+#endif

-    if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
-        page = __rmqueue_fallback(zone, order, migratetype);
+    for(;;) {
+        page = __rmqueue_smallest(zone, order, migratetype);
+        if (likely(page) || migratetype == MIGRATE_RESERVE)
+            break;

-        /*
-         * Use MIGRATE_RESERVE rather than fail an allocation. goto
-         * is used because __rmqueue_smallest is an inline function
-         * and we want just one call site
-         */
-        if (!page) {
-            migratetype = MIGRATE_RESERVE;
-            goto retry_reserve;
+        if (is_migrate_cma(migratetype)) {
+            migratetype = MIGRATE_MOVABLE;
+            continue;
         }
+
+        page = __rmqueue_fallback(zone, order, migratetype);
+        if (page)
+            break;
+
+        /* Use MIGRATE_RESERVE rather than fail an allocation. */
+        migratetype = MIGRATE_RESERVE;
     }

     trace_mm_page_alloc_zone_locked(page, order, migratetype);