mirror of
https://github.com/followmsi/android_kernel_google_msm.git
synced 2024-11-06 23:17:41 +00:00
Revert "mm: cma: on movable allocations try MIGRATE_CMA first"
This reverts commit b5662d64fa5ee483b985b351dec993402422fee3.

Using CMA pages first creates good utilization but has some unfortunate side effects. Many movable allocations come from the filesystem layer, which can hold on to pages for long periods of time, and this causes high allocation times (~200ms) and high rates of failure. Revert this patch and use alternate allocation strategies to get better utilization.

Change-Id: I917e137d5fb292c9f8282506f71a799a6451ccfa
CRs-Fixed: 452508
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
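To make the rationale concrete, here is a minimal userspace sketch (plain C, not kernel code; the pool sizes and the struct zone_sim / alloc_movable_* names are invented for illustration). It contrasts "try MIGRATE_CMA first for movable allocations" with "use MIGRATE_CMA only as a fallback": with CMA-first, long-lived movable users such as page-cache pages immediately occupy the CMA pool, which then has to be drained by migration when a real CMA allocation arrives.

/* toy_cma_order.c - toy illustration only, NOT kernel code */
#include <stdio.h>

enum pool { MOVABLE, CMA, NPOOLS };

struct zone_sim {
        int free[NPOOLS];   /* free pages per pool */
        int cma_pinned;     /* CMA pages handed out to movable users */
};

/* "CMA first" for movable allocations: the behaviour being reverted. */
static int alloc_movable_cma_first(struct zone_sim *z)
{
        if (z->free[CMA] > 0) {
                z->free[CMA]--;
                z->cma_pinned++;
                return 0;
        }
        if (z->free[MOVABLE] > 0) {
                z->free[MOVABLE]--;
                return 0;
        }
        return -1; /* would need reclaim or migration */
}

/* "CMA only as a fallback": the behaviour this revert restores. */
static int alloc_movable_cma_fallback(struct zone_sim *z)
{
        if (z->free[MOVABLE] > 0) {
                z->free[MOVABLE]--;
                return 0;
        }
        if (z->free[CMA] > 0) {
                z->free[CMA]--;
                z->cma_pinned++;
                return 0;
        }
        return -1;
}

static void run(const char *name, int (*alloc)(struct zone_sim *))
{
        struct zone_sim z = { .free = { [MOVABLE] = 100, [CMA] = 50 } };
        int i;

        /* 60 movable allocations, e.g. page-cache pages that may live long */
        for (i = 0; i < 60; i++)
                alloc(&z);
        printf("%-28s CMA pages pinned by movable users: %d\n",
               name, z.cma_pinned);
}

int main(void)
{
        run("CMA first (reverted):", alloc_movable_cma_first);
        run("CMA as fallback (restored):", alloc_movable_cma_fallback);
        return 0;
}

In this toy run the CMA-first strategy pins all 50 simulated CMA pages while the fallback strategy pins none; the kernel-side change in this commit is the analogous reordering in the fallbacks[] table and __rmqueue() shown in the diff below.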
This commit is contained in:
parent ee57020c2b
commit 2390206ce0
1 changed file with 30 additions and 25 deletions
@@ -913,12 +913,14 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
  */
-static int fallbacks[MIGRATE_TYPES][3] = {
+static int fallbacks[MIGRATE_TYPES][4] = {
         [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
         [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
-        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
 #ifdef CONFIG_CMA
+        [MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
         [MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
+#else
+        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
+#endif
         [MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
         [MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */

@@ -1041,10 +1043,17 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
                          * pages to the preferred allocation list. If falling
                          * back for a reclaimable kernel allocation, be more
                          * aggressive about taking ownership of free pages
+                         *
+                         * On the other hand, never change migration
+                         * type of MIGRATE_CMA pageblocks nor move CMA
+                         * pages on different free lists. We don't
+                         * want unmovable pages to be allocated from
+                         * MIGRATE_CMA areas.
                          */
-                        if (unlikely(current_order >= pageblock_order / 2) ||
-                                        start_migratetype == MIGRATE_RECLAIMABLE ||
-                                        page_group_by_mobility_disabled) {
+                        if (!is_migrate_cma(migratetype) &&
+                            (unlikely(current_order >= pageblock_order / 2) ||
+                             start_migratetype == MIGRATE_RECLAIMABLE ||
+                             page_group_by_mobility_disabled)) {
                                 int pages;
                                 pages = move_freepages_block(zone, page,
                                                                 start_migratetype);

@@ -1063,12 +1072,14 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
                         rmv_page_order(page);

                         /* Take ownership for orders >= pageblock_order */
-                        if (current_order >= pageblock_order)
+                        if (current_order >= pageblock_order &&
+                            !is_migrate_cma(migratetype))
                                 change_pageblock_range(page, current_order,
                                                         start_migratetype);

                         expand(zone, page, order, current_order, area,
-                                start_migratetype);
+                               is_migrate_cma(migratetype)
+                             ? migratetype : start_migratetype);

                         trace_mm_page_alloc_extfrag(page, order, current_order,
                                 start_migratetype, migratetype);

@@ -1089,27 +1100,21 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
 {
         struct page *page;

-#ifdef CONFIG_CMA
-        if (migratetype == MIGRATE_MOVABLE)
-                migratetype = MIGRATE_CMA;
-#endif
-
-        for(;;) {
-                page = __rmqueue_smallest(zone, order, migratetype);
-                if (likely(page) || migratetype == MIGRATE_RESERVE)
-                        break;
-
-                if (is_migrate_cma(migratetype)) {
-                        migratetype = MIGRATE_MOVABLE;
-                        continue;
-                }
+retry_reserve:
+        page = __rmqueue_smallest(zone, order, migratetype);

+        if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
                 page = __rmqueue_fallback(zone, order, migratetype);
-                if (page)
-                        break;

-                /* Use MIGRATE_RESERVE rather than fail an allocation. */
-                migratetype = MIGRATE_RESERVE;
+                /*
+                 * Use MIGRATE_RESERVE rather than fail an allocation. goto
+                 * is used because __rmqueue_smallest is an inline function
+                 * and we want just one call site
+                 */
+                if (!page) {
+                        migratetype = MIGRATE_RESERVE;
+                        goto retry_reserve;
+                }
         }

         trace_mm_page_alloc_zone_locked(page, order, migratetype);