From fc5e1ea17bf5eb16490ace1436a1dfc2753b7c8b Mon Sep 17 00:00:00 2001
From: Liam Mark
Date: Thu, 25 Jun 2015 10:07:11 -0700
Subject: [PATCH] Revert "Revert "mm: add cma pcp list""

This reverts commit f690884e1622689dcfa11cce866e13fe5a21078f.

This patch was reverted earlier because it exposed several CMA bugs.
Those bugs are now fixed, so re-enable it.

Original commit text:

Add a cma pcp list in order to increase cma memory utilization.
Increased cma memory utilization will improve overall memory
utilization, because free cma pages are ignored when memory reclaim is
done with gfp mask GFP_KERNEL. Since most memory reclaim is done by
kswapd, which uses a gfp mask of GFP_KERNEL, increasing cma memory
utilization ensures that less aggressive memory reclaim takes place.
Increased cma memory utilization also improves performance, for
example by increasing app concurrency.

Change-Id: Ia0f555427148b95068b3a7481e695ed02d58710d
Signed-off-by: Liam Mark
---
 include/linux/mmzone.h |   8 ++--
 mm/page_alloc.c        | 102 ++++++++++++++++++++++++-----------------
 mm/vmstat.c            |   2 +-
 3 files changed, 65 insertions(+), 47 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 604243eef844..1d49b244c64c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -39,8 +39,6 @@ enum {
 	MIGRATE_UNMOVABLE,
 	MIGRATE_RECLAIMABLE,
 	MIGRATE_MOVABLE,
-	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
-	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
 #ifdef CONFIG_CMA
 	/*
 	 * MIGRATE_CMA migration type is designed to mimic the way
@@ -57,8 +55,10 @@ enum {
 	 */
 	MIGRATE_CMA,
 #endif
+	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
+	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
 #ifdef CONFIG_MEMORY_ISOLATION
 	MIGRATE_ISOLATE,	/* can't allocate from here */
 #endif
 	MIGRATE_TYPES
 };
@@ -74,9 +74,11 @@ extern int *get_migratetype_fallbacks(int mtype);
 #ifdef CONFIG_CMA
 bool is_cma_pageblock(struct page *page);
 # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define get_cma_migrate_type() MIGRATE_CMA
 #else
 # define is_cma_pageblock(page) false
 # define is_migrate_cma(migratetype) false
+# define get_cma_migrate_type() MIGRATE_MOVABLE
 #endif
 
 #define for_each_migratetype_order(order, type) \
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 316c2092ecc9..532faeb3aa4e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1155,34 +1155,12 @@ retry_reserve:
 	return page;
 }
 
-static struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
-				int migratetype)
+static struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
 {
 	struct page *page = 0;
-#ifdef CONFIG_CMA
-	if (migratetype == MIGRATE_MOVABLE && !zone->cma_alloc)
-		page = __rmqueue_smallest(zone, order, MIGRATE_CMA);
-	if (!page)
-#endif
-retry_reserve:
-		page = __rmqueue_smallest(zone, order, migratetype);
-
-
-	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
-		page = __rmqueue_fallback(zone, order, migratetype);
-
-		/*
-		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
-		 * is used because __rmqueue_smallest is an inline function
-		 * and we want just one call site
-		 */
-		if (!page) {
-			migratetype = MIGRATE_RESERVE;
-			goto retry_reserve;
-		}
-	}
-
-	trace_mm_page_alloc_zone_locked(page, order, migratetype);
+	if (IS_ENABLED(CONFIG_CMA))
+		if (!zone->cma_alloc)
+			page = __rmqueue_smallest(zone, order, MIGRATE_CMA);
 
 	return page;
 }
@@ -1193,15 +1171,21 @@ retry_reserve:
 	 */
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
-			int migratetype, int cold, int cma)
+			int migratetype, int cold)
 {
 	int mt = migratetype, i;
 
 	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
 		struct page *page;
-		if (cma)
-			page = __rmqueue_cma(zone, order, migratetype);
+
+		/*
+		 * If migrate type CMA is being requested, only try to
+		 * satisfy the request with CMA pages in order to increase
+		 * CMA utilization.
+		 */
+		if (is_migrate_cma(migratetype))
+			page = __rmqueue_cma(zone, order);
 		else
 			page = __rmqueue(zone, order, migratetype);
 		if (unlikely(page == NULL))
@@ -1236,6 +1220,27 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 	return i;
 }
 
+/*
+ * Return the pcp list that corresponds to the migrate type if that list isn't
+ * empty.
+ * If the list is empty return NULL.
+ */
+static struct list_head *get_populated_pcp_list(struct zone *zone,
+			unsigned int order, struct per_cpu_pages *pcp,
+			int migratetype, int cold)
+{
+	struct list_head *list = &pcp->lists[migratetype];
+	if (list_empty(list)) {
+		pcp->count += rmqueue_bulk(zone, order,
+				pcp->batch, list,
+				migratetype, cold);
+
+		if (list_empty(list))
+			list = NULL;
+	}
+	return list;
+}
+
 #ifdef CONFIG_NUMA
 /*
  * Called from the vmstat counter updater to drain pagesets of this
@@ -1406,8 +1411,7 @@ void free_hot_cold_page(struct page *page, int cold)
 	 * excessively into the page allocator
 	 */
 	if (migratetype >= MIGRATE_PCPTYPES) {
-		if (unlikely(is_migrate_isolate(migratetype)) ||
-		    is_migrate_cma(migratetype)) {
+		if (unlikely(is_migrate_isolate(migratetype))) {
 			free_one_page(zone, page, 0, migratetype);
 			goto out;
 		}
@@ -1549,23 +1553,33 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 			int migratetype)
 {
 	unsigned long flags;
-	struct page *page;
+	struct page *page = NULL;
 	int cold = !!(gfp_flags & __GFP_COLD);
 
 again:
 	if (likely(order == 0)) {
 		struct per_cpu_pages *pcp;
-		struct list_head *list;
+		struct list_head *list = NULL;
 
 		local_irq_save(flags);
 		pcp = &this_cpu_ptr(zone->pageset)->pcp;
-		list = &pcp->lists[migratetype];
-		if (list_empty(list)) {
-			pcp->count += rmqueue_bulk(zone, 0,
-					pcp->batch, list,
-					migratetype, cold,
-					gfp_flags & __GFP_CMA);
-			if (unlikely(list_empty(list)))
+
+		/* First try to get CMA pages */
+		if (migratetype == MIGRATE_MOVABLE &&
+			gfp_flags & __GFP_CMA) {
+			list = get_populated_pcp_list(zone, 0, pcp,
+					get_cma_migrate_type(), cold);
+		}
+
+		if (list == NULL) {
+			/*
+			 * Either CMA is not suitable or there are no free CMA
+			 * pages.
+			 */
+			list = get_populated_pcp_list(zone, 0, pcp,
+					migratetype, cold);
+			if (unlikely(list == NULL) ||
+				unlikely(list_empty(list)))
 				goto failed;
 		}
 
@@ -1591,10 +1605,12 @@ again:
 			WARN_ON_ONCE(order > 1);
 		}
 		spin_lock_irqsave(&zone->lock, flags);
-		if (gfp_flags & __GFP_CMA)
-			page = __rmqueue_cma(zone, order, migratetype);
-		else
+		if (migratetype == MIGRATE_MOVABLE && gfp_flags & __GFP_CMA)
+			page = __rmqueue_cma(zone, order);
+
+		if (!page)
 			page = __rmqueue(zone, order, migratetype);
+
 		spin_unlock(&zone->lock);
 		if (!page)
 			goto failed;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 90c03ddecb14..fa3d7bd6d6c0 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -629,10 +629,10 @@ static char * const migratetype_names[MIGRATE_TYPES] = {
 	"Unmovable",
 	"Reclaimable",
 	"Movable",
-	"Reserve",
 #ifdef CONFIG_CMA
 	"CMA",
 #endif
+	"Reserve",
 #ifdef CONFIG_MEMORY_ISOLATION
 	"Isolate",
 #endif
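
For reference, the order-0 fast path introduced in buffered_rmqueue() is
easier to follow outside of diff context. The sketch below is a minimal,
self-contained userspace model of only that selection logic; the
pcp_model, pick_populated_list and choose_list names, the integer list
handles and the demo counts are invented for illustration and are not
kernel interfaces.

/*
 * Illustrative model only: with CMA on its own pcp list, an order-0
 * MIGRATE_MOVABLE request that allows CMA first tries the MIGRATE_CMA
 * list and falls back to MIGRATE_MOVABLE when that list stays empty.
 */
#include <stdbool.h>
#include <stdio.h>

enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,
       MIGRATE_CMA, MIGRATE_PCPTYPES };

struct pcp_model {
	int count[MIGRATE_PCPTYPES];	/* pages queued on each pcp list */
};

/* Stand-in for get_populated_pcp_list(); -1 plays the role of NULL. */
static int pick_populated_list(struct pcp_model *pcp, int migratetype)
{
	/* The kernel would refill via rmqueue_bulk() and re-check here. */
	if (pcp->count[migratetype] == 0)
		return -1;
	return migratetype;
}

/* Stand-in for the order-0 branch of buffered_rmqueue() in the patch. */
static int choose_list(struct pcp_model *pcp, int migratetype, bool gfp_cma)
{
	int list = -1;

	/* First try to get CMA pages. */
	if (migratetype == MIGRATE_MOVABLE && gfp_cma)
		list = pick_populated_list(pcp, MIGRATE_CMA);

	/* Either CMA is not suitable or there are no free CMA pages. */
	if (list < 0)
		list = pick_populated_list(pcp, migratetype);

	return list;
}

int main(void)
{
	struct pcp_model pcp = { .count = { 4, 4, 4, 2 } };

	/* A movable request with the CMA hint drains the CMA pcp list first. */
	printf("movable, cma allowed -> list %d\n",
	       choose_list(&pcp, MIGRATE_MOVABLE, true));

	/* Without the CMA hint the movable list is used directly. */
	printf("movable, cma not allowed -> list %d\n",
	       choose_list(&pcp, MIGRATE_MOVABLE, false));
	return 0;
}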