Merge "Revert "Revert "mm: add cma pcp list"""

Linux Build Service Account 2015-06-29 17:51:52 -07:00 committed by Gerrit - the friendly Code Review server
commit 595ccf4c15
3 changed files with 65 additions and 47 deletions

include/linux/mmzone.h

@@ -39,8 +39,6 @@ enum {
MIGRATE_UNMOVABLE,
MIGRATE_RECLAIMABLE,
MIGRATE_MOVABLE,
MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
MIGRATE_RESERVE = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
/*
* MIGRATE_CMA migration type is designed to mimic the way
@@ -57,8 +55,10 @@ enum {
*/
MIGRATE_CMA,
#endif
MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
MIGRATE_RESERVE = MIGRATE_PCPTYPES,
#ifdef CONFIG_MEMORY_ISOLATION
MIGRATE_ISOLATE, /* can't allocate from here */
#endif
MIGRATE_TYPES
};
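The point of the move: MIGRATE_CMA now sits below MIGRATE_PCPTYPES, so the per-cpu pageset grows a dedicated CMA free list. For reference, this is the structure that array feeds, roughly as it looked in kernels of this era (field set recalled from that period, not part of this diff):

struct per_cpu_pages {
	int count;	/* number of pages in the lists */
	int high;	/* high watermark, emptying needed */
	int batch;	/* chunk size for buddy add/remove */

	/*
	 * One list per migrate type stored on the pcp lists; with
	 * MIGRATE_CMA below MIGRATE_PCPTYPES this array now
	 * includes a CMA list.
	 */
	struct list_head lists[MIGRATE_PCPTYPES];
};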
@@ -74,9 +74,11 @@ extern int *get_migratetype_fallbacks(int mtype);
#ifdef CONFIG_CMA
bool is_cma_pageblock(struct page *page);
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define get_cma_migrate_type() MIGRATE_CMA
#else
# define is_cma_pageblock(page) false
# define is_migrate_cma(migratetype) false
# define get_cma_migrate_type() MIGRATE_MOVABLE
#endif
#define for_each_migratetype_order(order, type) \
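get_cma_migrate_type() lets callers say "CMA if it exists" without an #ifdef at every call site. A hypothetical helper sketching the intended use (illustration only, not part of this diff):

/*
 * Pick the pcp list to try first.  With CONFIG_CMA=n this
 * degenerates to the ordinary movable list.
 */
static struct list_head *first_pcp_list(struct per_cpu_pages *pcp)
{
	return &pcp->lists[get_cma_migrate_type()];
}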

mm/page_alloc.c

@@ -1155,34 +1155,12 @@ retry_reserve:
return page;
}
static struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
int migratetype)
static struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
{
struct page *page = 0;
#ifdef CONFIG_CMA
if (migratetype == MIGRATE_MOVABLE && !zone->cma_alloc)
page = __rmqueue_smallest(zone, order, MIGRATE_CMA);
if (!page)
#endif
retry_reserve:
page = __rmqueue_smallest(zone, order, migratetype);
if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
page = __rmqueue_fallback(zone, order, migratetype);
/*
* Use MIGRATE_RESERVE rather than fail an allocation. goto
* is used because __rmqueue_smallest is an inline function
* and we want just one call site
*/
if (!page) {
migratetype = MIGRATE_RESERVE;
goto retry_reserve;
}
}
trace_mm_page_alloc_zone_locked(page, order, migratetype);
if (IS_ENABLED(CONFIG_CMA))
if (!zone->cma_alloc)
page = __rmqueue_smallest(zone, order, MIGRATE_CMA);
return page;
}
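Since the hunk above interleaves removed and added lines, here is the replacement __rmqueue_cma assembled for readability (whitespace assumed): it dips into the CMA free lists only while no CMA allocation is in progress in the zone (the zone->cma_alloc flag), and leaves all fallback to the caller.

static struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
{
	struct page *page = 0;

	/* Skip CMA pageblocks while a CMA allocation is in flight. */
	if (IS_ENABLED(CONFIG_CMA))
		if (!zone->cma_alloc)
			page = __rmqueue_smallest(zone, order, MIGRATE_CMA);

	return page;
}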
@@ -1193,15 +1171,21 @@ retry_reserve:
*/
static int rmqueue_bulk(struct zone *zone, unsigned int order,
unsigned long count, struct list_head *list,
int migratetype, int cold, int cma)
int migratetype, int cold)
{
int mt = migratetype, i;
spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
struct page *page;
if (cma)
page = __rmqueue_cma(zone, order, migratetype);
/*
* If migrate type CMA is being requested only try to
* satisfy the request with CMA pages to try and increase
* CMA utilization.
*/
if (is_migrate_cma(migratetype))
page = __rmqueue_cma(zone, order);
else
page = __rmqueue(zone, order, migratetype);
if (unlikely(page == NULL))
@@ -1236,6 +1220,27 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
return i;
}
/*
* Return the pcp list that corresponds to the migrate type if that list isn't
* empty.
* If the list is empty return NULL.
*/
static struct list_head *get_populated_pcp_list(struct zone *zone,
unsigned int order, struct per_cpu_pages *pcp,
int migratetype, int cold)
{
struct list_head *list = &pcp->lists[migratetype];
if (list_empty(list)) {
pcp->count += rmqueue_bulk(zone, order,
pcp->batch, list,
migratetype, cold);
if (list_empty(list))
list = NULL;
}
return list;
}
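Traced through the hunks below, the order-0 refill flow this helper enables looks like the following (a sketch of the call chain, not new API):

/*
 * buffered_rmqueue(order == 0)
 *   -> get_populated_pcp_list(zone, 0, pcp, get_cma_migrate_type(), cold)
 *        -> rmqueue_bulk(..., MIGRATE_CMA, ...)    fills from CMA only
 *   -> if the list is still NULL:
 *      get_populated_pcp_list(zone, 0, pcp, migratetype, cold)
 *        -> rmqueue_bulk(..., migratetype, ...)    ordinary buddy path
 */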
#ifdef CONFIG_NUMA
/*
* Called from the vmstat counter updater to drain pagesets of this
@@ -1406,8 +1411,7 @@ void free_hot_cold_page(struct page *page, int cold)
* excessively into the page allocator
*/
if (migratetype >= MIGRATE_PCPTYPES) {
if (unlikely(is_migrate_isolate(migratetype)) ||
is_migrate_cma(migratetype)) {
if (unlikely(is_migrate_isolate(migratetype))) {
free_one_page(zone, page, 0, migratetype);
goto out;
}
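Why the is_migrate_cma() test can simply disappear here: after the enum reorder a freed CMA page never reaches this branch at all, as the values show (assuming CONFIG_CMA=y):

/*
 * MIGRATE_UNMOVABLE   == 0
 * MIGRATE_RECLAIMABLE == 1
 * MIGRATE_MOVABLE     == 2
 * MIGRATE_CMA         == 3  <  MIGRATE_PCPTYPES == 4
 *
 * So is_migrate_cma() pages fail the migratetype >= MIGRATE_PCPTYPES
 * check and fall through to the normal path, landing on their own
 * pcp list instead of going straight back to the buddy allocator.
 */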
@@ -1549,23 +1553,33 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
int migratetype)
{
unsigned long flags;
struct page *page;
struct page *page = NULL;
int cold = !!(gfp_flags & __GFP_COLD);
again:
if (likely(order == 0)) {
struct per_cpu_pages *pcp;
struct list_head *list;
struct list_head *list = NULL;
local_irq_save(flags);
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
if (list_empty(list)) {
pcp->count += rmqueue_bulk(zone, 0,
pcp->batch, list,
migratetype, cold,
gfp_flags & __GFP_CMA);
if (unlikely(list_empty(list)))
/* First try to get CMA pages */
if (migratetype == MIGRATE_MOVABLE &&
gfp_flags & __GFP_CMA) {
list = get_populated_pcp_list(zone, 0, pcp,
get_cma_migrate_type(), cold);
}
if (list == NULL) {
/*
* Either CMA is not suitable or there are no free CMA
* pages.
*/
list = get_populated_pcp_list(zone, 0, pcp,
migratetype, cold);
if (unlikely(list == NULL) ||
unlikely(list_empty(list)))
goto failed;
}
@@ -1591,10 +1605,12 @@ again:
WARN_ON_ONCE(order > 1);
}
spin_lock_irqsave(&zone->lock, flags);
if (gfp_flags & __GFP_CMA)
page = __rmqueue_cma(zone, order, migratetype);
else
if (migratetype == MIGRATE_MOVABLE && gfp_flags & __GFP_CMA)
page = __rmqueue_cma(zone, order);
if (!page)
page = __rmqueue(zone, order, migratetype);
spin_unlock(&zone->lock);
if (!page)
goto failed;
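For completeness, a hypothetical call site (an illustration only; __GFP_CMA is the out-of-tree gfp flag this series uses, not an upstream one): a movable allocation that opts in to being satisfied from CMA.

/* Movable user page allowed to come from a CMA region. */
struct page *page = alloc_pages(GFP_HIGHUSER_MOVABLE | __GFP_CMA, 0);

if (page)
	__free_pages(page, 0);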

mm/vmstat.c

@@ -629,10 +629,10 @@ static char * const migratetype_names[MIGRATE_TYPES] = {
"Unmovable",
"Reclaimable",
"Movable",
"Reserve",
#ifdef CONFIG_CMA
"CMA",
#endif
"Reserve",
#ifdef CONFIG_MEMORY_ISOLATION
"Isolate",
#endif