vmscan: only defer compaction for failed order and higher
Currently a failed order-9 (transparent hugepage) compaction can lead to
memory compaction being temporarily disabled for a memory zone, even if we
only need compaction for an order-2 allocation, e.g. for jumbo frame
networking.

The fix is relatively straightforward: keep track of the highest order at
which compaction is succeeding, and only defer compaction for orders at
which compaction is failing.

Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent 7be62de99a
commit aff622495c

5 changed files with 27 additions and 8 deletions
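Before reading the diff, here is a minimal standalone C sketch of the new deferral logic. It is a demo, not the kernel implementation: struct zone is reduced to the three fields this commit touches, compaction_success() is a made-up helper standing in for the inline success path in __alloc_pages_direct_compact(), and DEMO_MAX_ORDER plus the starting compact_order_failed value are chosen only for the demo. It shows how a failed order-9 compaction records compact_order_failed = 9 and only defers requests at order 9 and above, so an order-2 request still gets to try.

/*
 * Standalone sketch (not kernel code) of the deferral logic added by
 * this commit.  compaction_success() is a hypothetical helper mirroring
 * the success-path update in __alloc_pages_direct_compact().
 */
#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6
#define DEMO_MAX_ORDER 11          /* demo-only stand-in for MAX_ORDER */

struct zone {
        unsigned int compact_considered;
        unsigned int compact_defer_shift;
        int compact_order_failed;   /* lowest order known to be failing */
};

/* Called after a failed sync compaction at @order. */
static void defer_compaction(struct zone *zone, int order)
{
        zone->compact_considered = 0;
        zone->compact_defer_shift++;

        if (order < zone->compact_order_failed)
                zone->compact_order_failed = order;

        if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
                zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Returns true if compaction should be skipped this time. */
static bool compaction_deferred(struct zone *zone, int order)
{
        unsigned long defer_limit = 1UL << zone->compact_defer_shift;

        /* Orders below the failing order are never deferred. */
        if (order < zone->compact_order_failed)
                return false;

        /* Avoid possible overflow */
        if (++zone->compact_considered > defer_limit)
                zone->compact_considered = defer_limit;

        return zone->compact_considered < defer_limit;
}

/* Hypothetical helper: a success at @order re-enables deferral above it. */
static void compaction_success(struct zone *zone, int order)
{
        zone->compact_considered = 0;
        zone->compact_defer_shift = 0;

        if (order >= zone->compact_order_failed)
                zone->compact_order_failed = order + 1;
}

int main(void)
{
        struct zone z = {
                .compact_order_failed = DEMO_MAX_ORDER, /* nothing failed yet */
        };

        defer_compaction(&z, 9);    /* order-9 (THP) compaction failed */

        /* The order-9 failure no longer blocks an order-2 request. */
        printf("order 9 deferred: %d\n", compaction_deferred(&z, 9)); /* 1 */
        printf("order 2 deferred: %d\n", compaction_deferred(&z, 2)); /* 0 */

        compaction_success(&z, 9);  /* a later order-9 compaction succeeds */
        printf("order 9 deferred: %d\n", compaction_deferred(&z, 9)); /* 0 */
        return 0;
}

The diff below applies exactly this idea to the real defer_compaction()/compaction_deferred() helpers, struct zone, kswapd compaction, the direct-compaction path, and reclaim.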
@@ -34,20 +34,26 @@ extern unsigned long compaction_suitable(struct zone *zone, int order);
  * allocation success. 1 << compact_defer_limit compactions are skipped up
  * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
  */
-static inline void defer_compaction(struct zone *zone)
+static inline void defer_compaction(struct zone *zone, int order)
 {
 	zone->compact_considered = 0;
 	zone->compact_defer_shift++;
 
+	if (order < zone->compact_order_failed)
+		zone->compact_order_failed = order;
+
 	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
 		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
 }
 
 /* Returns true if compaction should be skipped this time */
-static inline bool compaction_deferred(struct zone *zone)
+static inline bool compaction_deferred(struct zone *zone, int order)
 {
 	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
 
+	if (order < zone->compact_order_failed)
+		return false;
+
 	/* Avoid possible overflow */
 	if (++zone->compact_considered > defer_limit)
 		zone->compact_considered = defer_limit;
@@ -73,11 +79,11 @@ static inline unsigned long compaction_suitable(struct zone *zone, int order)
 	return COMPACT_SKIPPED;
 }
 
-static inline void defer_compaction(struct zone *zone)
+static inline void defer_compaction(struct zone *zone, int order)
 {
 }
 
-static inline bool compaction_deferred(struct zone *zone)
+static inline bool compaction_deferred(struct zone *zone, int order)
 {
 	return 1;
 }
@@ -365,6 +365,7 @@ struct zone {
 	 */
 	unsigned int		compact_considered;
 	unsigned int		compact_defer_shift;
+	int			compact_order_failed;
 #endif
 
 	ZONE_PADDING(_pad1_)
@@ -695,9 +695,19 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
 		INIT_LIST_HEAD(&cc->freepages);
 		INIT_LIST_HEAD(&cc->migratepages);
 
-		if (cc->order < 0 || !compaction_deferred(zone))
+		if (cc->order < 0 || !compaction_deferred(zone, cc->order))
 			compact_zone(zone, cc);
 
+		if (cc->order > 0) {
+			int ok = zone_watermark_ok(zone, cc->order,
+						low_wmark_pages(zone), 0, 0);
+			if (ok && cc->order > zone->compact_order_failed)
+				zone->compact_order_failed = cc->order + 1;
+			/* Currently async compaction is never deferred. */
+			else if (!ok && cc->sync)
+				defer_compaction(zone, cc->order);
+		}
+
 		VM_BUG_ON(!list_empty(&cc->freepages));
 		VM_BUG_ON(!list_empty(&cc->migratepages));
 	}
@@ -1990,7 +1990,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	if (!order)
 		return NULL;
 
-	if (compaction_deferred(preferred_zone)) {
+	if (compaction_deferred(preferred_zone, order)) {
 		*deferred_compaction = true;
 		return NULL;
 	}
@@ -2012,6 +2012,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	if (page) {
 		preferred_zone->compact_considered = 0;
 		preferred_zone->compact_defer_shift = 0;
+		if (order >= preferred_zone->compact_order_failed)
+			preferred_zone->compact_order_failed = order + 1;
 		count_vm_event(COMPACTSUCCESS);
 		return page;
 	}
@@ -2028,7 +2030,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	 * defer if the failure was a sync compaction failure.
 	 */
 	if (sync_migration)
-		defer_compaction(preferred_zone);
+		defer_compaction(preferred_zone, order);
 
 	cond_resched();
 }
@@ -2198,7 +2198,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 	 * If compaction is deferred, reclaim up to a point where
 	 * compaction will have a chance of success when re-enabled
 	 */
-	if (compaction_deferred(zone))
+	if (compaction_deferred(zone, sc->order))
 		return watermark_ok;
 
 	/* If compaction is not ready to start, keep reclaiming */