Mirror of https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git (synced 2024-10-31 18:09:19 +00:00)
memory-hotplug: fix pages missed by race rather than failing
If a race between allocation and isolation happens during memory-hotplug offline, some pages can end up on the MIGRATE_MOVABLE free_list even though the pageblock's migratetype is MIGRATE_ISOLATE. The race can be detected by get_freepage_migratetype() in __test_page_isolated_in_pageblock(). When it is detected, EBUSY currently gets bubbled all the way up and the hotplug operation fails.

A better idea is, instead of returning and failing memory-hotremove, to move the free page to the correct list at the time the race is detected. This improves the memory-hotremove success ratio, even though the race is really rare.

Suggested by Mel Gorman.

[akpm@linux-foundation.org: small cleanup]
Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Xishi Qiu <qiuxishi@huawei.com>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
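For illustration only (not part of the patch): the behaviour change is easy to model in a small user-space C sketch. All structures and names below are made up for the example; in the kernel the fix is done with move_freepages() inside __test_page_isolated_in_pageblock(), as the diff below shows. The point is the design choice: when the isolation check finds a free page sitting on the wrong list, repair it on the spot instead of failing the whole hot-remove with EBUSY.

/*
 * Toy user-space model of the behaviour change in this patch.
 * Not kernel code: "struct page", the migratetype enum and the
 * function names here are simplified stand-ins.
 */
#include <stdio.h>

enum migratetype { MIGRATE_MOVABLE, MIGRATE_ISOLATE };

struct page {
	unsigned long pfn;
	enum migratetype freelist;   /* list the free page currently sits on */
	enum migratetype pageblock;  /* migratetype of the page's pageblock  */
};

/* Stand-in for move_freepages(): put the page on its pageblock's list. */
static void move_to_correct_list(struct page *p)
{
	p->freelist = p->pageblock;
}

/*
 * Old behaviour: fail as soon as a free page is found on the wrong list.
 * New behaviour (this patch): fix the page up and keep scanning.
 */
static int check_pages_isolated(struct page *pages, int n)
{
	for (int i = 0; i < n; i++) {
		struct page *p = &pages[i];

		if (p->pageblock == MIGRATE_ISOLATE &&
		    p->freelist != MIGRATE_ISOLATE) {
			printf("pfn %lu: stray free page, moving to isolate list\n",
			       p->pfn);
			move_to_correct_list(p);	/* instead of: return -EBUSY; */
		}
	}
	return 0;
}

int main(void)
{
	/*
	 * The pageblock was marked MIGRATE_ISOLATE, but a racing free put
	 * pfn 42 back on the MIGRATE_MOVABLE list before the check ran.
	 */
	struct page pages[] = {
		{ .pfn = 41, .freelist = MIGRATE_ISOLATE, .pageblock = MIGRATE_ISOLATE },
		{ .pfn = 42, .freelist = MIGRATE_MOVABLE, .pageblock = MIGRATE_ISOLATE },
	};

	return check_pages_isolated(pages, 2);
}

In the actual patch the same decision is made in kernel terms: the stray buddy page's extent is computed with page_order(), the pages are moved with move_freepages(), and the scan continues instead of bubbling EBUSY up.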
parent 41d575ad4a
commit 435b405c06
3 changed files with 19 additions and 3 deletions
include/linux/page-isolation.h
@@ -6,6 +6,10 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count);
 void set_pageblock_migratetype(struct page *page, int migratetype);
 int move_freepages_block(struct zone *zone, struct page *page,
 				int migratetype);
+int move_freepages(struct zone *zone,
+			struct page *start_page, struct page *end_page,
+			int migratetype);
+
 /*
  * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
  * If specified range includes migrate types other than MOVABLE or CMA,
mm/page_alloc.c
@@ -925,7 +925,7 @@ static int fallbacks[MIGRATE_TYPES][4] = {
  * Note that start_page and end_pages are not aligned on a pageblock
  * boundary. If alignment is required, use move_freepages_block()
  */
-static int move_freepages(struct zone *zone,
+int move_freepages(struct zone *zone,
 			  struct page *start_page, struct page *end_page,
 			  int migratetype)
 {
mm/page_isolation.c
@@ -201,8 +201,20 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
 		}
 		page = pfn_to_page(pfn);
 		if (PageBuddy(page)) {
-			if (get_freepage_migratetype(page) != MIGRATE_ISOLATE)
-				break;
+			/*
+			 * If race between isolatation and allocation happens,
+			 * some free pages could be in MIGRATE_MOVABLE list
+			 * although pageblock's migratation type of the page
+			 * is MIGRATE_ISOLATE. Catch it and move the page into
+			 * MIGRATE_ISOLATE list.
+			 */
+			if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
+				struct page *end_page;
+
+				end_page = page + (1 << page_order(page)) - 1;
+				move_freepages(page_zone(page), page, end_page,
+						MIGRATE_ISOLATE);
+			}
 			pfn += 1 << page_order(page);
 		}
 		else if (page_count(page) == 0 &&