mm: Make memory hotplug aware of memmap holes

This patch prevents memory hotplug from marking pages of the memmap that
only reference holes in the physical address space as private. Some
architectures (including ARM) attempt to free these unneeded parts of the
memmap, and attempting to free a private page will throw bad_page warnings
and tie up the memory indefinitely.

This patch also allows early_pfn_valid to be architecture-specific and
defines it for ARM. The ARM definition takes into account the registered
memory banks and the holes between them in physical memory.

CRs-Fixed: 247010

Change-Id: Iad88d427b1b923a808b026c22d2899fa0483cb9e
Signed-off-by: Jesse Tannahill <jesset@codeaurora.org>
(cherry picked from commit 0b610c773ad6281a3d217fbbe894b2476e9e71dd)

Conflicts:

	arch/arm/mm/init.c
This commit is contained in:
Jesse Tannahill 2010-07-09 18:18:05 -07:00 committed by Stephen Boyd
parent d2affbaaa2
commit ac7ec62d35
4 changed files with 48 additions and 5 deletions

View file

@ -158,6 +158,11 @@ typedef struct page *pgtable_t;
extern int pfn_valid(unsigned long);
#endif
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
extern int _early_pfn_valid(unsigned long);
#define early_pfn_valid(pfn) (_early_pfn_valid(pfn))
#endif
#include <asm/memory.h>
#endif /* !__ASSEMBLY__ */

View file

@ -423,6 +423,28 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
memblock_dump_all();
}
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
/*
 * _early_pfn_valid() - test whether @pfn lies inside a registered memory bank.
 * @pfn: page frame number to test.
 *
 * Binary-searches the global meminfo bank table (banks are kept sorted by
 * ascending physical address). Returns 1 if some bank covers @pfn, 0 if the
 * pfn falls in a hole between banks or the table is empty.
 *
 * NOTE(review): assumes meminfo banks are sorted and non-overlapping — the
 * binary search is only correct under that invariant; confirm against the
 * bank registration path.
 */
int _early_pfn_valid(unsigned long pfn)
{
	struct meminfo *mi = &meminfo;
	unsigned int left = 0, right = mi->nr_banks;

	/*
	 * Use a while loop rather than the original do/while: with
	 * nr_banks == 0 a do/while would evaluate mid = 0 and read
	 * mi->bank[0] out of bounds before checking the condition.
	 */
	while (left < right) {
		unsigned int mid = (right + left) / 2;
		struct membank *bank = &mi->bank[mid];

		if (pfn < bank_pfn_start(bank))
			right = mid;
		else if (pfn >= bank_pfn_end(bank))
			left = mid + 1;
		else
			return 1;	/* pfn is inside this bank */
	}
	return 0;
}
EXPORT_SYMBOL(_early_pfn_valid);
#endif
void __init bootmem_init(void)
{
unsigned long min, max_low, max_high;

View file

@ -1212,7 +1212,10 @@ static inline int pfn_present(unsigned long pfn)
#define pfn_to_nid(pfn) (0)
#endif
#ifndef early_pfn_valid
#define early_pfn_valid(pfn) pfn_valid(pfn)
#endif
void sparse_init(void);
#else
#define sparse_init() do {} while (0)

View file

@ -133,9 +133,10 @@ void __ref put_page_bootmem(struct page *page)
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
unsigned long *usemap, mapsize, section_nr, i;
unsigned long *usemap, mapsize, page_mapsize, section_nr, i, j;
struct mem_section *ms;
struct page *page, *memmap;
struct page *page, *memmap, *page_page;
int memmap_page_valid;
section_nr = pfn_to_section_nr(start_pfn);
ms = __nr_to_section(section_nr);
@ -151,9 +152,21 @@ static void register_page_bootmem_info_section(unsigned long start_pfn)
mapsize = sizeof(struct page) * PAGES_PER_SECTION;
mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
/* remember memmap's page */
for (i = 0; i < mapsize; i++, page++)
get_page_bootmem(section_nr, page, SECTION_INFO);
page_mapsize = PAGE_SIZE/sizeof(struct page);
/* remember memmap's page, except those that reference only holes */
for (i = 0; i < mapsize; i++, page++) {
memmap_page_valid = 0;
page_page = __va(page_to_pfn(page) << PAGE_SHIFT);
for (j = 0; j < page_mapsize; j++, page_page++) {
if (early_pfn_valid(page_to_pfn(page_page))) {
memmap_page_valid = 1;
break;
}
}
if (memmap_page_valid)
get_page_bootmem(section_nr, page, SECTION_INFO);
}
usemap = __nr_to_section(section_nr)->pageblock_flags;
page = virt_to_page(usemap);