x86-32, memblock: Make add_highpages honor early reserved ranges

Originally the only early reserved range that overlapped the high pages
was "KVA RAM", and we already remove that from the active ranges.

However, it turns out Xen can have that kind of overlap to support memory
ballooning.

So we need to make add_highpages_with_active_regions() subtract memblock
reserved ranges, just as we do for low RAM; this is the proper design anyway.

This patch refactors get_free_all_memory_range() so that it can be used by
add_highpages_with_active_regions().  With that in place, we also no longer
need to remove "KVA RAM" from the active ranges.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <4CABB183.1040607@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
commit 1d931264af
parent 9f4c13964b
Yinghai Lu, 2010-10-05 16:15:15 -07:00, committed by H. Peter Anvin
4 changed files with 33 additions and 43 deletions
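
To see what the refactored helper computes, here is a minimal standalone
sketch of the clamp-then-subtract flow (plain C, compilable on its own; the
subtract_range() below is a simplified stand-in for the kernel's, which
handles whole arrays properly and splits a range around a hole rather than
keeping only the low half):

/* Sketch: free = active ranges, minus [0, start_pfn), minus
 * [end_pfn, ~0UL), minus each early reserved range. */
#include <stdio.h>

struct range { unsigned long start, end; };

/* Clip each range in r[0..n) against the interval [s, e). */
static void subtract_range(struct range *r, int n, unsigned long s,
			   unsigned long e)
{
	for (int i = 0; i < n; i++) {
		if (r[i].start >= e || r[i].end <= s)
			continue;			/* no overlap */
		if (r[i].start >= s && r[i].end <= e)
			r[i].start = r[i].end = 0;	/* fully covered */
		else if (r[i].start < s && r[i].end > e)
			r[i].end = s;	/* hole in the middle: keep the low
					 * part (the real code splits) */
		else if (r[i].start < s)
			r[i].end = s;			/* clip tail */
		else
			r[i].start = e;			/* clip head */
	}
}

int main(void)
{
	struct range free[1] = { { 0x100, 0x900 } };	/* one active region */
	unsigned long start_pfn = 0x200, end_pfn = 0x800;

	/* what __get_free_all_memory_range() now does for any pfn window: */
	subtract_range(free, 1, 0, start_pfn);		/* clamp below */
	subtract_range(free, 1, end_pfn, -1UL);		/* clamp above */
	subtract_range(free, 1, 0x400, 0x500);		/* a reserved range,
							 * e.g. Xen balloon */

	printf("free: [%#lx, %#lx)\n", free[0].start, free[0].end);
	return 0;
}

Running this prints free: [0x200, 0x400): the window is clamped first, then
the reserved range is carved out, which is exactly what the highmem path was
missing before this patch.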

arch/x86/include/asm/memblock.h

@@ -9,6 +9,8 @@ void memblock_x86_to_bootmem(u64 start, u64 end);
 void memblock_x86_reserve_range(u64 start, u64 end, char *name);
 void memblock_x86_free_range(u64 start, u64 end);
 struct range;
+int __get_free_all_memory_range(struct range **range, int nodeid,
+			 unsigned long start_pfn, unsigned long end_pfn);
 int get_free_all_memory_range(struct range **rangep, int nodeid);
 
 void memblock_x86_register_active_regions(int nid, unsigned long start_pfn,

arch/x86/mm/init_32.c

@@ -423,49 +423,28 @@ static void __init add_one_highpage_init(struct page *page)
 	totalhigh_pages++;
 }
 
-struct add_highpages_data {
-	unsigned long start_pfn;
-	unsigned long end_pfn;
-};
-
-static int __init add_highpages_work_fn(unsigned long start_pfn,
-					 unsigned long end_pfn, void *datax)
+void __init add_highpages_with_active_regions(int nid,
+			 unsigned long start_pfn, unsigned long end_pfn)
 {
-	int node_pfn;
-	struct page *page;
-	unsigned long final_start_pfn, final_end_pfn;
-	struct add_highpages_data *data;
+	struct range *range;
+	int nr_range;
+	int i;
 
-	data = (struct add_highpages_data *)datax;
+	nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn);
 
-	final_start_pfn = max(start_pfn, data->start_pfn);
-	final_end_pfn = min(end_pfn, data->end_pfn);
-	if (final_start_pfn >= final_end_pfn)
-		return 0;
+	for (i = 0; i < nr_range; i++) {
+		struct page *page;
+		int node_pfn;
 
-	for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
-	     node_pfn++) {
-		if (!pfn_valid(node_pfn))
-			continue;
-		page = pfn_to_page(node_pfn);
-		add_one_highpage_init(page);
+		for (node_pfn = range[i].start; node_pfn < range[i].end;
+		     node_pfn++) {
+			if (!pfn_valid(node_pfn))
+				continue;
+			page = pfn_to_page(node_pfn);
+			add_one_highpage_init(page);
+		}
 	}
-
-	return 0;
-}
-
-void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
-					      unsigned long end_pfn)
-{
-	struct add_highpages_data data;
-
-	data.start_pfn = start_pfn;
-	data.end_pfn = end_pfn;
-
-	work_with_active_regions(nid, add_highpages_work_fn, &data);
 }
 
 #else
 static inline void permanent_kmaps_init(pgd_t *pgd_base)
 {

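Since the signature of add_highpages_with_active_regions() is unchanged, its
caller needs no update.  For reference, the call site in
arch/x86/mm/highmem_32.c of this era looks roughly like this (a from-memory
sketch, not part of this patch; the informational printk is omitted):

void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	/* register every highmem zone's pfn span, per node */
	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;
		nid = zone_to_nid(zone);

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
	totalram_pages += totalhigh_pages;
}
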
arch/x86/mm/memblock.c

@@ -156,7 +156,8 @@ static int __init count_early_node_map(int nodeid)
 	return data.nr;
 }
 
-int __init get_free_all_memory_range(struct range **rangep, int nodeid)
+int __init __get_free_all_memory_range(struct range **rangep, int nodeid,
+			 unsigned long start_pfn, unsigned long end_pfn)
 {
 	int count;
 	struct range *range;
@@ -172,9 +173,9 @@ int __init get_free_all_memory_range(struct range **rangep, int nodeid)
 	 * at first
 	 */
 	nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
-#ifdef CONFIG_X86_32
-	subtract_range(range, count, max_low_pfn, -1ULL);
-#endif
+	subtract_range(range, count, 0, start_pfn);
+	subtract_range(range, count, end_pfn, -1ULL);
+
 	memblock_x86_subtract_reserved(range, count);
 	nr_range = clean_sort_range(range, count);
 
@@ -182,6 +183,16 @@ int __init get_free_all_memory_range(struct range **rangep, int nodeid)
 	return nr_range;
 }
 
+int __init get_free_all_memory_range(struct range **rangep, int nodeid)
+{
+	unsigned long end_pfn = -1UL;
+
+#ifdef CONFIG_X86_32
+	end_pfn = max_low_pfn;
+#endif
+	return __get_free_all_memory_range(rangep, nodeid, 0, end_pfn);
+}
+
 static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free)
 {
 	int i, count;

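The new wrapper keeps existing callers working unchanged: on 64-bit end_pfn
stays at -1UL, so the upper subtract_range() call removes nothing, while
32-bit still clamps the free list to max_low_pfn as before.  The main
consumer is the nobootmem path; from memory it looks roughly like this (a
sketch of free_all_memory_core_early() in mm/bootmem.c under
CONFIG_NO_BOOTMEM, not part of this patch):

static unsigned long __init free_all_memory_core_early(int nodeid)
{
	int i;
	u64 start, end;
	unsigned long count = 0;
	struct range *range = NULL;
	int nr_range;

	/* free ranges already exclude memblock reserved regions */
	nr_range = get_free_all_memory_range(&range, nodeid);

	for (i = 0; i < nr_range; i++) {
		start = range[i].start;
		end = range[i].end;
		count += end - start;
		__free_pages_memory(start, end);
	}

	return count;
}
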
arch/x86/mm/numa_32.c

@@ -326,8 +326,6 @@ static __init unsigned long calculate_numa_remap_pages(void)
 					 "KVA RAM");
 
 		node_remap_start_pfn[nid] = node_kva_final>>PAGE_SHIFT;
-		remove_active_range(nid, node_remap_start_pfn[nid],
-				    node_remap_start_pfn[nid] + size);
 	}
 	printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n",
 	       reserve_pages);