From 0a1c234a9fabcc2e71dc7a6da7ae1cb073207281 Mon Sep 17 00:00:00 2001
From: Gianluca Guida
Date: Sun, 2 Aug 2009 01:25:48 +0100
Subject: [PATCH] x86/32: honor reservations of high memory

Make high memory initialization honor early reserved ranges.

Signed-off-by: Gianluca Guida
Signed-off-by: Jeremy Fitzhardinge

diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index bca7909..573bc7f 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -432,22 +432,45 @@ static int __init add_highpages_work_fn(unsigned long start_pfn,
 {
 	int node_pfn;
 	struct page *page;
+	phys_addr_t chunk_end, chunk_max;
 	unsigned long final_start_pfn, final_end_pfn;
-	struct add_highpages_data *data;
-
-	data = (struct add_highpages_data *)datax;
+	struct add_highpages_data *data = (struct add_highpages_data *)datax;
 
 	final_start_pfn = max(start_pfn, data->start_pfn);
 	final_end_pfn = min(end_pfn, data->end_pfn);
 	if (final_start_pfn >= final_end_pfn)
 		return 0;
 
-	for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
-	     node_pfn++) {
-		if (!pfn_valid(node_pfn))
-			continue;
-		page = pfn_to_page(node_pfn);
-		add_one_highpage_init(page);
+	chunk_end = PFN_PHYS(final_start_pfn);
+	chunk_max = PFN_PHYS(final_end_pfn);
+
+	/*
+	 * Check for reserved areas.
+	 */
+	for (;;) {
+		phys_addr_t chunk_start;
+		chunk_start = early_res_next_free(chunk_end);
+
+		/*
+		 * Reserved area. Just count high mem pages.
+		 */
+		for (node_pfn = PFN_DOWN(chunk_end);
+		     node_pfn < PFN_DOWN(chunk_start); node_pfn++) {
+			if (pfn_valid(node_pfn))
+				totalhigh_pages++;
+		}
+
+		if (chunk_start >= chunk_max)
+			break;
+
+		chunk_end = early_res_next_reserved(chunk_start, chunk_max);
+		for (node_pfn = PFN_DOWN(chunk_start);
+		     node_pfn < PFN_DOWN(chunk_end); node_pfn++) {
+			if (!pfn_valid(node_pfn))
+				continue;
+			page = pfn_to_page(node_pfn);
+			add_one_highpage_init(page);
+		}
 	}
 
 	return 0;
@@ -461,7 +484,6 @@ void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
 
 	data.start_pfn = start_pfn;
 	data.end_pfn = end_pfn;
-
 	work_with_active_regions(nid, add_highpages_work_fn, &data);
 }
 
diff --git a/include/linux/early_res.h b/include/linux/early_res.h
index 29c09f5..37317e1 100644
--- a/include/linux/early_res.h
+++ b/include/linux/early_res.h
@@ -8,6 +8,9 @@ extern void free_early(u64 start, u64 end);
 void free_early_partial(u64 start, u64 end);
 extern void early_res_to_bootmem(u64 start, u64 end);
 
+extern u64 early_res_next_free(u64 start);
+extern u64 early_res_next_reserved(u64 addr, u64 max);
+
 void reserve_early_without_check(u64 start, u64 end, char *name);
 u64 find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
 			 u64 size, u64 align);
diff --git a/kernel/early_res.c b/kernel/early_res.c
index 7bfae88..b663c62 100644
--- a/kernel/early_res.c
+++ b/kernel/early_res.c
@@ -44,6 +44,36 @@ static int __init find_overlapped_early(u64 start, u64 end)
 	return i;
 }
 
+u64 __init early_res_next_free(u64 addr)
+{
+	int i;
+	u64 end = addr;
+	struct early_res *r;
+
+	for (i = 0; i < max_early_res; i++) {
+		r = &early_res[i];
+		if (addr >= r->start && addr < r->end) {
+			end = r->end;
+			break;
+		}
+	}
+	return end;
+}
+
+u64 __init early_res_next_reserved(u64 addr, u64 max)
+{
+	int i;
+	struct early_res *r;
+	u64 next_res = max;
+
+	for (i = 0; i < max_early_res && early_res[i].end; i++) {
+		r = &early_res[i];
+		if ((r->start >= addr) && (r->start < next_res))
+			next_res = r->start;
+	}
+	return next_res;
+}
+
 /*
  * Drop the i-th range from the early reservation map,
  * by copying any higher ranges down one over it, and
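
Illustration only, not part of the patch: a rough userspace sketch of the chunk walk that add_highpages_work_fn() now performs. The window is split into alternating reserved and free chunks using two helpers modelled on the new early_res_next_free()/early_res_next_reserved(); reserved pages are only counted, free pages go to highmem init. The reservation table, addresses, and local helper names below are invented for the example; in the kernel the real data lives in the early_res[] array in kernel/early_res.c.

/* Standalone sketch (not kernel code) of the alternating reserved/free walk. */
#include <stdio.h>

typedef unsigned long long u64;

struct res { u64 start, end; };

/* hypothetical reservations inside a 16 MB highmem window */
static struct res table[] = {
	{ 0x01000000, 0x01200000 },
	{ 0x01800000, 0x01810000 },
	{ 0, 0 },			/* terminator */
};

/* if addr falls inside a reservation, return that reservation's end;
 * otherwise return addr itself (addr is already free) */
static u64 next_free(u64 addr)
{
	int i;

	for (i = 0; table[i].end; i++)
		if (addr >= table[i].start && addr < table[i].end)
			return table[i].end;
	return addr;
}

/* first reservation start in [addr, max), or max if there is none */
static u64 next_reserved(u64 addr, u64 max)
{
	int i;
	u64 next = max;

	for (i = 0; table[i].end; i++)
		if (table[i].start >= addr && table[i].start < next)
			next = table[i].start;
	return next;
}

int main(void)
{
	u64 chunk_end = 0x01000000;	/* start of the window */
	u64 chunk_max = 0x02000000;	/* end of the window */

	for (;;) {
		u64 chunk_start = next_free(chunk_end);

		/* reserved chunk: the kernel only bumps totalhigh_pages here */
		if (chunk_start > chunk_end)
			printf("reserved: %#llx-%#llx (count only)\n",
			       chunk_end, chunk_start);
		if (chunk_start >= chunk_max)
			break;

		/* free chunk: the kernel calls add_one_highpage_init() here */
		chunk_end = next_reserved(chunk_start, chunk_max);
		printf("free:     %#llx-%#llx (add to highmem)\n",
		       chunk_start, chunk_end);
	}
	return 0;
}

Running it prints the alternating reserved/free chunks of the window in the order in which add_highpages_work_fn() either counts pages or hands them to add_one_highpage_init().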