[PATCH] x86: fix init_memory_mapping over boundary v3 Some end boundaries are only page-aligned, instead of 2M-aligned. v2: make init_memory_mapping more solid: start could be any value other than 0. v3: fix non-PAE by handling the leftover range in kernel_physical_mapping. Signed-off-by: Yinghai Lu --- arch/x86/mm/init_32.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) Index: linux-2.6/arch/x86/mm/init_32.c =================================================================== --- linux-2.6.orig/arch/x86/mm/init_32.c +++ linux-2.6/arch/x86/mm/init_32.c @@ -195,7 +195,7 @@ static void __init kernel_physical_mappi unsigned pages_2m = 0, pages_4k = 0; unsigned limit_pfn = end >> PAGE_SHIFT; - pgd_idx = pgd_index(PAGE_OFFSET); + pgd_idx = pgd_index(start + PAGE_OFFSET); pgd = pgd_base + pgd_idx; pfn = start >> PAGE_SHIFT; @@ -218,7 +218,8 @@ static void __init kernel_physical_mappi * and overlapping MTRRs into large pages can cause * slowdowns. */ - if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) { + if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0) && + (pfn + PTRS_PER_PTE) <= limit_pfn) { unsigned int addr2; pgprot_t prot = PAGE_KERNEL_LARGE; @@ -233,13 +234,12 @@ static void __init kernel_physical_mappi set_pmd(pmd, pfn_pmd(pfn, prot)); pfn += PTRS_PER_PTE; - max_pfn_mapped = pfn; continue; } pte = one_page_table_init(pmd); for (pte_ofs = 0; - pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; + pte_ofs < PTRS_PER_PTE && pfn < limit_pfn; pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) { pgprot_t prot = PAGE_KERNEL; @@ -249,7 +249,6 @@ static void __init kernel_physical_mappi pages_4k++; set_pte(pte, pfn_pte(pfn, prot)); } - max_pfn_mapped = pfn; } } update_page_count(PG_LEVEL_2M, pages_2m); @@ -729,7 +728,7 @@ void __init setup_bootmem_allocator(void static void __init find_early_table_space(unsigned long end) { - unsigned long puds, pmds, tables, start; + unsigned long puds, pmds, ptes, tables, start; puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; 
tables = PAGE_ALIGN(puds * sizeof(pud_t)); @@ -737,10 +736,15 @@ static void __init find_early_table_spac pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; tables += PAGE_ALIGN(pmds * sizeof(pmd_t)); - if (!cpu_has_pse) { - int ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; - tables += PAGE_ALIGN(ptes * sizeof(pte_t)); - } + if (cpu_has_pse) { + unsigned long extra; + extra = end - ((end>>21) << 21); + extra += (2UL<<20); + ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; + } else + ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; + + tables += PAGE_ALIGN(ptes * sizeof(pte_t)); /* * RED-PEN putting page tables only on node 0 could