[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <4D9B5361.3030501@kernel.org>
Date: Tue, 05 Apr 2011 10:37:37 -0700
From: Yinghai Lu <yinghai@...nel.org>
To: Tejun Heo <tj@...nel.org>
CC: mingo@...hat.com, hpa@...or.com, tglx@...utronix.de,
rientjes@...gle.com, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 03/14] x86-32, NUMA: Remove redundant top-down alloc code
from remap initialization
On 04/04/2011 03:23 PM, Tejun Heo wrote:
> memblock_find_in_range() now does top-down allocation by default, so
> there's no reason for its callers to explicitly implement it by
> gradually lowering the start address.
>
> Remove redundant top-down allocation logic from initmem_init() and
> calculate_numa_remap_pages().
>
> Signed-off-by: Tejun Heo<tj@...nel.org>
> Cc: Yinghai Lu<yinghai@...nel.org>
> Cc: David Rientjes<rientjes@...gle.com>
> Cc: Thomas Gleixner<tglx@...utronix.de>
> Cc: Ingo Molnar<mingo@...hat.com>
> Cc: "H. Peter Anvin"<hpa@...or.com>
> ---
> arch/x86/mm/numa_32.c | 43 ++++++++++++++-----------------------------
> 1 files changed, 14 insertions(+), 29 deletions(-)
>
> diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
> index 50e8250..60701a5 100644
> --- a/arch/x86/mm/numa_32.c
> +++ b/arch/x86/mm/numa_32.c
> @@ -270,8 +270,7 @@ static __init unsigned long calculate_numa_remap_pages(void)
> unsigned long size, reserve_pages = 0;
>
> for_each_online_node(nid) {
> - u64 node_kva_target;
> - u64 node_kva_final;
> + u64 node_kva;
>
> /*
> * The acpi/srat node info can show hot-add memroy zones
> @@ -295,19 +294,11 @@ static __init unsigned long calculate_numa_remap_pages(void)
> /* now the roundup is correct, convert to PAGE_SIZE pages */
> size = size * PTRS_PER_PTE;
>
> - node_kva_target = round_down(node_end_pfn[nid] - size,
> - PTRS_PER_PTE);
> - node_kva_target <<= PAGE_SHIFT;
> - do {
> - node_kva_final = memblock_find_in_range(node_kva_target,
> + node_kva = memblock_find_in_range(node_start_pfn[nid]<< PAGE_SHIFT,
> ((u64)node_end_pfn[nid])<<PAGE_SHIFT,
> - ((u64)size)<<PAGE_SHIFT,
> - LARGE_PAGE_BYTES);
> - node_kva_target -= LARGE_PAGE_BYTES;
> - } while (node_kva_final == MEMBLOCK_ERROR &&
> - (node_kva_target >> PAGE_SHIFT) > (node_start_pfn[nid]));
> -
> - if (node_kva_final == MEMBLOCK_ERROR)
> + ((u64)size)<<PAGE_SHIFT,
> + LARGE_PAGE_BYTES);
> + if (node_kva == MEMBLOCK_ERROR)
> panic("Can not get kva ram\n");
>
> node_remap_size[nid] = size;
> @@ -315,7 +306,7 @@ static __init unsigned long calculate_numa_remap_pages(void)
> reserve_pages += size;
> printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of"
> " node %d at %llx\n",
> - size, nid, node_kva_final>>PAGE_SHIFT);
> + size, nid, node_kva>> PAGE_SHIFT);
>
> /*
> * prevent kva address below max_low_pfn want it on system
> @@ -328,11 +319,11 @@ static __init unsigned long calculate_numa_remap_pages(void)
> * to use it as free.
> * So memblock_x86_reserve_range here, hope we don't run out of that array
> */
> - memblock_x86_reserve_range(node_kva_final,
> - node_kva_final+(((u64)size)<<PAGE_SHIFT),
> - "KVA RAM");
> + memblock_x86_reserve_range(node_kva,
> + node_kva + (((u64)size)<<PAGE_SHIFT),
> + "KVA RAM");
>
> - node_remap_start_pfn[nid] = node_kva_final>>PAGE_SHIFT;
> + node_remap_start_pfn[nid] = node_kva>> PAGE_SHIFT;
> }
> printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n",
> reserve_pages);
> @@ -356,7 +347,6 @@ static void init_remap_allocator(int nid)
> void __init initmem_init(void)
> {
> int nid;
> - long kva_target_pfn;
>
> /*
> * When mapping a NUMA machine we allocate the node_mem_map arrays
> @@ -371,15 +361,10 @@ void __init initmem_init(void)
>
> kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);
>
> - kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
> - do {
> - kva_start_pfn = memblock_find_in_range(kva_target_pfn<<PAGE_SHIFT,
> - max_low_pfn<<PAGE_SHIFT,
> - kva_pages<<PAGE_SHIFT,
> - PTRS_PER_PTE<<PAGE_SHIFT)>> PAGE_SHIFT;
> - kva_target_pfn -= PTRS_PER_PTE;
> - } while (kva_start_pfn == MEMBLOCK_ERROR && kva_target_pfn > min_low_pfn);
> -
> + kva_start_pfn = memblock_find_in_range(min_low_pfn<< PAGE_SHIFT,
> + max_low_pfn<< PAGE_SHIFT,
> + kva_pages<< PAGE_SHIFT,
> + PTRS_PER_PTE<< PAGE_SHIFT)>> PAGE_SHIFT;
> if (kva_start_pfn == MEMBLOCK_ERROR)
> panic("Can not get kva space\n");
>
Acked-by: Yinghai Lu <yinghai@...nel.org>
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists