[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1366363909-12771-10-git-send-email-tangchen@cn.fujitsu.com>
Date: Fri, 19 Apr 2013 17:31:46 +0800
From: Tang Chen <tangchen@...fujitsu.com>
To: rob@...dley.net, tglx@...utronix.de, mingo@...hat.com,
hpa@...or.com, akpm@...ux-foundation.org,
paulmck@...ux.vnet.ibm.com, dhowells@...hat.com, davej@...hat.com,
agordeev@...hat.com, suresh.b.siddha@...el.com, mst@...hat.com,
yinghai@...nel.org, penberg@...nel.org, jacob.shin@....com,
wency@...fujitsu.com, trenn@...e.de, liwanp@...ux.vnet.ibm.com,
isimatu.yasuaki@...fujitsu.com, rientjes@...gle.com, tj@...nel.org,
laijs@...fujitsu.com, hannes@...xchg.org, davem@...emloft.net,
mgorman@...e.de, minchan@...nel.org, m.szyprowski@...sung.com,
mina86@...a86.com
Cc: x86@...nel.org, linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-mm@...ck.org
Subject: [PATCH v1 09/12] x86, acpi, numa, mem-hotplug: Introduce MEMBLK_HOTPLUGGABLE to mark and reserve hotpluggable memory.
We mark out movable memory ranges and reserve them with the MEMBLK_HOTPLUGGABLE flag in
memblock.reserved. This should be done after the memory mapping is initialized
because the kernel now supports allocating pagetable pages on the local node; those
pages are kernel pages.
The reserved hotpluggable memory will be freed to the buddy allocator when memory
initialization is done.
This idea is from Wen Congyang <wency@...fujitsu.com> and Jiang Liu <jiang.liu@...wei.com>.
Suggested-by: Jiang Liu <jiang.liu@...wei.com>
Suggested-by: Wen Congyang <wency@...fujitsu.com>
Signed-off-by: Tang Chen <tangchen@...fujitsu.com>
---
arch/x86/mm/numa.c | 26 ++++++++++++++++++++++++++
include/linux/memblock.h | 3 +++
mm/memblock.c | 19 +++++++++++++++++++
3 files changed, 48 insertions(+), 0 deletions(-)
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 105b092..6f61691 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -731,6 +731,30 @@ static void __init early_x86_numa_init_mapping(void)
}
#endif
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+static void __init early_mem_hotplug_init(void)
+{
+	int i, nid;
+	phys_addr_t start, end;
+
+	if (!movablecore_enable_srat)
+		return;
+
+	for (i = 0; i < numa_meminfo.nr_blks; i++) {
+		if (!numa_meminfo.blk[i].hotpluggable)
+			continue;
+		nid = numa_meminfo.blk[i].nid;
+		start = numa_meminfo.blk[i].start;
+		end = numa_meminfo.blk[i].end;
+		memblock_reserve_hotpluggable(start, end - start, nid);
+	}
+}
+#else	/* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+static inline void early_mem_hotplug_init(void)
+{
+}
+#endif	/* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
void __init early_initmem_init(void)
{
early_x86_numa_init();
@@ -740,6 +764,8 @@ void __init early_initmem_init(void)
load_cr3(swapper_pg_dir);
__flush_tlb_all();
+ early_mem_hotplug_init();
+
early_memtest(0, max_pfn_mapped<<PAGE_SHIFT);
}
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 3b2d1c4..0f01930 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -24,6 +24,7 @@
/* Definition of memblock flags. */
enum memblock_flags {
MEMBLK_LOCAL_NODE, /* node-life-cycle data */
+ MEMBLK_HOTPLUGGABLE, /* hotpluggable region */
__NR_MEMBLK_FLAGS, /* number of flags */
};
@@ -67,8 +68,10 @@ int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
int memblock_reserve_local_node(phys_addr_t base, phys_addr_t size, int nid);
+int memblock_reserve_hotpluggable(phys_addr_t base, phys_addr_t size, int nid);
void memblock_trim_memory(phys_addr_t align);
void memblock_mark_kernel_nodes(void);
+bool memblock_is_kernel_node(int nid);
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
diff --git a/mm/memblock.c b/mm/memblock.c
index edde4c2..0c55588 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -596,6 +596,13 @@ int __init_memblock memblock_reserve_local_node(phys_addr_t base,
return memblock_reserve_region(base, size, nid, flags);
}
+int __init_memblock memblock_reserve_hotpluggable(phys_addr_t base,
+						  phys_addr_t size, int nid)
+{
+	/* Reserve [base, base + size) on @nid, tagged as hotpluggable. */
+	return memblock_reserve_region(base, size, nid, 1 << MEMBLK_HOTPLUGGABLE);
+}
+
/**
* __next_free_mem_range - next function for for_each_free_mem_range()
* @idx: pointer to u64 loop variable
@@ -809,11 +816,23 @@ void __init_memblock memblock_mark_kernel_nodes()
node_set(nid, memblock_kernel_nodemask);
}
}
+
+bool __init_memblock memblock_is_kernel_node(int nid)
+{
+	/* A node holds kernel memory iff its bit is set in the nodemask. */
+	bool is_kernel = node_isset(nid, memblock_kernel_nodemask);
+
+	return is_kernel;
+}
#else
void __init_memblock memblock_mark_kernel_nodes()
{
node_set(0, memblock_kernel_nodemask);
}
+
+bool __init_memblock memblock_is_kernel_node(int nid)
+{
+	return true;	/* no node map: all memory is treated as kernel memory */
+}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
--
1.7.1
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists