[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1479369549-13309-2-git-send-email-khandual@linux.vnet.ibm.com>
Date: Thu, 17 Nov 2016 13:29:09 +0530
From: Anshuman Khandual <khandual@...ux.vnet.ibm.com>
To: linux-kernel@...r.kernel.org, linux-mm@...ck.org
Cc: mhocko@...e.com, js1304@...il.com, vbabka@...e.cz, mgorman@...e.de,
minchan@...nel.org, akpm@...ux-foundation.org,
aneesh.kumar@...ux.vnet.ibm.com, bsingharora@...il.com
Subject: [DRAFT 2/2] mm/hugetlb: Restrict HugeTLB allocations only to the system RAM nodes
HugeTLB allocation/release/accounting currently spans across all the nodes
under the N_MEMORY mask. CDM nodes should not be part of these. So use the
system_ram() call to fetch the system-RAM-only nodes on the platform, which
can then be used for HugeTLB purposes instead of N_MEMORY. This isolates CDM
nodes from HugeTLB allocation.
Signed-off-by: Anshuman Khandual <khandual@...ux.vnet.ibm.com>
---
This also completely isolates CDM nodes from user space HugeTLB allocations.
Hence explicit allocation to the CDM nodes would not be possible any more.
To again enable explicit HugeTLB allocation capability from user space, the
HugeTLB subsystem needs to be changed.
mm/hugetlb.c | 32 +++++++++++++++++++++++---------
1 file changed, 23 insertions(+), 9 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 418bf01..1936c5a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1782,6 +1782,9 @@ static void return_unused_surplus_pages(struct hstate *h,
unsigned long unused_resv_pages)
{
unsigned long nr_pages;
+ nodemask_t nodes;
+
+ nodes = system_ram();
/* Uncommit the reservation */
h->resv_huge_pages -= unused_resv_pages;
@@ -1801,7 +1804,7 @@ static void return_unused_surplus_pages(struct hstate *h,
* on-line nodes with memory and will handle the hstate accounting.
*/
while (nr_pages--) {
- if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
+ if (!free_pool_huge_page(h, &nodes, 1))
break;
cond_resched_lock(&hugetlb_lock);
}
@@ -2088,8 +2091,10 @@ int __weak alloc_bootmem_huge_page(struct hstate *h)
{
struct huge_bootmem_page *m;
int nr_nodes, node;
+ nodemask_t nodes;
- for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
+ nodes = system_ram();
+ for_each_node_mask_to_alloc(h, nr_nodes, node, &nodes) {
void *addr;
addr = memblock_virt_alloc_try_nid_nopanic(
@@ -2158,13 +2163,15 @@ static void __init gather_bootmem_prealloc(void)
static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
unsigned long i;
+ nodemask_t nodes;
+
+ nodes = system_ram();
for (i = 0; i < h->max_huge_pages; ++i) {
if (hstate_is_gigantic(h)) {
if (!alloc_bootmem_huge_page(h))
break;
- } else if (!alloc_fresh_huge_page(h,
- &node_states[N_MEMORY]))
+ } else if (!alloc_fresh_huge_page(h, &nodes))
break;
}
h->max_huge_pages = i;
@@ -2401,8 +2408,11 @@ static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
unsigned long count, size_t len)
{
int err;
+ nodemask_t ram_nodes;
+
NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
+ ram_nodes = system_ram();
if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
err = -EINVAL;
goto out;
@@ -2415,7 +2425,7 @@ static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
if (!(obey_mempolicy &&
init_nodemask_of_mempolicy(nodes_allowed))) {
NODEMASK_FREE(nodes_allowed);
- nodes_allowed = &node_states[N_MEMORY];
+ nodes_allowed = &ram_nodes;
}
} else if (nodes_allowed) {
/*
@@ -2425,11 +2435,11 @@ static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
init_nodemask_of_node(nodes_allowed, nid);
} else
- nodes_allowed = &node_states[N_MEMORY];
+ nodes_allowed = &ram_nodes;
h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
- if (nodes_allowed != &node_states[N_MEMORY])
+ if (nodes_allowed != &ram_nodes)
NODEMASK_FREE(nodes_allowed);
return len;
@@ -2726,9 +2736,11 @@ static void hugetlb_register_node(struct node *node)
*/
static void __init hugetlb_register_all_nodes(void)
{
+ nodemask_t nodes;
int nid;
- for_each_node_state(nid, N_MEMORY) {
+ nodes = system_ram();
+ for_each_node_mask(nid, nodes) {
struct node *node = node_devices[nid];
if (node->dev.id == nid)
hugetlb_register_node(node);
@@ -2998,13 +3010,15 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
void hugetlb_show_meminfo(void)
{
+ nodemask_t nodes;
struct hstate *h;
int nid;
if (!hugepages_supported())
return;
- for_each_node_state(nid, N_MEMORY)
+ nodes = system_ram();
+ for_each_node_mask(nid, nodes)
for_each_hstate(h)
pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
nid,
--
1.8.3.1
Powered by blists - more mailing lists