Message-Id: <1477283517-2504-4-git-send-email-khandual@linux.vnet.ibm.com>
Date:   Mon, 24 Oct 2016 10:01:52 +0530
From:   Anshuman Khandual <khandual@...ux.vnet.ibm.com>
To:     linux-kernel@...r.kernel.org, linux-mm@...ck.org
Cc:     mhocko@...e.com, js1304@...il.com, vbabka@...e.cz, mgorman@...e.de,
        minchan@...nel.org, akpm@...ux-foundation.org,
        aneesh.kumar@...ux.vnet.ibm.com, bsingharora@...il.com
Subject: [RFC 3/8] mm: Isolate coherent device memory nodes from HugeTLB allocation paths

This change is part of the implementation of isolation for coherent
device memory nodes.

A coherent device memory node that seeks isolation must be shielded
from implicit memory allocations coming from user space. To that end,
its memory must not be used for generic HugeTLB page pool allocations.
This patch modifies the relevant functions to skip all coherent device
memory nodes present in the system during allocation, freeing, and
accounting of HugeTLB pages.
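
For reference, isolated_cdm_node() is introduced elsewhere in this
series and is not visible in the diff below. The following is only a
minimal sketch of what such a helper could look like, assuming the
series tracks isolated CDM nodes in a dedicated nodemask; the mask
name and config symbol here are placeholders, not the series' actual
definitions:

	#include <linux/nodemask.h>

	/*
	 * Hypothetical sketch only -- not the definition from this
	 * series. Assumes isolated CDM nodes are tracked in a global
	 * nodemask populated when device memory is onlined.
	 */
	#ifdef CONFIG_COHERENT_DEVICE
	extern nodemask_t isolated_cdm_nodes;	/* placeholder name */

	static inline bool isolated_cdm_node(int node)
	{
		return node_isset(node, isolated_cdm_nodes);
	}
	#else
	static inline bool isolated_cdm_node(int node)
	{
		return false;	/* no CDM nodes without CDM support */
	}
	#endif

With a !CONFIG_COHERENT_DEVICE stub returning false, the new checks in
the hunks below compile away to no-ops on kernels without coherent
device memory support.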

Signed-off-by: Anshuman Khandual <khandual@...ux.vnet.ibm.com>
---
 mm/hugetlb.c | 38 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 36 insertions(+), 2 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ec49d9e..466a44c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1147,6 +1147,9 @@ static int alloc_fresh_gigantic_page(struct hstate *h,
 	int nr_nodes, node;
 
 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
+		if (isolated_cdm_node(node))
+			continue;
+
 		page = alloc_fresh_gigantic_page_node(h, node);
 		if (page)
 			return 1;
@@ -1382,6 +1385,9 @@ static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
 	int ret = 0;
 
 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
+		if (isolated_cdm_node(node))
+			continue;
+
 		page = alloc_fresh_huge_page_node(h, node);
 		if (page) {
 			ret = 1;
@@ -1410,6 +1416,9 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 	int ret = 0;
 
 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
+		if (isolated_cdm_node(node))
+			continue;
+
 		/*
 		 * If we're returning unused surplus pages, only examine
 		 * nodes with surplus pages.
@@ -2028,6 +2037,9 @@ int __weak alloc_bootmem_huge_page(struct hstate *h)
 	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
 		void *addr;
 
+		if (isolated_cdm_node(node))
+			continue;
+
 		addr = memblock_virt_alloc_try_nid_nopanic(
 				huge_page_size(h), huge_page_size(h),
 				0, BOOTMEM_ALLOC_ACCESSIBLE, node);
@@ -2156,6 +2168,10 @@ static void try_to_free_low(struct hstate *h, unsigned long count,
 	for_each_node_mask(i, *nodes_allowed) {
 		struct page *page, *next;
 		struct list_head *freel = &h->hugepage_freelists[i];
+
+		if (isolated_cdm_node(i))
+			continue;
+
 		list_for_each_entry_safe(page, next, freel, lru) {
 			if (count >= h->nr_huge_pages)
 				return;
@@ -2189,11 +2205,17 @@ static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
 
 	if (delta < 0) {
 		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
+			if (isolated_cdm_node(node))
+				continue;
+
 			if (h->surplus_huge_pages_node[node])
 				goto found;
 		}
 	} else {
 		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
+			if (isolated_cdm_node(node))
+				continue;
+
 			if (h->surplus_huge_pages_node[node] <
 					h->nr_huge_pages_node[node])
 				goto found;
@@ -2666,6 +2688,10 @@ static void __init hugetlb_register_all_nodes(void)
 
 	for_each_node_state(nid, N_MEMORY) {
 		struct node *node = node_devices[nid];
+
+		if (isolated_cdm_node(nid))
+			continue;
+
 		if (node->dev.id == nid)
 			hugetlb_register_node(node);
 	}
@@ -2819,8 +2845,12 @@ static unsigned int cpuset_mems_nr(unsigned int *array)
 	int node;
 	unsigned int nr = 0;
 
-	for_each_node_mask(node, cpuset_current_mems_allowed)
+	for_each_node_mask(node, cpuset_current_mems_allowed) {
+		if (isolated_cdm_node(node))
+			continue;
+
 		nr += array[node];
+	}
 
 	return nr;
 }
@@ -2940,7 +2970,10 @@ void hugetlb_show_meminfo(void)
 	if (!hugepages_supported())
 		return;
 
-	for_each_node_state(nid, N_MEMORY)
+	for_each_node_state(nid, N_MEMORY) {
+		if (isolated_cdm_node(nid))
+			continue;
+
 		for_each_hstate(h)
 			pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
 				nid,
@@ -2948,6 +2981,7 @@ void hugetlb_show_meminfo(void)
 				h->free_huge_pages_node[nid],
 				h->surplus_huge_pages_node[nid],
 				1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
+	}
 }
 
 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
-- 
2.1.0
