Message-Id: <20170613090039.14393-4-mhocko@kernel.org>
Date: Tue, 13 Jun 2017 11:00:38 +0200
From: Michal Hocko <mhocko@...nel.org>
To: <linux-mm@...ck.org>
Cc: Naoya Horiguchi <n-horiguchi@...jp.nec.com>,
Mike Kravetz <mike.kravetz@...cle.com>,
Mel Gorman <mgorman@...e.de>, Vlastimil Babka <vbabka@...e.cz>,
Andrew Morton <akpm@...ux-foundation.org>,
LKML <linux-kernel@...r.kernel.org>,
Michal Hocko <mhocko@...e.com>
Subject: [RFC PATCH 3/4] mm, hugetlb: get rid of dequeue_huge_page_node
From: Michal Hocko <mhocko@...e.com>
dequeue_huge_page_node has a single caller, alloc_huge_page_node, which
already has to handle NUMA_NO_NODE specially. So get rid of the helper
and use the same nodemask trick for the hugetlb dequeue as we use for
the allocation fallback.
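
For reference, the resulting alloc_huge_page_node() ends up looking
roughly like the sketch below (assembled from the hunks in this patch,
not a literal copy; comments added for illustration only):

	struct page *alloc_huge_page_node(struct hstate *h, int nid)
	{
		struct page *page = NULL;
		nodemask_t nmask;

		/* build the nodemask up front: either the one requested node ... */
		if (nid != NUMA_NO_NODE) {
			nmask = NODE_MASK_NONE;
			node_set(nid, nmask);
		} else {
			/* ... or all nodes with memory for NUMA_NO_NODE */
			nmask = node_states[N_MEMORY];
		}

		/* first try to dequeue a pre-allocated huge page within nmask */
		spin_lock(&hugetlb_lock);
		if (h->free_huge_pages - h->resv_huge_pages > 0)
			page = dequeue_huge_page_nodemask(h, nid, &nmask);
		spin_unlock(&hugetlb_lock);

		/* fall back to a fresh allocation using the same nodemask */
		if (!page)
			page = __alloc_buddy_huge_page(h, nid, &nmask);

		return page;
	}
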
Signed-off-by: Michal Hocko <mhocko@...e.com>
---
mm/hugetlb.c | 29 ++++++++++-------------------
1 file changed, 10 insertions(+), 19 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 696de029f0fa..f58d6362c2c3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -943,14 +943,6 @@ static struct page *dequeue_huge_page_nodemask(struct hstate *h, int nid,
 	return NULL;
 }
 
-static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
-{
-	if (nid != NUMA_NO_NODE)
-		return dequeue_huge_page_node_exact(h, nid);
-
-	return dequeue_huge_page_nodemask(h, nid, NULL);
-}
-
 static struct page *dequeue_huge_page_vma(struct hstate *h,
 				struct vm_area_struct *vma,
 				unsigned long address, int avoid_reserve,
@@ -1640,23 +1632,22 @@ struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
 struct page *alloc_huge_page_node(struct hstate *h, int nid)
 {
 	struct page *page = NULL;
+	nodemask_t nmask;
+
+	if (nid != NUMA_NO_NODE) {
+		nmask = NODE_MASK_NONE;
+		node_set(nid, nmask);
+	} else {
+		nmask = node_states[N_MEMORY];
+	}
 
 	spin_lock(&hugetlb_lock);
 	if (h->free_huge_pages - h->resv_huge_pages > 0)
-		page = dequeue_huge_page_node(h, nid);
+		page = dequeue_huge_page_nodemask(h, nid, &nmask);
 	spin_unlock(&hugetlb_lock);
 
-	if (!page) {
-		nodemask_t nmask;
-
-		if (nid != NUMA_NO_NODE) {
-			nmask = NODE_MASK_NONE;
-			node_set(nid, nmask);
-		} else {
-			nmask = node_states[N_MEMORY];
-		}
+	if (!page)
 		page = __alloc_buddy_huge_page(h, nid, &nmask);
-	}
 
 	return page;
 }
--
2.11.0