Message-Id: <1568961203-18660-2-git-send-email-anshuman.khandual@arm.com>
Date: Fri, 20 Sep 2019 12:03:22 +0530
From: Anshuman Khandual <anshuman.khandual@....com>
To: linux-mm@...ck.org
Cc: Anshuman Khandual <anshuman.khandual@....com>,
Mike Kravetz <mike.kravetz@...cle.com>,
linux-kernel@...r.kernel.org
Subject: [PATCH V3 1/2] mm/hugetlb: Make alloc_gigantic_page() available for general use
alloc_gigantic_page() implements an allocation method that scans over
various zones looking for a large contiguous memory block that could not
have been allocated through the buddy allocator. A subsequent patch that
tests arch page table helpers needs such a method to allocate a PUD_SIZE
sized memory block. In the future such methods might have other use cases
as well. So split the actual memory allocation out of alloc_gigantic_page()
and make it available through the new alloc_gigantic_page_order().
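
As a minimal, hypothetical sketch (not part of this patch), a caller such
as the arch page table helper test could allocate a PUD_SIZE block through
the new interface along the following lines; the wrapper name and the GFP
mask choice are illustrative assumptions only:

static struct page *alloc_pud_sized_page(int nid)
{
	/* Order covering PUD_SIZE worth of base pages */
	unsigned int order = PUD_SHIFT - PAGE_SHIFT;

	/*
	 * Scans zones for a suitably aligned contiguous range; may
	 * return NULL if none can be isolated, so callers must check.
	 */
	return alloc_gigantic_page_order(order, GFP_KERNEL, nid,
					 &node_states[N_MEMORY]);
}

Such a block would later be released with free_contig_range(), which is
what free_gigantic_page() uses internally.
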
Cc: Mike Kravetz <mike.kravetz@...cle.com>
Cc: linux-kernel@...r.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@....com>
---
 include/linux/hugetlb.h |  9 +++++++++
 mm/hugetlb.c            | 24 ++++++++++++++++++++++--
 2 files changed, 31 insertions(+), 2 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 53fc34f9..cc50d5a 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -299,6 +299,9 @@ static inline bool is_file_hugepages(struct file *file)
 	return is_file_shm_hugepages(file);
 }
 
+struct page *
+alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
+			  int nid, nodemask_t *nodemask);
 #else /* !CONFIG_HUGETLBFS */
 
 #define is_file_hugepages(file) false
@@ -310,6 +313,12 @@ hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
 	return ERR_PTR(-ENOSYS);
 }
 
+static inline struct page *
+alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
+			  int nid, nodemask_t *nodemask)
+{
+	return NULL;
+}
 #endif /* !CONFIG_HUGETLBFS */
 
 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ef37c85..3fb8125 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1112,10 +1112,9 @@ static bool zone_spans_last_pfn(const struct zone *zone,
 	return zone_spans_pfn(zone, last_pfn);
 }
 
-static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
+struct page *alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
 		int nid, nodemask_t *nodemask)
 {
-	unsigned int order = huge_page_order(h);
 	unsigned long nr_pages = 1 << order;
 	unsigned long ret, pfn, flags;
 	struct zonelist *zonelist;
@@ -1151,6 +1150,14 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 	return NULL;
 }
 
+static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
+					int nid, nodemask_t *nodemask)
+{
+	unsigned int order = huge_page_order(h);
+
+	return alloc_gigantic_page_order(order, gfp_mask, nid, nodemask);
+}
+
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
 #else /* !CONFIG_CONTIG_ALLOC */
@@ -1159,6 +1166,12 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 {
 	return NULL;
 }
+
+struct page *alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
+				       int nid, nodemask_t *nodemask)
+{
+	return NULL;
+}
 #endif /* CONFIG_CONTIG_ALLOC */
 
 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
@@ -1167,6 +1180,13 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 {
 	return NULL;
 }
+
+struct page *alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
+				       int nid, nodemask_t *nodemask)
+{
+	return NULL;
+}
+
 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
 static inline void destroy_compound_gigantic_page(struct page *page,
 						  unsigned int order) { }
--
2.7.4