Message-Id: <20200902180628.4052244-15-zi.yan@sent.com>
Date: Wed, 2 Sep 2020 14:06:26 -0400
From: Zi Yan <zi.yan@...t.com>
To: linux-mm@...ck.org, Roman Gushchin <guro@...com>
Cc: Rik van Riel <riel@...riel.com>,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
Matthew Wilcox <willy@...radead.org>,
Shakeel Butt <shakeelb@...gle.com>,
Yang Shi <yang.shi@...ux.alibaba.com>,
David Nellans <dnellans@...dia.com>,
linux-kernel@...r.kernel.org, Zi Yan <ziy@...dia.com>
Subject: [RFC PATCH 14/16] mm: page_alloc: >=MAX_ORDER page allocation and deallocation.

From: Zi Yan <ziy@...dia.com>

The buddy allocator cannot serve allocations at or above MAX_ORDER, so use
alloc_contig_pages() for allocation and, on the free side,
destroy_compound_gigantic_page() (moved from hugetlb into page_alloc) followed
by free_contig_range(). This lets 1GB THPs be created and destroyed without
raising MAX_ORDER.

Signed-off-by: Zi Yan <ziy@...dia.com>
---
mm/hugetlb.c | 22 ----------------------
mm/internal.h | 2 ++
mm/mempolicy.c | 15 ++++++++++++++-
mm/page_alloc.c | 33 ++++++++++++++++++++++++++++-----
4 files changed, 44 insertions(+), 28 deletions(-)
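
For orientation, a minimal sketch of the allocation half of this change,
assuming a PUD-order request (PUD_SHIFT - PAGE_SHIFT, i.e. order 18 on x86_64
with 4KB pages); alloc_pud_order_page() is a made-up name, not part of this
series. The matching free half is the __free_pages_ok() change at the end of
the patch.

	/*
	 * Sketch only.  __alloc_pages_nodemask() returns NULL for
	 * order >= MAX_ORDER, so the physically contiguous range has to
	 * come from alloc_contig_pages() and is then turned into a
	 * compound page by prep_compound_page() when the caller passed
	 * __GFP_COMP.
	 */
	static struct page *alloc_pud_order_page(gfp_t gfp, int nid)
	{
		const unsigned int order = PUD_SHIFT - PAGE_SHIFT;
		struct page *page;

		page = alloc_contig_pages(1UL << order, gfp, nid, NULL);
		if (page && (gfp & __GFP_COMP))
			prep_compound_page(page, order);

		return page;
	}
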
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4113d7b66fee..d5357778b026 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1211,26 +1211,6 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
nr_nodes--)
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static void destroy_compound_gigantic_page(struct page *page,
- unsigned int order)
-{
- int i;
- int nr_pages = 1 << order;
- struct page *p = page + 1;
-
- atomic_set(compound_mapcount_ptr(page), 0);
- if (hpage_pincount_available(page))
- atomic_set(compound_pincount_ptr(page), 0);
-
- for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
- clear_compound_head(p);
- set_page_refcounted(p);
- }
-
- set_compound_order(page, 0);
- __ClearPageHead(page);
-}
-
static void free_gigantic_page(struct page *page, unsigned int order)
{
/*
@@ -1288,8 +1268,6 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
return NULL;
}
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
-static inline void destroy_compound_gigantic_page(struct page *page,
- unsigned int order) { }
#endif
static void update_and_free_page(struct hstate *h, struct page *page)
diff --git a/mm/internal.h b/mm/internal.h
index 10c677655912..520fd9b5e18a 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -620,4 +620,6 @@ struct migration_target_control {
gfp_t gfp_mask;
};
+void destroy_compound_gigantic_page(struct page *page,
+ unsigned int order);
#endif /* __MM_INTERNAL_H */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index eddbe4e56c73..4bae089e7a89 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2138,7 +2138,12 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
{
struct page *page;
- page = __alloc_pages(gfp, order, nid);
+ if (order >= MAX_ORDER) {
+ page = alloc_contig_pages(1UL << order, gfp, nid, NULL);
+ if (page && (gfp & __GFP_COMP))
+ prep_compound_page(page, order);
+ } else
+ page = __alloc_pages(gfp, order, nid);
/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
if (!static_branch_likely(&vm_numa_stat_key))
return page;
@@ -2212,6 +2217,14 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
nmask = policy_nodemask(gfp, pol);
if (!nmask || node_isset(hpage_node, *nmask)) {
mpol_cond_put(pol);
+
+ if (order >= MAX_ORDER) {
+ page = alloc_contig_pages(1UL << order, gfp,
+ hpage_node, NULL);
+ if (page && (gfp & __GFP_COMP))
+ prep_compound_page(page, order);
+ goto out;
+ }
/*
* First, try to allocate THP only on local node, but
* don't reclaim unnecessarily, just compact.
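
A sketch of how a (hypothetical) PUD-sized THP fault path would reach the
branch added above; GFP_TRANSHUGE already includes __GFP_COMP, so the returned
page comes back as a compound page. alloc_pud_thp_vma() is illustrative only:

	/* Hypothetical caller, not part of this series. */
	static struct page *alloc_pud_thp_vma(struct vm_area_struct *vma,
					      unsigned long haddr)
	{
		const unsigned int order = PUD_SHIFT - PAGE_SHIFT;

		/* hugepage=true selects the THP node-local path patched above. */
		return alloc_pages_vma(GFP_TRANSHUGE, order, vma, haddr,
				       numa_node_id(), true);
	}
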
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 97a4c7e4a579..8a8b241508f7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1480,6 +1480,24 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
}
}
+void destroy_compound_gigantic_page(struct page *page,
+ unsigned int order)
+{
+ int i;
+ int nr_pages = 1 << order;
+ struct page *p = page + 1;
+
+ atomic_set(compound_mapcount_ptr(page), 0);
+ for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
+ clear_compound_head(p);
+ set_page_refcounted(p);
+ }
+
+ set_compound_order(page, 0);
+ __ClearPageHead(page);
+ set_page_refcounted(page);
+}
+
static void __free_pages_ok(struct page *page, unsigned int order)
{
unsigned long flags;
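
destroy_compound_gigantic_page() has to undo what prep_compound_page() set up
at allocation time. A simplified, illustrative view of that setup (the real
prep_compound_page() in mm/page_alloc.c also sets TAIL_MAPPING and the
pincount) shows what each step of the function above reverses:

	/*
	 * Simplified sketch of prep_compound_page(): tail pages get a zero
	 * refcount and a link to the head page, and the head records the
	 * compound order.  destroy_compound_gigantic_page() restores each
	 * tail to a refcounted, standalone page and clears the head state
	 * so free_contig_range() can hand the pages back one by one.
	 */
	static void prep_compound_page_sketch(struct page *page, unsigned int order)
	{
		int i, nr_pages = 1 << order;

		__SetPageHead(page);
		for (i = 1; i < nr_pages; i++) {
			struct page *p = page + i;

			set_page_count(p, 0);
			set_compound_head(p, page);
		}
		set_compound_order(page, order);
		atomic_set(compound_mapcount_ptr(page), -1);
	}
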
@@ -1489,11 +1507,16 @@ static void __free_pages_ok(struct page *page, unsigned int order)
if (!free_pages_prepare(page, order, true))
return;
- migratetype = get_pfnblock_migratetype(page, pfn);
- local_irq_save(flags);
- __count_vm_events(PGFREE, 1 << order);
- free_one_page(page_zone(page), page, pfn, order, migratetype);
- local_irq_restore(flags);
+ if (order >= MAX_ORDER) {
+ destroy_compound_gigantic_page(page, order);
+ free_contig_range(page_to_pfn(page), 1 << order);
+ } else {
+ migratetype = get_pfnblock_migratetype(page, pfn);
+ local_irq_save(flags);
+ __count_vm_events(PGFREE, 1 << order);
+ free_one_page(page_zone(page), page, pfn, order, migratetype);
+ local_irq_restore(flags);
+ }
}
void __free_pages_core(struct page *page, unsigned int order)
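
And the matching free-side sketch: once the caller drops the gigantic compound
page, __free_pages_ok() now takes the new order >= MAX_ORDER branch instead of
feeding the pages to the buddy free lists. free_pud_order_page() is a made-up
name for illustration:

	/*
	 * Sketch only.  For order >= MAX_ORDER, __free_pages_ok() above
	 * dismantles the compound page with destroy_compound_gigantic_page()
	 * and returns the PFN range via free_contig_range().
	 */
	static void free_pud_order_page(struct page *page)
	{
		const unsigned int order = PUD_SHIFT - PAGE_SHIFT;

		__free_pages(page, order);
	}
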
--
2.28.0