Message-ID: <f4dc7bb6-be3a-c1b-c30-37c4e0c16e4d@google.com>
Date: Mon, 25 Sep 2023 01:32:02 -0700 (PDT)
From: Hugh Dickins <hughd@...gle.com>
To: Andrew Morton <akpm@...ux-foundation.org>
cc: Andi Kleen <ak@...ux.intel.com>, Christoph Lameter <cl@...ux.com>,
Matthew Wilcox <willy@...radead.org>,
Mike Kravetz <mike.kravetz@...cle.com>,
David Hildenbrand <david@...hat.com>,
Suren Baghdasaryan <surenb@...gle.com>,
Yang Shi <shy828301@...il.com>,
Sidhartha Kumar <sidhartha.kumar@...cle.com>,
Vishal Moola <vishal.moola@...il.com>,
Kefeng Wang <wangkefeng.wang@...wei.com>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Tejun Heo <tj@...nel.org>,
Mel Gorman <mgorman@...hsingularity.net>,
Michal Hocko <mhocko@...e.com>, linux-kernel@...r.kernel.org,
linux-mm@...ck.org
Subject: [PATCH 09/12] mm: add page_rmappable_folio() wrapper

folio_prep_large_rmappable() is being used repeatedly along with a
conversion from page to folio, a check for non-NULL, and a check for
order > 1: wrap it all up into struct folio *page_rmappable_folio(struct page *).

Signed-off-by: Hugh Dickins <hughd@...gle.com>
---
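A note for review, not for the commit message: as I read it, the
order > 1 test mirrors what folio_prep_large_rmappable() needs, since it
initializes state kept in the tail pages of a large folio (such as the
deferred-split list), which only exists from order 2 upwards.

For illustration only, here is a minimal userspace sketch of the
open-coded pattern being factored out; struct page, struct folio,
folio_order() and folio_prep_large_rmappable() below are self-contained
stand-ins, not the kernel's definitions:

#include <stdio.h>

struct page { unsigned long flags; };

struct folio {
	struct page page;	/* a folio overlays its head page */
	unsigned int order;	/* stand-in: the kernel encodes this differently */
};

static unsigned int folio_order(const struct folio *folio)
{
	return folio->order;
}

static void folio_prep_large_rmappable(struct folio *folio)
{
	/* the kernel version initializes large-folio state; this
	 * stand-in just makes the call visible */
	printf("prepped order-%u folio\n", folio_order(folio));
}

/* the wrapper: cast, NULL check and order > 1 check in one place */
static struct folio *page_rmappable_folio(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	if (folio && folio_order(folio) > 1)
		folio_prep_large_rmappable(folio);
	return folio;
}

int main(void)
{
	struct folio f = { .order = 2 };

	/* callers previously open-coded the cast and checks above */
	page_rmappable_folio(&f.page);
	return 0;
}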
 include/linux/huge_mm.h | 13 +++++++++++++
 mm/mempolicy.c          | 17 +++--------------
 mm/page_alloc.c         |  8 ++------
 3 files changed, 18 insertions(+), 20 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index fa0350b0812a..58e7662a8a62 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -141,6 +141,15 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags);
 
 void folio_prep_large_rmappable(struct folio *folio);
+static inline struct folio *page_rmappable_folio(struct page *page)
+{
+	struct folio *folio = (struct folio *)page;
+
+	if (folio && folio_order(folio) > 1)
+		folio_prep_large_rmappable(folio);
+	return folio;
+}
+
 bool can_split_folio(struct folio *folio, int *pextra_pins);
 int split_huge_page_to_list(struct page *page, struct list_head *list);
 static inline int split_huge_page(struct page *page)
@@ -281,6 +290,10 @@ static inline bool hugepage_vma_check(struct vm_area_struct *vma,
 }
 
 static inline void folio_prep_large_rmappable(struct folio *folio) {}
+static inline struct folio *page_rmappable_folio(struct page *page)
+{
+	return (struct folio *)page;
+}
 
 #define transparent_hugepage_flags 0UL
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 7ab6102d7da4..4c3b3f535630 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2137,10 +2137,7 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
 		mpol_cond_put(pol);
 		gfp |= __GFP_COMP;
 		page = alloc_page_interleave(gfp, order, nid);
-		folio = (struct folio *)page;
-		if (folio && order > 1)
-			folio_prep_large_rmappable(folio);
-		goto out;
+		return page_rmappable_folio(page);
 	}
 
 	if (pol->mode == MPOL_PREFERRED_MANY) {
@@ -2150,10 +2147,7 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
 		gfp |= __GFP_COMP;
 		page = alloc_pages_preferred_many(gfp, order, node, pol);
 		mpol_cond_put(pol);
-		folio = (struct folio *)page;
-		if (folio && order > 1)
-			folio_prep_large_rmappable(folio);
-		goto out;
+		return page_rmappable_folio(page);
 	}
 
 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
@@ -2247,12 +2241,7 @@ EXPORT_SYMBOL(alloc_pages);
 
 struct folio *folio_alloc(gfp_t gfp, unsigned order)
 {
-	struct page *page = alloc_pages(gfp | __GFP_COMP, order);
-	struct folio *folio = (struct folio *)page;
-
-	if (folio && order > 1)
-		folio_prep_large_rmappable(folio);
-	return folio;
+	return page_rmappable_folio(alloc_pages(gfp | __GFP_COMP, order));
 }
 EXPORT_SYMBOL(folio_alloc);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 95546f376302..5b1707d9025a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4456,12 +4456,8 @@ struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
 		nodemask_t *nodemask)
 {
 	struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
-			preferred_nid, nodemask);
-	struct folio *folio = (struct folio *)page;
-
-	if (folio && order > 1)
-		folio_prep_large_rmappable(folio);
-	return folio;
+					preferred_nid, nodemask);
+	return page_rmappable_folio(page);
 }
 EXPORT_SYMBOL(__folio_alloc);
--
2.35.3