Message-ID: <c6ecf0b9425db4993fb043caf3f48f2b32d7eb28.1747264138.git.ackerleytng@google.com>
Date: Wed, 14 May 2025 16:42:00 -0700
From: Ackerley Tng <ackerleytng@...gle.com>
To: kvm@...r.kernel.org, linux-mm@...ck.org, linux-kernel@...r.kernel.org,
x86@...nel.org, linux-fsdevel@...r.kernel.org
Cc: ackerleytng@...gle.com, aik@....com, ajones@...tanamicro.com,
akpm@...ux-foundation.org, amoorthy@...gle.com, anthony.yznaga@...cle.com,
anup@...infault.org, aou@...s.berkeley.edu, bfoster@...hat.com,
binbin.wu@...ux.intel.com, brauner@...nel.org, catalin.marinas@....com,
chao.p.peng@...el.com, chenhuacai@...nel.org, dave.hansen@...el.com,
david@...hat.com, dmatlack@...gle.com, dwmw@...zon.co.uk,
erdemaktas@...gle.com, fan.du@...el.com, fvdl@...gle.com, graf@...zon.com,
haibo1.xu@...el.com, hch@...radead.org, hughd@...gle.com, ira.weiny@...el.com,
isaku.yamahata@...el.com, jack@...e.cz, james.morse@....com,
jarkko@...nel.org, jgg@...pe.ca, jgowans@...zon.com, jhubbard@...dia.com,
jroedel@...e.de, jthoughton@...gle.com, jun.miao@...el.com,
kai.huang@...el.com, keirf@...gle.com, kent.overstreet@...ux.dev,
kirill.shutemov@...el.com, liam.merwick@...cle.com,
maciej.wieczor-retman@...el.com, mail@...iej.szmigiero.name, maz@...nel.org,
mic@...ikod.net, michael.roth@....com, mpe@...erman.id.au,
muchun.song@...ux.dev, nikunj@....com, nsaenz@...zon.es,
oliver.upton@...ux.dev, palmer@...belt.com, pankaj.gupta@....com,
paul.walmsley@...ive.com, pbonzini@...hat.com, pdurrant@...zon.co.uk,
peterx@...hat.com, pgonda@...gle.com, pvorel@...e.cz, qperret@...gle.com,
quic_cvanscha@...cinc.com, quic_eberman@...cinc.com,
quic_mnalajal@...cinc.com, quic_pderrin@...cinc.com, quic_pheragu@...cinc.com,
quic_svaddagi@...cinc.com, quic_tsoni@...cinc.com, richard.weiyang@...il.com,
rick.p.edgecombe@...el.com, rientjes@...gle.com, roypat@...zon.co.uk,
rppt@...nel.org, seanjc@...gle.com, shuah@...nel.org, steven.price@....com,
steven.sistare@...cle.com, suzuki.poulose@....com, tabba@...gle.com,
thomas.lendacky@....com, usama.arif@...edance.com, vannapurve@...gle.com,
vbabka@...e.cz, viro@...iv.linux.org.uk, vkuznets@...hat.com,
wei.w.wang@...el.com, will@...nel.org, willy@...radead.org,
xiaoyao.li@...el.com, yan.y.zhao@...el.com, yilun.xu@...el.com,
yuzenghui@...wei.com, zhiquan1.li@...el.com
Subject: [RFC PATCH v2 21/51] mm: hugetlb: Inline huge_node() into callers

huge_node()'s role was to read the struct mempolicy (mpol) from the
vma and then interpret that mpol to get a node id and nodemask.

huge_node() can be inlined into its callers, since 2 of the 3 callers
will be refactored in later patches to take and interpret an mpol
directly, without reading it from the vma.
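
As an illustration, each call site goes from the single combined call

	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);

to the two existing helpers that huge_node() was wrapping (names
exactly as in the hunks below; `addr' and `h' stand in for each call
site's local variables):

	pgoff_t ilx;

	mpol = get_vma_policy(vma, addr, h->order, &ilx);
	nid = policy_node_nodemask(mpol, gfp_mask, ilx, &nodemask);

The conditional unref of mpol after allocation is unchanged.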
Signed-off-by: Ackerley Tng <ackerleytng@...gle.com>
Change-Id: Ic94b2ed916fd4f89b7d2755288a3a2f6a56051f7
---
 include/linux/mempolicy.h | 12 ------------
 mm/hugetlb.c              | 13 ++++++++++---
 mm/mempolicy.c            | 21 ---------------------
 3 files changed, 10 insertions(+), 36 deletions(-)
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 840c576abcfd..41fc53605ef0 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -140,9 +140,6 @@ extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
 extern int policy_node_nodemask(struct mempolicy *mpol, gfp_t gfp_flags,
 				pgoff_t ilx, nodemask_t **nodemask);
 
-extern int huge_node(struct vm_area_struct *vma,
-		unsigned long addr, gfp_t gfp_flags,
-		struct mempolicy **mpol, nodemask_t **nodemask);
 extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
 extern bool mempolicy_in_oom_domain(struct task_struct *tsk,
 				const nodemask_t *mask);
@@ -260,15 +257,6 @@ static inline int policy_node_nodemask(struct mempolicy *mpol, gfp_t gfp_flags,
 	return 0;
 }
 
-static inline int huge_node(struct vm_area_struct *vma,
-		unsigned long addr, gfp_t gfp_flags,
-		struct mempolicy **mpol, nodemask_t **nodemask)
-{
-	*mpol = NULL;
-	*nodemask = NULL;
-	return 0;
-}
-
 static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
 {
 	return false;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b822b204e9b3..5cc261b90e39 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1372,10 +1372,12 @@ static struct folio *dequeue_hugetlb_folio(struct hstate *h,
 	struct mempolicy *mpol;
 	gfp_t gfp_mask;
 	nodemask_t *nodemask;
+	pgoff_t ilx;
 	int nid;
 
 	gfp_mask = htlb_alloc_mask(h);
-	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
+	mpol = get_vma_policy(vma, address, h->order, &ilx);
+	nid = policy_node_nodemask(mpol, gfp_mask, ilx, &nodemask);
 
 	if (mpol_is_preferred_many(mpol)) {
 		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
@@ -2321,8 +2323,11 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
 	gfp_t gfp_mask = htlb_alloc_mask(h);
 	int nid;
 	nodemask_t *nodemask;
+	pgoff_t ilx;
+
+	mpol = get_vma_policy(vma, addr, h->order, &ilx);
+	nid = policy_node_nodemask(mpol, gfp_mask, ilx, &nodemask);
 
-	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
 	if (mpol_is_preferred_many(mpol)) {
 		gfp_t gfp = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
 
@@ -6829,10 +6834,12 @@ static struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
 	nodemask_t *nodemask;
 	struct folio *folio;
 	gfp_t gfp_mask;
+	pgoff_t ilx;
 	int node;
 
 	gfp_mask = htlb_alloc_mask(h);
-	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
+	mpol = get_vma_policy(vma, address, h->order, &ilx);
+	node = policy_node_nodemask(mpol, gfp_mask, ilx, &nodemask);
 	/*
 	 * This is used to allocate a temporary hugetlb to hold the copied
 	 * content, which will then be copied again to the final hugetlb
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 7837158ee5a8..39d0abc407dc 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2145,27 +2145,6 @@ int policy_node_nodemask(struct mempolicy *mpol, gfp_t gfp_flags,
 }
 
 #ifdef CONFIG_HUGETLBFS
-/*
- * huge_node(@vma, @addr, @gfp_flags, @mpol)
- * @vma: virtual memory area whose policy is sought
- * @addr: address in @vma for shared policy lookup and interleave policy
- * @gfp_flags: for requested zone
- * @mpol: pointer to mempolicy pointer for reference counted mempolicy
- * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
- *
- * Returns a nid suitable for a huge page allocation and a pointer
- * to the struct mempolicy for conditional unref after allocation.
- * If the effective policy is 'bind' or 'prefer-many', returns a pointer
- * to the mempolicy's @nodemask for filtering the zonelist.
- */
-int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
-		struct mempolicy **mpol, nodemask_t **nodemask)
-{
-	pgoff_t ilx;
-
-	*mpol = get_vma_policy(vma, addr, hstate_vma(vma)->order, &ilx);
-	return policy_node_nodemask(*mpol, gfp_flags, ilx, nodemask);
-}
 
 /*
  * init_nodemask_of_mempolicy
--
2.49.0.1045.g170613ef41-goog