Message-ID: <1f64e3c7f04fc725f4da4d57de1ea040b7a56952.1747264138.git.ackerleytng@google.com>
Date: Wed, 14 May 2025 16:42:01 -0700
From: Ackerley Tng <ackerleytng@...gle.com>
To: kvm@...r.kernel.org, linux-mm@...ck.org, linux-kernel@...r.kernel.org,
x86@...nel.org, linux-fsdevel@...r.kernel.org
Cc: ackerleytng@...gle.com, aik@....com, ajones@...tanamicro.com,
akpm@...ux-foundation.org, amoorthy@...gle.com, anthony.yznaga@...cle.com,
anup@...infault.org, aou@...s.berkeley.edu, bfoster@...hat.com,
binbin.wu@...ux.intel.com, brauner@...nel.org, catalin.marinas@....com,
chao.p.peng@...el.com, chenhuacai@...nel.org, dave.hansen@...el.com,
david@...hat.com, dmatlack@...gle.com, dwmw@...zon.co.uk,
erdemaktas@...gle.com, fan.du@...el.com, fvdl@...gle.com, graf@...zon.com,
haibo1.xu@...el.com, hch@...radead.org, hughd@...gle.com, ira.weiny@...el.com,
isaku.yamahata@...el.com, jack@...e.cz, james.morse@....com,
jarkko@...nel.org, jgg@...pe.ca, jgowans@...zon.com, jhubbard@...dia.com,
jroedel@...e.de, jthoughton@...gle.com, jun.miao@...el.com,
kai.huang@...el.com, keirf@...gle.com, kent.overstreet@...ux.dev,
kirill.shutemov@...el.com, liam.merwick@...cle.com,
maciej.wieczor-retman@...el.com, mail@...iej.szmigiero.name, maz@...nel.org,
mic@...ikod.net, michael.roth@....com, mpe@...erman.id.au,
muchun.song@...ux.dev, nikunj@....com, nsaenz@...zon.es,
oliver.upton@...ux.dev, palmer@...belt.com, pankaj.gupta@....com,
paul.walmsley@...ive.com, pbonzini@...hat.com, pdurrant@...zon.co.uk,
peterx@...hat.com, pgonda@...gle.com, pvorel@...e.cz, qperret@...gle.com,
quic_cvanscha@...cinc.com, quic_eberman@...cinc.com,
quic_mnalajal@...cinc.com, quic_pderrin@...cinc.com, quic_pheragu@...cinc.com,
quic_svaddagi@...cinc.com, quic_tsoni@...cinc.com, richard.weiyang@...il.com,
rick.p.edgecombe@...el.com, rientjes@...gle.com, roypat@...zon.co.uk,
rppt@...nel.org, seanjc@...gle.com, shuah@...nel.org, steven.price@....com,
steven.sistare@...cle.com, suzuki.poulose@....com, tabba@...gle.com,
thomas.lendacky@....com, usama.arif@...edance.com, vannapurve@...gle.com,
vbabka@...e.cz, viro@...iv.linux.org.uk, vkuznets@...hat.com,
wei.w.wang@...el.com, will@...nel.org, willy@...radead.org,
xiaoyao.li@...el.com, yan.y.zhao@...el.com, yilun.xu@...el.com,
yuzenghui@...wei.com, zhiquan1.li@...el.com
Subject: [RFC PATCH v2 22/51] mm: hugetlb: Refactor hugetlb allocation functions

Refactor dequeue_hugetlb_folio() and alloc_surplus_hugetlb_folio() to
take gfp_mask, mpol, nid and nodemask directly instead of deriving them
from a vma and address. This decouples allocation of a folio from a
vma: the caller is now responsible for resolving the memory policy and
for dropping the policy reference with mpol_cond_put().

Signed-off-by: Ackerley Tng <ackerleytng@...gle.com>
Change-Id: I890fb46fe8c6349383d8cf89befc68a4994eb416
---
mm/hugetlb.c | 64 ++++++++++++++++++++++++----------------------------
1 file changed, 30 insertions(+), 34 deletions(-)
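
As an illustration (not part of the patch), below is a minimal sketch
of the new calling convention implied by the hunks that follow. The
wrapper example_alloc() is hypothetical, and locking, cgroup charging
and reservation handling are omitted; see the alloc_hugetlb_folio()
hunks for the real sequence.

/* Hypothetical caller, for illustration only -- not part of this patch. */
static struct folio *example_alloc(struct hstate *h,
				   struct vm_area_struct *vma,
				   unsigned long addr)
{
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct folio *folio;
	gfp_t gfp_mask;
	pgoff_t ilx;
	int nid;

	/* The caller now resolves the policy once, up front... */
	mpol = get_vma_policy(vma, addr, h->order, &ilx);
	gfp_mask = htlb_alloc_mask(h);
	nid = policy_node_nodemask(mpol, gfp_mask, ilx, &nodemask);

	/*
	 * ...and passes it down. (Locking omitted: the real caller holds
	 * hugetlb_lock around the dequeue.)
	 */
	folio = dequeue_hugetlb_folio(h, gfp_mask, mpol, nid, nodemask);
	if (!folio)
		folio = alloc_surplus_hugetlb_folio(h, gfp_mask, mpol,
						    nid, nodemask);

	/* The caller also owns the policy reference, on every path. */
	mpol_cond_put(mpol);
	return folio;
}
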
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5cc261b90e39..29d1a3fb10df 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1364,34 +1364,22 @@ static unsigned long available_huge_pages(struct hstate *h)
return h->free_huge_pages - h->resv_huge_pages;
}
-static struct folio *dequeue_hugetlb_folio(struct hstate *h,
- struct vm_area_struct *vma,
- unsigned long address)
+static struct folio *dequeue_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
+ struct mempolicy *mpol,
+ int nid, nodemask_t *nodemask)
{
struct folio *folio = NULL;
- struct mempolicy *mpol;
- gfp_t gfp_mask;
- nodemask_t *nodemask;
- pgoff_t ilx;
- int nid;
-
- gfp_mask = htlb_alloc_mask(h);
- mpol = get_vma_policy(vma, address, h->order, &ilx);
- nid = policy_node_nodemask(mpol, gfp_mask, ilx, &nodemask);
if (mpol_is_preferred_many(mpol)) {
- folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
- nid, nodemask);
+ folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, nid, nodemask);
/* Fallback to all nodes if page==NULL */
nodemask = NULL;
}
if (!folio)
- folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
- nid, nodemask);
+ folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, nid, nodemask);
- mpol_cond_put(mpol);
return folio;
}
@@ -2312,21 +2300,14 @@ static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mas
}
/*
- * Use the VMA's mpolicy to allocate a huge page from the buddy.
+ * Allocate a huge page from the buddy given a memory policy and node info.
*/
static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
- struct vm_area_struct *vma,
- unsigned long addr)
+ gfp_t gfp_mask,
+ struct mempolicy *mpol,
+ int nid, nodemask_t *nodemask)
{
struct folio *folio = NULL;
- struct mempolicy *mpol;
- gfp_t gfp_mask = htlb_alloc_mask(h);
- int nid;
- nodemask_t *nodemask;
- pgoff_t ilx;
-
- mpol = get_vma_policy(vma, addr, h->order, &ilx);
- nid = policy_node_nodemask(mpol, gfp_mask, ilx, &nodemask);
if (mpol_is_preferred_many(mpol)) {
gfp_t gfp = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
@@ -2339,7 +2320,7 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
if (!folio)
folio = alloc_surplus_hugetlb_folio_nodemask(h, gfp_mask, nid, nodemask);
- mpol_cond_put(mpol);
+
return folio;
}
@@ -2993,6 +2974,11 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
int ret, idx;
struct hugetlb_cgroup *h_cg = NULL;
gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL;
+ struct mempolicy *mpol;
+ nodemask_t *nodemask;
+ gfp_t gfp_mask;
+ pgoff_t ilx;
+ int nid;
idx = hstate_index(h);
@@ -3032,7 +3018,6 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
subpool_reservation_exists = npages_req == 0;
}
-
reservation_exists = vma_reservation_exists || subpool_reservation_exists;
/*
@@ -3048,21 +3033,30 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
goto out_subpool_put;
}
+ mpol = get_vma_policy(vma, addr, h->order, &ilx);
+
ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
- if (ret)
+ if (ret) {
+ mpol_cond_put(mpol);
goto out_uncharge_cgroup_reservation;
+ }
+
+ gfp_mask = htlb_alloc_mask(h);
+ nid = policy_node_nodemask(mpol, gfp_mask, ilx, &nodemask);
spin_lock_irq(&hugetlb_lock);
folio = NULL;
if (reservation_exists || available_huge_pages(h))
- folio = dequeue_hugetlb_folio(h, vma, addr);
+ folio = dequeue_hugetlb_folio(h, gfp_mask, mpol, nid, nodemask);
if (!folio) {
spin_unlock_irq(&hugetlb_lock);
- folio = alloc_surplus_hugetlb_folio(h, vma, addr);
- if (!folio)
+ folio = alloc_surplus_hugetlb_folio(h, gfp_mask, mpol, nid, nodemask);
+ if (!folio) {
+ mpol_cond_put(mpol);
goto out_uncharge_cgroup;
+ }
spin_lock_irq(&hugetlb_lock);
list_add(&folio->lru, &h->hugepage_activelist);
folio_ref_unfreeze(folio, 1);
@@ -3087,6 +3081,8 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
spin_unlock_irq(&hugetlb_lock);
+ mpol_cond_put(mpol);
+
hugetlb_set_folio_subpool(folio, spool);
/* If vma accounting wasn't bypassed earlier, follow up with commit. */
--
2.49.0.1045.g170613ef41-goog