Message-ID: <1778a7324a1242fa907981576ebd69716a94d778.1726009989.git.ackerleytng@google.com>
Date: Tue, 10 Sep 2024 23:43:36 +0000
From: Ackerley Tng <ackerleytng@...gle.com>
To: tabba@...gle.com, quic_eberman@...cinc.com, roypat@...zon.co.uk,
jgg@...dia.com, peterx@...hat.com, david@...hat.com, rientjes@...gle.com,
fvdl@...gle.com, jthoughton@...gle.com, seanjc@...gle.com,
pbonzini@...hat.com, zhiquan1.li@...el.com, fan.du@...el.com,
jun.miao@...el.com, isaku.yamahata@...el.com, muchun.song@...ux.dev,
mike.kravetz@...cle.com
Cc: erdemaktas@...gle.com, vannapurve@...gle.com, ackerleytng@...gle.com,
qperret@...gle.com, jhubbard@...dia.com, willy@...radead.org,
shuah@...nel.org, brauner@...nel.org, bfoster@...hat.com,
kent.overstreet@...ux.dev, pvorel@...e.cz, rppt@...nel.org,
richard.weiyang@...il.com, anup@...infault.org, haibo1.xu@...el.com,
ajones@...tanamicro.com, vkuznets@...hat.com, maciej.wieczor-retman@...el.com,
pgonda@...gle.com, oliver.upton@...ux.dev, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, kvm@...r.kernel.org, linux-kselftest@...r.kernel.org,
linux-fsdevel@...ck.org
Subject: [RFC PATCH 05/39] mm: hugetlb: Refactor alloc_buddy_hugetlb_folio_with_mpol()
to interpret mempolicy instead of vma

Reducing dependence on vma avoids the hugetlb-specific assumption of
where the mempolicy is stored. This will open up other ways of using
hugetlb.

Signed-off-by: Ackerley Tng <ackerleytng@...gle.com>
---
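
Note (illustration only, not part of this patch): one of the "other
ways of using hugetlb" this opens up is allocation without any VMA at
hand. Below is a minimal sketch of such a caller, allocating according
to the current task's mempolicy. The function name
alloc_hugetlb_folio_current_policy() is hypothetical, the sketch
assumes it sits in mm/hugetlb.c next to the refactored helper, and it
passes ilx = 0 to sidestep interleave-index handling:

/*
 * Hypothetical example: allocate a surplus hugetlb folio according to
 * the current task's mempolicy. Only possible because the refactored
 * helper takes (mpol, nid, nodemask) instead of a (vma, addr) pair.
 */
static struct folio *alloc_hugetlb_folio_current_policy(struct hstate *h)
{
	struct mempolicy *mpol = get_task_policy(current);
	nodemask_t *nodemask;
	int nid;

	/* ilx = 0: interleave-index handling is elided in this sketch */
	nid = policy_node_nodemask(mpol, htlb_alloc_mask(h), 0, &nodemask);

	/* get_task_policy() takes no reference, so no mpol_cond_put() */
	return alloc_buddy_hugetlb_folio_from_node(h, mpol, nid, nodemask);
}
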
 mm/hugetlb.c | 37 +++++++++++++++++++++++--------------
 1 file changed, 23 insertions(+), 14 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5cf7fb117e9d..2f2bd2444ae2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2536,32 +2536,31 @@ static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mas
 }
 
 /*
- * Use the VMA's mpolicy to allocate a huge page from the buddy.
+ * Allocate a huge page from the buddy allocator, given memory policy, node id
+ * and nodemask.
  */
-static
-struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
-		struct vm_area_struct *vma, unsigned long addr)
+static struct folio *alloc_buddy_hugetlb_folio_from_node(struct hstate *h,
+							 struct mempolicy *mpol,
+							 int nid,
+							 nodemask_t *nodemask)
 {
-	struct folio *folio = NULL;
-	struct mempolicy *mpol;
 	gfp_t gfp_mask = htlb_alloc_mask(h);
-	int nid;
-	nodemask_t *nodemask;
 
-	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
+	struct folio *folio = NULL;
 
 	if (mpol_is_preferred_many(mpol)) {
 		gfp_t gfp = gfp_mask | __GFP_NOWARN;
 
 		gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
 		folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
+	}
 
-		/* Fallback to all nodes if page==NULL */
+	if (!folio) {
+		/* Fallback to all nodes if earlier allocation failed */
 		nodemask = NULL;
-	}
 
-	if (!folio)
 		folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
-	mpol_cond_put(mpol);
+	}
+
 	return folio;
 }
 
@@ -3187,8 +3186,18 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 	spin_lock_irq(&hugetlb_lock);
 	folio = dequeue_hugetlb_folio_vma(h, vma, addr, use_hstate_resv);
 	if (!folio) {
+		struct mempolicy *mpol;
+		nodemask_t *nodemask;
+		pgoff_t ilx;
+		int nid;
+
 		spin_unlock_irq(&hugetlb_lock);
-		folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
+
+		mpol = get_vma_policy(vma, addr, hstate_vma(vma)->order, &ilx);
+		nid = policy_node_nodemask(mpol, htlb_alloc_mask(h), ilx, &nodemask);
+		folio = alloc_buddy_hugetlb_folio_from_node(h, mpol, nid, nodemask);
+		mpol_cond_put(mpol);
+
 		if (!folio)
 			goto out_uncharge_cgroup;
 		spin_lock_irq(&hugetlb_lock);
--
2.46.0.598.g6f2099f65c-goog