Message-ID: <20241201212240.533824-7-peterx@redhat.com>
Date: Sun, 1 Dec 2024 16:22:39 -0500
From: Peter Xu <peterx@...hat.com>
To: linux-kernel@...r.kernel.org,
linux-mm@...ck.org
Cc: Rik van Riel <riel@...riel.com>,
Breno Leitao <leitao@...ian.org>,
Andrew Morton <akpm@...ux-foundation.org>,
peterx@...hat.com,
Muchun Song <muchun.song@...ux.dev>,
Oscar Salvador <osalvador@...e.de>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Naoya Horiguchi <nao.horiguchi@...il.com>,
Ackerley Tng <ackerleytng@...gle.com>
Subject: [PATCH 6/7] mm/hugetlb: Drop vma_has_reserves()
After the previous cleanup, vma_has_reserves() is mostly an empty helper,
except that it documents that "use an existing reservation" is the inverted
meaning of "needs a global reserve count", which is still true.

To avoid the confusion of having two inverted ways to ask the same question,
use gbl_chg directly everywhere and drop the function.

While at it, rename "chg" to "gbl_chg" in dequeue_hugetlb_folio_vma(). It
might be helpful for readers to see that the "chg" here is the global
reserve count, not the vma resv count.
Signed-off-by: Peter Xu <peterx@...hat.com>
---
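For clarity, here is a tiny userspace sketch (not kernel code; the names
mirror the kernel ones purely for illustration) of the equivalence this
patch relies on: vma_has_reserves(chg) was simply "chg == 0", so every call
site can open-code !gbl_chg instead.

#include <assert.h>
#include <stdbool.h>

/* The helper being removed: "has reserves" was simply chg == 0. */
static bool vma_has_reserves(long chg)
{
	return chg == 0;
}

int main(void)
{
	/* Zero vs. nonzero is all the call sites care about. */
	for (long gbl_chg = 0; gbl_chg <= 1; gbl_chg++) {
		/* The helper and the open-coded check agree... */
		assert(vma_has_reserves(gbl_chg) == !gbl_chg);
		/* ...and "needs a free page" is just the inverse. */
		assert(!vma_has_reserves(gbl_chg) == (gbl_chg != 0));
	}
	return 0;
}
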
mm/hugetlb.c | 23 ++++++-----------------
1 file changed, 6 insertions(+), 17 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b7e16b3c4e67..10251ef3289a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1246,16 +1246,6 @@ void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
hugetlb_dup_vma_private(vma);
}
-/* Returns true if the VMA has associated reserve pages */
-static bool vma_has_reserves(long chg)
-{
- /*
- * Now "chg" has all the conditions considered for whether we
- * should use an existing reservation.
- */
- return chg == 0;
-}
-
static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
{
int nid = folio_nid(folio);
@@ -1341,7 +1331,7 @@ static unsigned long available_huge_pages(struct hstate *h)
static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
struct vm_area_struct *vma,
- unsigned long address, long chg)
+ unsigned long address, long gbl_chg)
{
struct folio *folio = NULL;
struct mempolicy *mpol;
@@ -1350,11 +1340,10 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
int nid;
/*
- * A child process with MAP_PRIVATE mappings created by their parent
- * have no page reserves. This check ensures that reservations are
- * not "stolen". The child may still get SIGKILLed
+ * gbl_chg==1 means the allocation requires a new page that was not
+ * reserved before. Make sure there's at least one free page.
*/
- if (!vma_has_reserves(chg) && !available_huge_pages(h))
+ if (gbl_chg && !available_huge_pages(h))
goto err;
gfp_mask = htlb_alloc_mask(h);
@@ -1372,7 +1361,7 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
nid, nodemask);
- if (folio && vma_has_reserves(chg)) {
+ if (folio && !gbl_chg) {
folio_set_hugetlb_restore_reserve(folio);
h->resv_huge_pages--;
}
@@ -3023,7 +3012,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
if (!folio)
goto out_uncharge_cgroup;
spin_lock_irq(&hugetlb_lock);
- if (vma_has_reserves(gbl_chg)) {
+ if (!gbl_chg) {
folio_set_hugetlb_restore_reserve(folio);
h->resv_huge_pages--;
}
--
2.47.0
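
For readers skimming the hunks above, below is a condensed userspace model
(illustrative names and counters, not the kernel implementation) of the two
gbl_chg checks that remain: a nonzero gbl_chg means the allocation needs a
page that was not reserved, so a free page must be available; gbl_chg == 0
means an existing reservation is consumed, so the global reserve count
drops.

#include <stdbool.h>
#include <stdio.h>

/*
 * Userspace model of the post-patch checks in dequeue_hugetlb_folio_vma().
 * The struct and counters are stand-ins, not the real struct hstate.
 */
struct hstate_model {
	long free_huge_pages;	/* stands in for available_huge_pages(h) */
	long resv_huge_pages;	/* global reservation count */
};

/* Returns true if a folio would be dequeued; updates the model counters. */
static bool model_dequeue(struct hstate_model *h, long gbl_chg)
{
	/*
	 * gbl_chg != 0: the allocation needs a page that was not reserved
	 * before, so bail out unless a free page is available.
	 */
	if (gbl_chg && !h->free_huge_pages)
		return false;

	h->free_huge_pages--;	/* the dequeued folio */

	/* gbl_chg == 0: an existing global reservation is consumed. */
	if (!gbl_chg)
		h->resv_huge_pages--;

	return true;
}

int main(void)
{
	struct hstate_model h = { .free_huge_pages = 1, .resv_huge_pages = 1 };

	printf("reserved alloc:   %d\n", model_dequeue(&h, 0));	/* 1 */
	printf("unreserved alloc: %d\n", model_dequeue(&h, 1));	/* 0, no free pages left */
	return 0;
}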