Message-Id: <20251225082059.1632-3-lizhe.67@bytedance.com>
Date: Thu, 25 Dec 2025 16:20:53 +0800
From: 李喆 <lizhe.67@...edance.com>
To: <muchun.song@...ux.dev>, <osalvador@...e.de>, <david@...nel.org>,
<akpm@...ux-foundation.org>, <fvdl@...gle.com>
Cc: <linux-mm@...ck.org>, <linux-kernel@...r.kernel.org>,
<lizhe.67@...edance.com>
Subject: [PATCH 2/8] mm/hugetlb: convert to prep_account_new_hugetlb_folio()
From: Li Zhe <lizhe.67@...edance.com>
After a huge folio is instantiated, it is always initialized through
back-to-back calls to prep_new_hugetlb_folio() and
account_new_hugetlb_folio(). Consolidate the two functions into a
single entry point, prep_account_new_hugetlb_folio(), so that a future
change cannot update one routine while overlooking the other.
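For illustration only (not part of the diff below), here is a minimal
sketch of the call-site shape before and after the change; all names
are taken from the patch itself, and the surrounding allocation and
enqueue steps are omitted:

  Before:
	spin_lock_irq(&hugetlb_lock);
	prep_new_hugetlb_folio(folio);
	account_new_hugetlb_folio(h, folio);
	spin_unlock_irq(&hugetlb_lock);

  After:
	spin_lock_irq(&hugetlb_lock);
	prep_account_new_hugetlb_folio(h, folio);
	spin_unlock_irq(&hugetlb_lock);
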
Signed-off-by: Li Zhe <lizhe.67@...edance.com>
---
mm/hugetlb.c | 29 ++++++++++-------------------
1 file changed, 10 insertions(+), 19 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d20614b1c927..63f9369789b5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1874,18 +1874,14 @@ void free_huge_folio(struct folio *folio)
 /*
  * Must be called with the hugetlb lock held
  */
-static void account_new_hugetlb_folio(struct hstate *h, struct folio *folio)
-{
-	lockdep_assert_held(&hugetlb_lock);
-	h->nr_huge_pages++;
-	h->nr_huge_pages_node[folio_nid(folio)]++;
-}
-
-static void prep_new_hugetlb_folio(struct folio *folio)
+static void prep_account_new_hugetlb_folio(struct hstate *h,
+					   struct folio *folio)
 {
 	lockdep_assert_held(&hugetlb_lock);
 	folio_clear_hugetlb_freed(folio);
 	prep_clear_zeroed(folio);
+	h->nr_huge_pages++;
+	h->nr_huge_pages_node[folio_nid(folio)]++;
 }
 
 void init_new_hugetlb_folio(struct folio *folio)
@@ -2012,8 +2008,7 @@ void prep_and_add_allocated_folios(struct hstate *h,
 	/* Add all new pool pages to free lists in one lock cycle */
 	spin_lock_irqsave(&hugetlb_lock, flags);
 	list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
-		prep_new_hugetlb_folio(folio);
-		account_new_hugetlb_folio(h, folio);
+		prep_account_new_hugetlb_folio(h, folio);
 		enqueue_hugetlb_folio(h, folio);
 	}
 	spin_unlock_irqrestore(&hugetlb_lock, flags);
@@ -2220,13 +2215,12 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
 		return NULL;
 
 	spin_lock_irq(&hugetlb_lock);
-	prep_new_hugetlb_folio(folio);
 	/*
 	 * nr_huge_pages needs to be adjusted within the same lock cycle
 	 * as surplus_pages, otherwise it might confuse
 	 * persistent_huge_pages() momentarily.
 	 */
-	account_new_hugetlb_folio(h, folio);
+	prep_account_new_hugetlb_folio(h, folio);
 
 	/*
 	 * We could have raced with the pool size change.
@@ -2264,8 +2258,7 @@ static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mas
 		return NULL;
 
 	spin_lock_irq(&hugetlb_lock);
-	prep_new_hugetlb_folio(folio);
-	account_new_hugetlb_folio(h, folio);
+	prep_account_new_hugetlb_folio(h, folio);
 	spin_unlock_irq(&hugetlb_lock);
 
 	/* fresh huge pages are frozen */
@@ -2831,18 +2824,17 @@ static int alloc_and_dissolve_hugetlb_folio(struct folio *old_folio,
 	/*
 	 * Ok, old_folio is still a genuine free hugepage. Remove it from
 	 * the freelist and decrease the counters. These will be
-	 * incremented again when calling account_new_hugetlb_folio()
+	 * incremented again when calling prep_account_new_hugetlb_folio()
 	 * and enqueue_hugetlb_folio() for new_folio. The counters will
 	 * remain stable since this happens under the lock.
 	 */
 	remove_hugetlb_folio(h, old_folio, false);
-	prep_new_hugetlb_folio(new_folio);
 
 	/*
 	 * Ref count on new_folio is already zero as it was dropped
 	 * earlier. It can be directly added to the pool free list.
 	 */
-	account_new_hugetlb_folio(h, new_folio);
+	prep_account_new_hugetlb_folio(h, new_folio);
 	enqueue_hugetlb_folio(h, new_folio);
 
 	/*
@@ -3318,8 +3310,7 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h,
 		hugetlb_bootmem_init_migratetype(folio, h);
 		/* Subdivide locks to achieve better parallel performance */
 		spin_lock_irqsave(&hugetlb_lock, flags);
-		prep_new_hugetlb_folio(folio);
-		account_new_hugetlb_folio(h, folio);
+		prep_account_new_hugetlb_folio(h, folio);
 		enqueue_hugetlb_folio(h, folio);
 		spin_unlock_irqrestore(&hugetlb_lock, flags);
 	}
--
2.20.1