Message-ID: <20251205194351.1646318-9-kas@kernel.org>
Date: Fri, 5 Dec 2025 19:43:44 +0000
From: Kiryl Shutsemau <kas@...nel.org>
To: Andrew Morton <akpm@...ux-foundation.org>,
Muchun Song <muchun.song@...ux.dev>
Cc: David Hildenbrand <david@...nel.org>,
Oscar Salvador <osalvador@...e.de>,
Mike Rapoport <rppt@...nel.org>,
Vlastimil Babka <vbabka@...e.cz>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Matthew Wilcox <willy@...radead.org>,
Zi Yan <ziy@...dia.com>,
Baoquan He <bhe@...hat.com>,
Michal Hocko <mhocko@...e.com>,
Johannes Weiner <hannes@...xchg.org>,
Jonathan Corbet <corbet@....net>,
Usama Arif <usamaarif642@...il.com>,
kernel-team@...a.com,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
linux-doc@...r.kernel.org,
Kiryl Shutsemau <kas@...nel.org>
Subject: [PATCH 08/11] hugetlb: Remove VMEMMAP_SYNCHRONIZE_RCU
Fake heads no longer exist, so there is no longer any need to synchronize
against concurrent writes from page_ref_add_unless().
Remove the flag and the synchronize_rcu() calls that it gated.
Signed-off-by: Kiryl Shutsemau <kas@...nel.org>
---
mm/hugetlb_vmemmap.c | 20 ++++----------------
1 file changed, 4 insertions(+), 16 deletions(-)
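Note for reviewers, not part of the patch: the sketch below illustrates the
speculative-reference pattern that VMEMMAP_SYNCHRONIZE_RCU used to fence
against. The refcount write in page_ref_add_unless() is what the grace
period waited out while fake heads still shared read-only vmemmap pages.
The helper name try_get_folio_speculative() is illustrative only, not a
kernel API.

	/*
	 * Editor's sketch, not kernel code: a lockless lookup takes a
	 * speculative reference under RCU.  The refcount write in
	 * page_ref_add_unless() is the store that VMEMMAP_SYNCHRONIZE_RCU
	 * fenced against while fake heads shared read-only vmemmap pages.
	 */
	#include <linux/page_ref.h>
	#include <linux/rcupdate.h>

	static bool try_get_folio_speculative(struct folio *folio)
	{
		bool got;

		rcu_read_lock();
		/* Take a reference unless the refcount is already zero. */
		got = page_ref_add_unless(&folio->page, 1, 0);
		rcu_read_unlock();

		return got;
	}

With fake heads gone (earlier in this series), that write can only land on
the real head page, whose vmemmap stays writable, so no grace period has to
pass before remapping and the "synchronize once per batch" dance below can
go away as well.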
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 2543bdbcae20..0f142e4eafb9 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -48,8 +48,6 @@ struct vmemmap_remap_walk {
#define VMEMMAP_SPLIT_NO_TLB_FLUSH BIT(0)
/* Skip the TLB flush when we remap the PTE */
#define VMEMMAP_REMAP_NO_TLB_FLUSH BIT(1)
-/* synchronize_rcu() to avoid writes from page_ref_add_unless() */
-#define VMEMMAP_SYNCHRONIZE_RCU BIT(2)
unsigned long flags;
};
@@ -423,9 +421,6 @@ static int __hugetlb_vmemmap_restore_folio(const struct hstate *h,
if (!folio_test_hugetlb_vmemmap_optimized(folio))
return 0;
- if (flags & VMEMMAP_SYNCHRONIZE_RCU)
- synchronize_rcu();
-
vmemmap_start = (unsigned long)folio;
vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
@@ -457,7 +452,7 @@ static int __hugetlb_vmemmap_restore_folio(const struct hstate *h,
*/
int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio)
{
- return __hugetlb_vmemmap_restore_folio(h, folio, VMEMMAP_SYNCHRONIZE_RCU);
+ return __hugetlb_vmemmap_restore_folio(h, folio, 0);
}
/**
@@ -480,14 +475,11 @@ long hugetlb_vmemmap_restore_folios(const struct hstate *h,
struct folio *folio, *t_folio;
long restored = 0;
long ret = 0;
- unsigned long flags = VMEMMAP_REMAP_NO_TLB_FLUSH | VMEMMAP_SYNCHRONIZE_RCU;
+ unsigned long flags = VMEMMAP_REMAP_NO_TLB_FLUSH;
list_for_each_entry_safe(folio, t_folio, folio_list, lru) {
if (folio_test_hugetlb_vmemmap_optimized(folio)) {
ret = __hugetlb_vmemmap_restore_folio(h, folio, flags);
- /* only need to synchronize_rcu() once for each batch */
- flags &= ~VMEMMAP_SYNCHRONIZE_RCU;
-
if (ret)
break;
restored++;
@@ -558,8 +550,6 @@ static int __hugetlb_vmemmap_optimize_folio(struct hstate *h,
static_branch_inc(&hugetlb_optimize_vmemmap_key);
- if (flags & VMEMMAP_SYNCHRONIZE_RCU)
- synchronize_rcu();
/*
* Very Subtle
* If VMEMMAP_REMAP_NO_TLB_FLUSH is set, TLB flushing is not performed
@@ -621,7 +611,7 @@ void hugetlb_vmemmap_optimize_folio(struct hstate *h, struct folio *folio)
{
LIST_HEAD(vmemmap_pages);
- __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, VMEMMAP_SYNCHRONIZE_RCU);
+ __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, 0);
free_vmemmap_page_list(&vmemmap_pages);
}
@@ -649,7 +639,7 @@ static void __hugetlb_vmemmap_optimize_folios(struct hstate *h,
struct folio *folio;
int nr_to_optimize;
LIST_HEAD(vmemmap_pages);
- unsigned long flags = VMEMMAP_REMAP_NO_TLB_FLUSH | VMEMMAP_SYNCHRONIZE_RCU;
+ unsigned long flags = VMEMMAP_REMAP_NO_TLB_FLUSH;
nr_to_optimize = 0;
list_for_each_entry(folio, folio_list, lru) {
@@ -702,8 +692,6 @@ static void __hugetlb_vmemmap_optimize_folios(struct hstate *h,
int ret;
ret = __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, flags);
- /* only need to synchronize_rcu() once for each batch */
- flags &= ~VMEMMAP_SYNCHRONIZE_RCU;
/*
* Pages to be freed may have been accumulated. If we
--
2.51.2