Message-ID: <e9ff5fa689740f3b6f3ed1c1ec0e3244f20c1f06.1766455378.git.baolin.wang@linux.alibaba.com>
Date: Tue, 23 Dec 2025 13:48:35 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: akpm@...ux-foundation.org,
        david@...nel.org,
        catalin.marinas@....com,
        will@...nel.org
Cc: lorenzo.stoakes@...cle.com,
        ryan.roberts@....com,
        Liam.Howlett@...cle.com,
        vbabka@...e.cz,
        rppt@...nel.org,
        surenb@...gle.com,
        mhocko@...e.com,
        riel@...riel.com,
        harry.yoo@...cle.com,
        jannh@...gle.com,
        willy@...radead.org,
        baohua@...nel.org,
        dev.jain@....com,
        baolin.wang@...ux.alibaba.com,
        linux-mm@...ck.org,
        linux-arm-kernel@...ts.infradead.org,
        linux-kernel@...r.kernel.org
Subject: [PATCH v4 1/5] mm: rmap: support batched checks of the references for large folios

Currently, folio_referenced_one() always checks the young flag for each
PTE sequentially, which is inefficient for large folios. This inefficiency
is especially noticeable when reclaiming clean file-backed large folios,
where folio_referenced() shows up as a significant performance hotspot.
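
For reference, the pre-patch behaviour boils down to roughly the
following per-PTE loop (a simplified sketch; the lru_gen, VM_LOCKED and
PMD-mapped branches are omitted):

        while (page_vma_mapped_walk(&pvmw)) {
                /* One clear + flush + notify call per PTE. */
                if (pvmw.pte &&
                    ptep_clear_flush_young_notify(vma, pvmw.address,
                                                  pvmw.pte))
                        referenced++;
                pra->mapcount--;
        }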

Moreover, the Arm64 architecture, which supports contiguous PTEs, already
has an optimization to clear the young flags for PTEs within a contiguous
range. However, this is not sufficient: a large folio might exceed the
contiguous range (CONT_PTE_SIZE). For example, with 4K base pages,
CONT_PTE_SIZE is 64K, so a 128K mTHP spans two contiguous ranges. We can
extend the batching to cover the entire large folio.

Introduce a new API, clear_flush_young_ptes(), to facilitate batched
checking of the young flags and flushing of TLB entries, thereby improving
performance when reclaiming large folios. It will be overridden by
architectures that implement a more efficient batched operation in the
following patches.
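
As a rough illustration only (not part of this patch; the real override
comes later in this series), an architecture could hook in through the
usual pgtable.h override pattern, e.g. clearing the access bits first and
then issuing a single ranged flush via the generic
ptep_test_and_clear_young() and flush_tlb_range() helpers:

        #define clear_flush_young_ptes clear_flush_young_ptes
        static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
                                                 unsigned long addr,
                                                 pte_t *ptep, unsigned int nr)
        {
                int young = 0;
                unsigned int i;

                /* Clear the access bits without flushing per entry. */
                for (i = 0; i < nr; i++)
                        young |= ptep_test_and_clear_young(vma,
                                        addr + i * PAGE_SIZE, ptep + i);
                /* One ranged flush instead of nr individual flushes. */
                if (young)
                        flush_tlb_range(vma, addr, addr + nr * PAGE_SIZE);
                return young;
        }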
Signed-off-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
---
 include/linux/mmu_notifier.h |  9 +++++----
 include/linux/pgtable.h      | 35 +++++++++++++++++++++++++++++++++++
 mm/rmap.c                    | 29 +++++++++++++++++++++++++++--
 3 files changed, 67 insertions(+), 6 deletions(-)

diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index d1094c2d5fb6..dbbdcef4abf1 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -515,16 +515,17 @@ static inline void mmu_notifier_range_init_owner(
         range->owner = owner;
 }
 
-#define ptep_clear_flush_young_notify(__vma, __address, __ptep)        \
+#define ptep_clear_flush_young_notify(__vma, __address, __ptep, __nr)  \
 ({                                                                      \
         int __young;                                                    \
         struct vm_area_struct *___vma = __vma;                          \
         unsigned long ___address = __address;                           \
                                                                         \
-        __young = ptep_clear_flush_young(___vma, ___address, __ptep);  \
+        unsigned int ___nr = __nr;                                      \
+        __young = clear_flush_young_ptes(___vma, ___address, __ptep, ___nr); \
         __young |= mmu_notifier_clear_flush_young(___vma->vm_mm,       \
                                                   ___address,          \
                                                   ___address +         \
-                                                  PAGE_SIZE);          \
+                                                  ___nr * PAGE_SIZE);  \
         __young;                                                        \
 })
@@ -650,7 +651,7 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
 
 #define mmu_notifier_range_update_to_read_only(r) false
 
-#define ptep_clear_flush_young_notify ptep_clear_flush_young
+#define ptep_clear_flush_young_notify clear_flush_young_ptes
 #define pmdp_clear_flush_young_notify pmdp_clear_flush_young
 #define ptep_clear_young_notify ptep_test_and_clear_young
 #define pmdp_clear_young_notify pmdp_test_and_clear_young
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 2f0dd3a4ace1..fcf7a7820061 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1087,6 +1087,41 @@ static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
 }
 #endif
 
+#ifndef clear_flush_young_ptes
+/**
+ * clear_flush_young_ptes - Clear the access bit and perform a TLB flush for PTEs
+ * that map consecutive pages of the same folio.
+ * @vma: The virtual memory area the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries to clear the access bit for.
+ *
+ * May be overridden by the architecture; otherwise, implemented as a simple
+ * loop over ptep_clear_flush_young().
+ *
+ * Note that PTE bits in the PTE range besides the PFN can differ. For example,
+ * some PTEs might be write-protected.
+ *
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. The PTEs are all in the same PMD.
+ */
+static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
+                                         unsigned long addr, pte_t *ptep,
+                                         unsigned int nr)
+{
+        int young;
+
+        young = ptep_clear_flush_young(vma, addr, ptep);
+        while (--nr) {
+                ptep++;
+                addr += PAGE_SIZE;
+                young |= ptep_clear_flush_young(vma, addr, ptep);
+        }
+
+        return young;
+}
+#endif
+
 /*
  * On some architectures hardware does not set page access bit when accessing
  * memory page, it is responsibility of software setting this bit. It brings
diff --git a/mm/rmap.c b/mm/rmap.c
index d6799afe1114..a0fc05f5966f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -827,9 +827,11 @@ static bool folio_referenced_one(struct folio *folio,
         struct folio_referenced_arg *pra = arg;
         DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
         int ptes = 0, referenced = 0;
+        unsigned int nr;
 
         while (page_vma_mapped_walk(&pvmw)) {
                 address = pvmw.address;
+                nr = 1;
 
                 if (vma->vm_flags & VM_LOCKED) {
                         ptes++;
@@ -874,9 +876,24 @@ static bool folio_referenced_one(struct folio *folio,
                         if (lru_gen_look_around(&pvmw))
                                 referenced++;
                 } else if (pvmw.pte) {
+                        if (folio_test_large(folio)) {
+                                unsigned long end_addr =
+                                        pmd_addr_end(address, vma->vm_end);
+                                unsigned int max_nr =
+                                        (end_addr - address) >> PAGE_SHIFT;
+                                pte_t pteval = ptep_get(pvmw.pte);
+
+                                nr = folio_pte_batch(folio, pvmw.pte,
+                                                     pteval, max_nr);
+                        }
+
+                        ptes += nr;
                         if (ptep_clear_flush_young_notify(vma, address,
-                                                          pvmw.pte))
+                                                          pvmw.pte, nr))
                                 referenced++;
+                        /* Skip the batched PTEs */
+                        pvmw.pte += nr - 1;
+                        pvmw.address += (nr - 1) * PAGE_SIZE;
                 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
                         if (pmdp_clear_flush_young_notify(vma, address,
                                                           pvmw.pmd))
@@ -886,7 +903,15 @@ static bool folio_referenced_one(struct folio *folio,
                         WARN_ON_ONCE(1);
                 }
 
-                pra->mapcount--;
+                pra->mapcount -= nr;
+                /*
+                 * If we are sure that we batched the entire folio,
+                 * we can just optimize and stop right here.
+                 */
+                if (ptes == pvmw.nr_pages) {
+                        page_vma_mapped_walk_done(&pvmw);
+                        break;
+                }
         }
 
         if (referenced)
--
2.47.3