Message-ID: <20260106120303.38124-3-lance.yang@linux.dev>
Date: Tue, 6 Jan 2026 20:03:03 +0800
From: Lance Yang <lance.yang@...ux.dev>
To: akpm@...ux-foundation.org
Cc: david@...nel.org,
dave.hansen@...el.com,
dave.hansen@...ux.intel.com,
will@...nel.org,
aneesh.kumar@...nel.org,
npiggin@...il.com,
peterz@...radead.org,
tglx@...utronix.de,
mingo@...hat.com,
bp@...en8.de,
x86@...nel.org,
hpa@...or.com,
arnd@...db.de,
lorenzo.stoakes@...cle.com,
ziy@...dia.com,
baolin.wang@...ux.alibaba.com,
Liam.Howlett@...cle.com,
npache@...hat.com,
ryan.roberts@....com,
dev.jain@....com,
baohua@...nel.org,
shy828301@...il.com,
riel@...riel.com,
jannh@...gle.com,
linux-arch@...r.kernel.org,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
ioworker0@...il.com,
Lance Yang <lance.yang@...ux.dev>
Subject: [PATCH RESEND v3 2/2] mm: introduce pmdp_collapse_flush_sync() to skip redundant IPI
From: Lance Yang <lance.yang@...ux.dev>
pmdp_collapse_flush() may already send IPIs to flush TLBs, and then
callers send another IPI via tlb_remove_table_sync_one() or
pmdp_get_lockless_sync() to synchronize with concurrent GUP-fast walkers.
However, since GUP-fast runs with IRQs disabled, the TLB flush IPI already
provides the necessary synchronization. We can avoid the redundant second
IPI.
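
To see why the second IPI is redundant, here is a simplified sketch of
the GUP-fast critical section (illustrative only, not the exact
mm/gup.c code):

	unsigned long flags;

	/*
	 * GUP-fast walks page tables with IRQs disabled, so an IPI
	 * broadcast by the TLB flush cannot be serviced until the
	 * walker leaves this section. Once the flush IPI has run on
	 * all CPUs, no concurrent walker can still be using the old
	 * PMD; it will re-read the PMD and back off.
	 */
	local_irq_save(flags);
	/* ... lockless page-table walk, rechecking the PMD ... */
	local_irq_restore(flags);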
Introduce pmdp_collapse_flush_sync(), which combines flush and sync:

- For architectures using the generic pmdp_collapse_flush()
  implementation (e.g., x86): use mmu_gather to track whether the TLB
  flush sent IPIs. If it did, tlb_gather_remove_table_sync_one() skips
  the redundant sync IPI (a rough sketch of this follows the list).

- For architectures with a custom pmdp_collapse_flush() (s390, riscv,
  powerpc): fall back to calling pmdp_collapse_flush() followed by
  tlb_remove_table_sync_one(). No behavior change.
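
Roughly, the generic path amounts to the following (a sketch using a
hypothetical flag name; the actual tracking lives in the mmu_gather
changes earlier in this series):

	void tlb_gather_remove_table_sync_one(struct mmu_gather *tlb)
	{
		/* The TLB flush above already IPI'd all relevant CPUs. */
		if (tlb->ipi_sent)	/* hypothetical field */
			return;
		/* No IPI was sent by the flush; sync explicitly now. */
		tlb_remove_table_sync_one();
	}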
Update khugepaged to use pmdp_collapse_flush_sync() instead of separate
flush and sync calls. Remove the now-unused pmdp_get_lockless_sync() macro.

Suggested-by: David Hildenbrand (Red Hat) <david@...nel.org>
Signed-off-by: Lance Yang <lance.yang@...ux.dev>
---
 include/linux/pgtable.h | 13 +++++++++----
 mm/khugepaged.c         |  9 +++------
 mm/pgtable-generic.c    | 34 ++++++++++++++++++++++++++++++++++
 3 files changed, 46 insertions(+), 10 deletions(-)

diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index eb8aacba3698..69e290dab450 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -755,7 +755,6 @@ static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
 	return pmd;
 }
 #define pmdp_get_lockless pmdp_get_lockless
-#define pmdp_get_lockless_sync() tlb_remove_table_sync_one()
 #endif /* CONFIG_PGTABLE_LEVELS > 2 */
 #endif /* CONFIG_GUP_GET_PXX_LOW_HIGH */
 
@@ -774,9 +773,6 @@ static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
 {
 	return pmdp_get(pmdp);
 }
-static inline void pmdp_get_lockless_sync(void)
-{
-}
 #endif
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -1174,6 +1170,8 @@ static inline void pudp_set_wrprotect(struct mm_struct *mm,
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
 				 unsigned long address, pmd_t *pmdp);
+extern pmd_t pmdp_collapse_flush_sync(struct vm_area_struct *vma,
+				      unsigned long address, pmd_t *pmdp);
 #else
 static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
 					unsigned long address,
@@ -1182,6 +1180,13 @@ static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
 	BUILD_BUG();
 	return *pmdp;
 }
+static inline pmd_t pmdp_collapse_flush_sync(struct vm_area_struct *vma,
+					     unsigned long address,
+					     pmd_t *pmdp)
+{
+	BUILD_BUG();
+	return *pmdp;
+}
 #define pmdp_collapse_flush pmdp_collapse_flush
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 9f790ec34400..0a98afc85c50 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1177,10 +1177,9 @@ static enum scan_result collapse_huge_page(struct mm_struct *mm, unsigned long a
 	 * Parallel GUP-fast is fine since GUP-fast will back off when
 	 * it detects PMD is changed.
 	 */
-	_pmd = pmdp_collapse_flush(vma, address, pmd);
+	_pmd = pmdp_collapse_flush_sync(vma, address, pmd);
 	spin_unlock(pmd_ptl);
 	mmu_notifier_invalidate_range_end(&range);
-	tlb_remove_table_sync_one();
 
 	pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
 	if (pte) {
@@ -1663,8 +1662,7 @@ static enum scan_result try_collapse_pte_mapped_thp(struct mm_struct *mm, unsign
 			}
 		}
 	}
-	pgt_pmd = pmdp_collapse_flush(vma, haddr, pmd);
-	pmdp_get_lockless_sync();
+	pgt_pmd = pmdp_collapse_flush_sync(vma, haddr, pmd);
 	pte_unmap_unlock(start_pte, ptl);
 	if (ptl != pml)
 		spin_unlock(pml);
@@ -1817,8 +1815,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 		 * races against the prior checks.
 		 */
 		if (likely(file_backed_vma_is_retractable(vma))) {
-			pgt_pmd = pmdp_collapse_flush(vma, addr, pmd);
-			pmdp_get_lockless_sync();
+			pgt_pmd = pmdp_collapse_flush_sync(vma, addr, pmd);
 			success = true;
 		}
 
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index d3aec7a9926a..be2ee82e6fc4 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -233,6 +233,40 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
 	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	return pmd;
 }
+
+pmd_t pmdp_collapse_flush_sync(struct vm_area_struct *vma, unsigned long address,
+			       pmd_t *pmdp)
+{
+	struct mmu_gather tlb;
+	pmd_t pmd;
+
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	VM_BUG_ON(pmd_trans_huge(*pmdp));
+
+	tlb_gather_mmu(&tlb, vma->vm_mm);
+	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
+
+	flush_tlb_mm_range(vma->vm_mm, address, address + HPAGE_PMD_SIZE,
+			   PAGE_SHIFT, true, &tlb);
+
+	/*
+	 * Synchronize with GUP-fast. If the flush sent IPIs, skip the
+	 * redundant sync IPI.
+	 */
+	tlb_gather_remove_table_sync_one(&tlb);
+	tlb_finish_mmu(&tlb);
+	return pmd;
+}
+#else
+pmd_t pmdp_collapse_flush_sync(struct vm_area_struct *vma, unsigned long address,
+			       pmd_t *pmdp)
+{
+	pmd_t pmd;
+
+	pmd = pmdp_collapse_flush(vma, address, pmdp);
+	tlb_remove_table_sync_one();
+	return pmd;
+}
 #endif
 
 /* arch define pte_free_defer in asm/pgalloc.h for its own implementation */
--
2.49.0