Message-ID: <20251015082727.2395128-6-kevin.brodsky@arm.com>
Date: Wed, 15 Oct 2025 09:27:19 +0100
From: Kevin Brodsky <kevin.brodsky@....com>
To: linux-mm@...ck.org
Cc: linux-kernel@...r.kernel.org,
Kevin Brodsky <kevin.brodsky@....com>,
Alexander Gordeev <agordeev@...ux.ibm.com>,
Andreas Larsson <andreas@...sler.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Boris Ostrovsky <boris.ostrovsky@...cle.com>,
Borislav Petkov <bp@...en8.de>,
Catalin Marinas <catalin.marinas@....com>,
Christophe Leroy <christophe.leroy@...roup.eu>,
Dave Hansen <dave.hansen@...ux.intel.com>,
David Hildenbrand <david@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
"H. Peter Anvin" <hpa@...or.com>,
Ingo Molnar <mingo@...hat.com>,
Jann Horn <jannh@...gle.com>,
Juergen Gross <jgross@...e.com>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Madhavan Srinivasan <maddy@...ux.ibm.com>,
Michael Ellerman <mpe@...erman.id.au>,
Michal Hocko <mhocko@...e.com>,
Mike Rapoport <rppt@...nel.org>,
Nicholas Piggin <npiggin@...il.com>,
Peter Zijlstra <peterz@...radead.org>,
Ryan Roberts <ryan.roberts@....com>,
Suren Baghdasaryan <surenb@...gle.com>,
Thomas Gleixner <tglx@...utronix.de>,
Vlastimil Babka <vbabka@...e.cz>,
Will Deacon <will@...nel.org>,
Yeoreum Yun <yeoreum.yun@....com>,
linux-arm-kernel@...ts.infradead.org,
linuxppc-dev@...ts.ozlabs.org,
sparclinux@...r.kernel.org,
xen-devel@...ts.xenproject.org,
x86@...nel.org
Subject: [PATCH v3 05/13] mm: introduce CONFIG_ARCH_LAZY_MMU
Architectures currently opt into implementing the lazy_mmu helpers by
defining __HAVE_ARCH_ENTER_LAZY_MMU_MODE.
In preparation for introducing a generic lazy_mmu layer that will
require storage in task_struct, let's switch to a cleaner approach:
instead of defining a macro, select a CONFIG option.
Introduce CONFIG_ARCH_LAZY_MMU and have each arch select it when it
implements the lazy_mmu helpers.
__HAVE_ARCH_ENTER_LAZY_MMU_MODE is removed and <linux/pgtable.h>
relies on the new CONFIG instead.
On x86, the lazy_mmu helpers are only implemented if PARAVIRT_XXL is
selected. This creates some complications in arch/x86/boot/, because
a few files manually undefine PARAVIRT* options. As a result,
<asm/paravirt.h> does not define the lazy_mmu helpers there, which
breaks the build since <linux/pgtable.h> only provides the fallback
definitions if !CONFIG_ARCH_LAZY_MMU. There does not seem to be a
clean way out of this - let's just undefine the new CONFIG there too.
Signed-off-by: Kevin Brodsky <kevin.brodsky@....com>
---
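For illustration only (not part of this patch): under the new scheme, a
hypothetical architecture would select ARCH_LAZY_MMU from its Kconfig
entry and provide the three helpers in its <asm/pgtable.h>. A minimal
sketch, with purely illustrative file paths and comment bodies:

  /* arch/foo/Kconfig: select ARCH_LAZY_MMU */

  /* arch/foo/include/asm/pgtable.h */
  static inline void arch_enter_lazy_mmu_mode(void)
  {
  	/* start batching PTE updates (e.g. set a per-CPU flag) */
  }

  static inline void arch_leave_lazy_mmu_mode(void)
  {
  	/* apply any pending updates and stop batching */
  }

  static inline void arch_flush_lazy_mmu_mode(void)
  {
  	/* flush pending updates without leaving the mode */
  }
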
arch/arm64/Kconfig | 1 +
arch/arm64/include/asm/pgtable.h | 1 -
arch/powerpc/include/asm/book3s/64/tlbflush-hash.h | 2 --
arch/powerpc/platforms/Kconfig.cputype | 1 +
arch/sparc/Kconfig | 1 +
arch/sparc/include/asm/tlbflush_64.h | 2 --
arch/x86/Kconfig | 1 +
arch/x86/boot/compressed/misc.h | 1 +
arch/x86/boot/startup/sme.c | 1 +
arch/x86/include/asm/paravirt.h | 1 -
include/linux/pgtable.h | 2 +-
mm/Kconfig | 3 +++
12 files changed, 10 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6663ffd23f25..12d47a5f5e56 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -122,6 +122,7 @@ config ARM64
select ARCH_WANTS_NO_INSTR
select ARCH_WANTS_THP_SWAP if ARM64_4K_PAGES
select ARCH_HAS_UBSAN
+ select ARCH_LAZY_MMU
select ARM_AMBA
select ARM_ARCH_TIMER
select ARM_GIC
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index aa89c2e67ebc..e3cbb10288c4 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -80,7 +80,6 @@ static inline void queue_pte_barriers(void)
}
}
-#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
/*
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
index 7704dbe8e88d..623a8a8b2d0e 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -24,8 +24,6 @@ DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
-#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-
static inline void arch_enter_lazy_mmu_mode(void)
{
struct ppc64_tlb_batch *batch;
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 7b527d18aa5e..a5e06aaf19cd 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -93,6 +93,7 @@ config PPC_BOOK3S_64
select IRQ_WORK
select PPC_64S_HASH_MMU if !PPC_RADIX_MMU
select KASAN_VMALLOC if KASAN
+ select ARCH_LAZY_MMU
config PPC_BOOK3E_64
bool "Embedded processors"
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index a630d373e645..59f17996a353 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -112,6 +112,7 @@ config SPARC64
select NEED_PER_CPU_PAGE_FIRST_CHUNK
select ARCH_SUPPORTS_SCHED_SMT if SMP
select ARCH_SUPPORTS_SCHED_MC if SMP
+ select ARCH_LAZY_MMU
config ARCH_PROC_KCORE_TEXT
def_bool y
diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
index 925bb5d7a4e1..4e1036728e2f 100644
--- a/arch/sparc/include/asm/tlbflush_64.h
+++ b/arch/sparc/include/asm/tlbflush_64.h
@@ -39,8 +39,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-
void flush_tlb_pending(void);
void arch_enter_lazy_mmu_mode(void);
void arch_flush_lazy_mmu_mode(void);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index fa3b616af03a..85de037cad8c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -804,6 +804,7 @@ config PARAVIRT
config PARAVIRT_XXL
bool
depends on X86_64
+ select ARCH_LAZY_MMU
config PARAVIRT_DEBUG
bool "paravirt-ops debugging"
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index db1048621ea2..80b3b79a1001 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -11,6 +11,7 @@
#undef CONFIG_PARAVIRT
#undef CONFIG_PARAVIRT_XXL
#undef CONFIG_PARAVIRT_SPINLOCKS
+#undef CONFIG_ARCH_LAZY_MMU
#undef CONFIG_KASAN
#undef CONFIG_KASAN_GENERIC
diff --git a/arch/x86/boot/startup/sme.c b/arch/x86/boot/startup/sme.c
index e7ea65f3f1d6..af74d09b68bc 100644
--- a/arch/x86/boot/startup/sme.c
+++ b/arch/x86/boot/startup/sme.c
@@ -24,6 +24,7 @@
#undef CONFIG_PARAVIRT
#undef CONFIG_PARAVIRT_XXL
#undef CONFIG_PARAVIRT_SPINLOCKS
+#undef CONFIG_ARCH_LAZY_MMU
/*
* This code runs before CPU feature bits are set. By default, the
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index b5e59a7ba0d0..13f9cd31c8f8 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -526,7 +526,6 @@ static inline void arch_end_context_switch(struct task_struct *next)
PVOP_VCALL1(cpu.end_context_switch, next);
}
-#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
PVOP_VCALL0(mmu.lazy_mode.enter);
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 32e8457ad535..124d5fa2975f 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -231,7 +231,7 @@ static inline int pmd_dirty(pmd_t pmd)
* held, but for kernel PTE updates, no lock is held). Nesting is not permitted
* and the mode cannot be used in interrupt context.
*/
-#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#ifndef CONFIG_ARCH_LAZY_MMU
static inline void arch_enter_lazy_mmu_mode(void) {}
static inline void arch_leave_lazy_mmu_mode(void) {}
static inline void arch_flush_lazy_mmu_mode(void) {}
diff --git a/mm/Kconfig b/mm/Kconfig
index 0e26f4fc8717..2fdcb42ca1a1 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1372,6 +1372,9 @@ config PT_RECLAIM
config FIND_NORMAL_PAGE
def_bool n
+config ARCH_LAZY_MMU
+ bool
+
source "mm/damon/Kconfig"
endmenu
--
2.47.0