Message-ID: <20251015082727.2395128-14-kevin.brodsky@arm.com>
Date: Wed, 15 Oct 2025 09:27:27 +0100
From: Kevin Brodsky <kevin.brodsky@....com>
To: linux-mm@...ck.org
Cc: linux-kernel@...r.kernel.org,
Kevin Brodsky <kevin.brodsky@....com>,
Alexander Gordeev <agordeev@...ux.ibm.com>,
Andreas Larsson <andreas@...sler.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Boris Ostrovsky <boris.ostrovsky@...cle.com>,
Borislav Petkov <bp@...en8.de>,
Catalin Marinas <catalin.marinas@....com>,
Christophe Leroy <christophe.leroy@...roup.eu>,
Dave Hansen <dave.hansen@...ux.intel.com>,
David Hildenbrand <david@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
"H. Peter Anvin" <hpa@...or.com>,
Ingo Molnar <mingo@...hat.com>,
Jann Horn <jannh@...gle.com>,
Juergen Gross <jgross@...e.com>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Madhavan Srinivasan <maddy@...ux.ibm.com>,
Michael Ellerman <mpe@...erman.id.au>,
Michal Hocko <mhocko@...e.com>,
Mike Rapoport <rppt@...nel.org>,
Nicholas Piggin <npiggin@...il.com>,
Peter Zijlstra <peterz@...radead.org>,
Ryan Roberts <ryan.roberts@....com>,
Suren Baghdasaryan <surenb@...gle.com>,
Thomas Gleixner <tglx@...utronix.de>,
Vlastimil Babka <vbabka@...e.cz>,
Will Deacon <will@...nel.org>,
Yeoreum Yun <yeoreum.yun@....com>,
linux-arm-kernel@...ts.infradead.org,
linuxppc-dev@...ts.ozlabs.org,
sparclinux@...r.kernel.org,
xen-devel@...ts.xenproject.org,
x86@...nel.org
Subject: [PATCH v3 13/13] mm: introduce arch_wants_lazy_mmu_mode()
powerpc decides at runtime whether the lazy MMU mode should be used
(it is only needed when the hash MMU is in use, not radix). To avoid
the overhead of managing task_struct::lazy_mmu_state when the mode
isn't used, introduce arch_wants_lazy_mmu_mode() and bail out of the
lazy_mmu_mode_* helpers early if it returns false. Add a default
definition returning true, and an appropriate implementation for
powerpc.
Signed-off-by: Kevin Brodsky <kevin.brodsky@....com>
---
This patch seemed like a good idea to start with, but now I'm not so
sure that the churn added to the generic layer is worth it.
It provides a minor optimisation, and only for powerpc. x86 with
XEN_PV also decides at runtime whether the lazy_mmu helpers do any
real work, but it doesn't fit this API as neatly and isn't handled
here.
---
.../include/asm/book3s/64/tlbflush-hash.h | 11 ++++++-----
include/linux/pgtable.h | 16 ++++++++++++----
2 files changed, 18 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
index bbc54690d374..a91b354cf87c 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -23,10 +23,14 @@ DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
+#define arch_wants_lazy_mmu_mode arch_wants_lazy_mmu_mode
+static inline bool arch_wants_lazy_mmu_mode(void)
+{
+ return !radix_enabled();
+}
+
static inline void arch_enter_lazy_mmu_mode(void)
{
- if (radix_enabled())
- return;
/*
* apply_to_page_range can call us this preempt enabled when
* operating on kernel page tables.
@@ -46,9 +50,6 @@ static inline void arch_flush_lazy_mmu_mode(void)
static inline void arch_leave_lazy_mmu_mode(void)
{
- if (radix_enabled())
- return;
-
arch_flush_lazy_mmu_mode();
preempt_enable();
}
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 718c9c788114..db4f388d2a16 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -261,11 +261,19 @@ static inline int pmd_dirty(pmd_t pmd)
* currently enabled.
*/
#ifdef CONFIG_ARCH_LAZY_MMU
+
+#ifndef arch_wants_lazy_mmu_mode
+static inline bool arch_wants_lazy_mmu_mode(void)
+{
+ return true;
+}
+#endif
+
static inline void lazy_mmu_mode_enable(void)
{
struct lazy_mmu_state *state = &current->lazy_mmu_state;
- if (in_interrupt())
+ if (!arch_wants_lazy_mmu_mode() || in_interrupt())
return;
VM_BUG_ON(state->count == U8_MAX);
@@ -283,7 +291,7 @@ static inline void lazy_mmu_mode_disable(void)
{
struct lazy_mmu_state *state = &current->lazy_mmu_state;
- if (in_interrupt())
+ if (!arch_wants_lazy_mmu_mode() || in_interrupt())
return;
VM_BUG_ON(state->count == 0);
@@ -303,7 +311,7 @@ static inline void lazy_mmu_mode_pause(void)
{
struct lazy_mmu_state *state = &current->lazy_mmu_state;
- if (in_interrupt())
+ if (!arch_wants_lazy_mmu_mode() || in_interrupt())
return;
VM_WARN_ON(state->count == 0 || !state->enabled);
@@ -316,7 +324,7 @@ static inline void lazy_mmu_mode_resume(void)
{
struct lazy_mmu_state *state = &current->lazy_mmu_state;
- if (in_interrupt())
+ if (!arch_wants_lazy_mmu_mode() || in_interrupt())
return;
VM_WARN_ON(state->count == 0 || state->enabled);
--
2.47.0