Message-ID: <20251015082727.2395128-13-kevin.brodsky@arm.com>
Date: Wed, 15 Oct 2025 09:27:26 +0100
From: Kevin Brodsky <kevin.brodsky@....com>
To: linux-mm@...ck.org
Cc: linux-kernel@...r.kernel.org,
Kevin Brodsky <kevin.brodsky@....com>,
Alexander Gordeev <agordeev@...ux.ibm.com>,
Andreas Larsson <andreas@...sler.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Boris Ostrovsky <boris.ostrovsky@...cle.com>,
Borislav Petkov <bp@...en8.de>,
Catalin Marinas <catalin.marinas@....com>,
Christophe Leroy <christophe.leroy@...roup.eu>,
Dave Hansen <dave.hansen@...ux.intel.com>,
David Hildenbrand <david@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
"H. Peter Anvin" <hpa@...or.com>,
Ingo Molnar <mingo@...hat.com>,
Jann Horn <jannh@...gle.com>,
Juergen Gross <jgross@...e.com>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Madhavan Srinivasan <maddy@...ux.ibm.com>,
Michael Ellerman <mpe@...erman.id.au>,
Michal Hocko <mhocko@...e.com>,
Mike Rapoport <rppt@...nel.org>,
Nicholas Piggin <npiggin@...il.com>,
Peter Zijlstra <peterz@...radead.org>,
Ryan Roberts <ryan.roberts@....com>,
Suren Baghdasaryan <surenb@...gle.com>,
Thomas Gleixner <tglx@...utronix.de>,
Vlastimil Babka <vbabka@...e.cz>,
Will Deacon <will@...nel.org>,
Yeoreum Yun <yeoreum.yun@....com>,
linux-arm-kernel@...ts.infradead.org,
linuxppc-dev@...ts.ozlabs.org,
sparclinux@...r.kernel.org,
xen-devel@...ts.xenproject.org,
x86@...nel.org
Subject: [PATCH v3 12/13] mm: bail out of lazy_mmu_mode_* in interrupt context

The lazy MMU mode cannot be used in interrupt context. This is
documented in <linux/pgtable.h>, but isn't consistently handled
across architectures.

arm64 ensures that calls to lazy_mmu_mode_* have no effect in
interrupt context, because such calls do occur in certain
configurations - see commit b81c688426a9 ("arm64/mm: Disable barrier
batching in interrupt contexts"). Other architectures do not check
for this situation, most likely because it hasn't arisen so far.

Both arm64 and x86/Xen also ensure that any lazy MMU optimisation is
disabled while in interrupt context (see queue_pte_barriers() and
xen_get_lazy_mode() respectively).
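
For reference, the Xen check looks roughly like this (simplified
sketch of xen_get_lazy_mode(), not a verbatim quote):

	/* Report XEN_LAZY_NONE in interrupt context, disabling batching */
	enum xen_lazy_mode xen_get_lazy_mode(void)
	{
		if (in_interrupt())
			return XEN_LAZY_NONE;

		return this_cpu_read(xen_lazy_mode);
	}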

Let's handle this in the new generic lazy_mmu layer, in the same
fashion as arm64: bail out of lazy_mmu_mode_* if in_interrupt(), and
have in_lazy_mmu_mode() return false to disable any optimisation.
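
To illustrate the resulting behaviour, consider a made-up helper that
may be called from interrupt context (update_kernel_pte() is
hypothetical; only the lazy_mmu_mode_* and set_pte_at() calls are
real API):

	/* Hypothetical caller, possibly running in interrupt context */
	static void update_kernel_pte(unsigned long addr, pte_t *ptep,
				      pte_t pte)
	{
		lazy_mmu_mode_enable();		/* no-op in interrupt context */

		/*
		 * in_lazy_mmu_mode() returns false in interrupt context,
		 * so the update is not deferred; on arm64,
		 * queue_pte_barriers() emits its barriers immediately.
		 */
		set_pte_at(&init_mm, addr, ptep, pte);

		lazy_mmu_mode_disable();	/* no-op as well */
	}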

Also remove the arm64 handling that is now redundant; x86/Xen has
its own internal tracking, so it is left unchanged.

Signed-off-by: Kevin Brodsky <kevin.brodsky@....com>
---
 arch/arm64/include/asm/pgtable.h | 17 +----------------
 include/linux/pgtable.h          | 16 ++++++++++++++--
 include/linux/sched.h            |  3 +++
 3 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 944e512767db..a37f417c30be 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -62,37 +62,22 @@ static inline void emit_pte_barriers(void)
 
 static inline void queue_pte_barriers(void)
 {
-	if (in_interrupt()) {
-		emit_pte_barriers();
-		return;
-	}
-
 	if (in_lazy_mmu_mode())
 		test_and_set_thread_flag(TIF_LAZY_MMU_PENDING);
 	else
 		emit_pte_barriers();
 }
 
-static inline void arch_enter_lazy_mmu_mode(void)
-{
-	if (in_interrupt())
-		return;
-}
+static inline void arch_enter_lazy_mmu_mode(void) {}
 
 static inline void arch_flush_lazy_mmu_mode(void)
 {
-	if (in_interrupt())
-		return;
-
 	if (test_and_clear_thread_flag(TIF_LAZY_MMU_PENDING))
 		emit_pte_barriers();
 }
 
 static inline void arch_leave_lazy_mmu_mode(void)
 {
-	if (in_interrupt())
-		return;
-
 	arch_flush_lazy_mmu_mode();
 }
 
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 269225a733de..718c9c788114 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -228,8 +228,8 @@ static inline int pmd_dirty(pmd_t pmd)
  * of the lazy mode. So the implementation must assume preemption may be enabled
  * and cpu migration is possible; it must take steps to be robust against this.
  * (In practice, for user PTE updates, the appropriate page table lock(s) are
- * held, but for kernel PTE updates, no lock is held). The mode cannot be used
- * in interrupt context.
+ * held, but for kernel PTE updates, no lock is held). The mode is disabled
+ * in interrupt context and calls to the lazy_mmu API have no effect.
  *
  * The lazy MMU mode is enabled for a given block of code using:
  *
@@ -265,6 +265,9 @@ static inline void lazy_mmu_mode_enable(void)
 {
 	struct lazy_mmu_state *state = &current->lazy_mmu_state;
 
+	if (in_interrupt())
+		return;
+
 	VM_BUG_ON(state->count == U8_MAX);
 	/* enable() must not be called while paused */
 	VM_WARN_ON(state->count > 0 && !state->enabled);
@@ -280,6 +283,9 @@ static inline void lazy_mmu_mode_disable(void)
 {
 	struct lazy_mmu_state *state = &current->lazy_mmu_state;
 
+	if (in_interrupt())
+		return;
+
 	VM_BUG_ON(state->count == 0);
 	VM_WARN_ON(!state->enabled);
 
@@ -297,6 +303,9 @@ static inline void lazy_mmu_mode_pause(void)
 {
 	struct lazy_mmu_state *state = &current->lazy_mmu_state;
 
+	if (in_interrupt())
+		return;
+
 	VM_WARN_ON(state->count == 0 || !state->enabled);
 
 	state->enabled = false;
@@ -307,6 +316,9 @@ static inline void lazy_mmu_mode_resume(void)
 {
 	struct lazy_mmu_state *state = &current->lazy_mmu_state;
 
+	if (in_interrupt())
+		return;
+
 	VM_WARN_ON(state->count == 0 || state->enabled);
 
 	arch_enter_lazy_mmu_mode();
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2862d8bf2160..beb3e6cfddd9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1731,6 +1731,9 @@ static inline char task_state_to_char(struct task_struct *tsk)
 #ifdef CONFIG_ARCH_LAZY_MMU
 static inline bool in_lazy_mmu_mode(void)
 {
+	if (in_interrupt())
+		return false;
+
 	return current->lazy_mmu_state.enabled;
 }
 #else

--
2.47.0