Message-Id: <20230209111459.1891941-20-sashal@kernel.org>
Date: Thu, 9 Feb 2023 06:14:39 -0500
From: Sasha Levin <sashal@...nel.org>
To: linux-kernel@...r.kernel.org, stable@...r.kernel.org
Cc: Nicholas Piggin <npiggin@...il.com>,
Michael Ellerman <mpe@...erman.id.au>,
Sasha Levin <sashal@...nel.org>, christophe.leroy@...roup.eu,
pmladek@...e.com, arnd@...db.de, bigeasy@...utronix.de,
heying24@...wei.com, joel@....id.au, Julia.Lawall@...ia.fr,
ganeshgr@...ux.ibm.com, linuxppc-dev@...ts.ozlabs.org
Subject: [PATCH AUTOSEL 6.1 20/38] powerpc/64: Fix perf profiling asynchronous interrupt handlers
From: Nicholas Piggin <npiggin@...il.com>
[ Upstream commit c28548012ee2bac55772ef7685138bd1124b80c3 ]
Interrupt entry sets the soft mask to IRQS_ALL_DISABLED to match the
hard irq disabled state. So when should_hard_irq_enable() returns true
because we want PMI interrupts in irq handlers, MSR[EE] is enabled but
PMIs just get soft-masked. Fix this by clearing IRQS_PMI_DISABLED before
enabling MSR[EE].
This also tidies up some of the warnings; there is no need to duplicate
them in both should_hard_irq_enable() and do_hard_irq_enable().
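
As a quick illustration of why enabling MSR[EE] alone is not enough,
here is a minimal userspace sketch, NOT kernel code: the IRQS_* values
mirror the definitions in arch/powerpc/include/asm/hw_irq.h, while
soft_mask, msr_ee and pmi_delivered() are made-up stand-ins for the
PACA soft mask and the hardware delivery check.

    #include <stdio.h>

    /* Values mirror asm/hw_irq.h; shown here only for illustration. */
    #define IRQS_DISABLED      1
    #define IRQS_PMI_DISABLED  2
    #define IRQS_ALL_DISABLED  (IRQS_DISABLED | IRQS_PMI_DISABLED)

    static unsigned long soft_mask;
    static int msr_ee;	/* stand-in for MSR[EE] */

    static int pmi_delivered(void)
    {
    	/* A PMI is taken only if MSR[EE]=1 and it is not soft-masked. */
    	return msr_ee && !(soft_mask & IRQS_PMI_DISABLED);
    }

    int main(void)
    {
    	soft_mask = IRQS_ALL_DISABLED;	/* state on async interrupt entry */

    	msr_ee = 1;			/* old behaviour: enable EE only */
    	printf("EE only:  PMI taken? %d\n", pmi_delivered());	/* 0 */

    	soft_mask &= ~IRQS_PMI_DISABLED; /* the fix: clear PMI soft mask */
    	printf("with fix: PMI taken? %d\n", pmi_delivered());	/* 1 */
    	return 0;
    }

With IRQS_PMI_DISABLED still set in the soft mask, the PMI is only
recorded as pending rather than taken, so perf samples of the interrupt
handler are lost; clearing that bit before setting MSR[EE] is what
irq_soft_mask_andc_return(IRQS_PMI_DISABLED) does in the hunk below.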
Signed-off-by: Nicholas Piggin <npiggin@...il.com>
Signed-off-by: Michael Ellerman <mpe@...erman.id.au>
Link: https://lore.kernel.org/r/20230121100156.2824054-1-npiggin@gmail.com
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
arch/powerpc/include/asm/hw_irq.h | 41 ++++++++++++++++++++++---------
arch/powerpc/kernel/dbell.c | 2 +-
arch/powerpc/kernel/irq.c | 2 +-
arch/powerpc/kernel/time.c | 2 +-
4 files changed, 32 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 77fa88c2aed0d..265d0eb7ed796 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -173,6 +173,15 @@ static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
return flags;
}
+static inline notrace unsigned long irq_soft_mask_andc_return(unsigned long mask)
+{
+ unsigned long flags = irq_soft_mask_return();
+
+ irq_soft_mask_set(flags & ~mask);
+
+ return flags;
+}
+
static inline unsigned long arch_local_save_flags(void)
{
return irq_soft_mask_return();
@@ -331,10 +340,11 @@ bool power_pmu_wants_prompt_pmi(void);
* is a different soft-masked interrupt pending that requires hard
* masking.
*/
-static inline bool should_hard_irq_enable(void)
+static inline bool should_hard_irq_enable(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
- WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+ WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
+ WARN_ON(!(get_paca()->irq_happened & PACA_IRQ_HARD_DIS));
WARN_ON(mfmsr() & MSR_EE);
}
@@ -347,8 +357,17 @@ static inline bool should_hard_irq_enable(void)
*
* TODO: Add test for 64e
*/
- if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
- return false;
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
+ if (!power_pmu_wants_prompt_pmi())
+ return false;
+ /*
+ * If PMIs are disabled then IRQs should be disabled as well,
+ * so we shouldn't see this condition, check for it just in
+ * case because we are about to enable PMIs.
+ */
+ if (WARN_ON_ONCE(regs->softe & IRQS_PMI_DISABLED))
+ return false;
+ }
if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
return false;
@@ -358,18 +377,16 @@ static inline bool should_hard_irq_enable(void)
/*
* Do the hard enabling, only call this if should_hard_irq_enable is true.
+ * This allows PMI interrupts to profile irq handlers.
*/
static inline void do_hard_irq_enable(void)
{
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
- WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
- WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
- WARN_ON(mfmsr() & MSR_EE);
- }
/*
- * This allows PMI interrupts (and watchdog soft-NMIs) through.
- * There is no other reason to enable this way.
+ * Asynch interrupts come in with IRQS_ALL_DISABLED,
+ * PACA_IRQ_HARD_DIS, and MSR[EE]=0.
*/
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ irq_soft_mask_andc_return(IRQS_PMI_DISABLED);
get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
__hard_irq_enable();
}
@@ -452,7 +469,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
return !(regs->msr & MSR_EE);
}
-static __always_inline bool should_hard_irq_enable(void)
+static __always_inline bool should_hard_irq_enable(struct pt_regs *regs)
{
return false;
}
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index f55c6fb34a3a0..5712dd846263c 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -27,7 +27,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
ppc_msgsync();
- if (should_hard_irq_enable())
+ if (should_hard_irq_enable(regs))
do_hard_irq_enable();
kvmppc_clear_host_ipi(smp_processor_id());
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 9ede61a5a469e..55142ff649f3f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -238,7 +238,7 @@ static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
irq = static_call(ppc_get_irq)();
/* We can hard enable interrupts now to allow perf interrupts */
- if (should_hard_irq_enable())
+ if (should_hard_irq_enable(regs))
do_hard_irq_enable();
/* And finally process it */
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index a2ab397065c66..f157552d79b38 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -533,7 +533,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
}
/* Conditionally hard-enable interrupts. */
- if (should_hard_irq_enable()) {
+ if (should_hard_irq_enable(regs)) {
/*
* Ensure a positive value is written to the decrementer, or
* else some CPUs will continue to take decrementer exceptions.
--
2.39.0