Message-ID: <175318835421.1420.1426076396982442914.tip-bot2@tip-bot2>
Date: Tue, 22 Jul 2025 12:45:54 -0000
From: "tip-bot2 for Thomas Gleixner" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Thomas Gleixner <tglx@...utronix.de>,
Liangyan <liangyan.peng@...edance.com>, Jiri Slaby <jirislaby@...nel.org>,
x86@...nel.org, linux-kernel@...r.kernel.org, maz@...nel.org
Subject: [tip: irq/core] genirq: Move irq_wait_for_poll() to call site

The following commit has been merged into the irq/core branch of tip:

Commit-ID:     4e879dedd571128ed5aa4d5989ec0a1938804d20
Gitweb:        https://git.kernel.org/tip/4e879dedd571128ed5aa4d5989ec0a1938804d20
Author:        Thomas Gleixner <tglx@...utronix.de>
AuthorDate:    Fri, 18 Jul 2025 20:54:08 +02:00
Committer:     Thomas Gleixner <tglx@...utronix.de>
CommitterDate: Tue, 22 Jul 2025 14:30:42 +02:00

genirq: Move irq_wait_for_poll() to call site

Move it to the call site so that the wait for the INPROGRESS flag can be
reused by an upcoming mitigation for a potential live lock in the edge type
handler.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Tested-by: Liangyan <liangyan.peng@...edance.com>
Reviewed-by: Jiri Slaby <jirislaby@...nel.org>
Link: https://lore.kernel.org/all/20250718185311.948555026@linutronix.de
---
kernel/irq/chip.c | 33 ++++++++++++++++++++++++---------
kernel/irq/internals.h | 2 +-
kernel/irq/spurious.c | 37 +------------------------------------
3 files changed, 26 insertions(+), 46 deletions(-)
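
A note on the first hunk below: it replaces irq_check_poll() with
irq_wait_on_inprogress(), which drops desc->lock while it spins on the
INPROGRESS state so that the CPU which still owns the interrupt (or the
spurious-IRQ poller) can take the lock and clear the flag. What follows is a
minimal userspace sketch of that drop-lock/spin/relock pattern, not kernel
code: fake_desc, wait_on_inprogress() and poller() are invented names, and a
pthread mutex plus C11 atomics stand in for raw_spin_lock()/raw_spin_unlock()
and the irqd_* helpers.

/*
 * Illustrative userspace sketch only -- not kernel code.
 * Build: cc -pthread wait_sketch.c
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct fake_desc {
	pthread_mutex_t lock;		/* stand-in for desc->lock */
	atomic_bool inprogress;		/* stand-in for irqd_irq_inprogress() */
	atomic_bool disabled;		/* stand-in for irqd_irq_disabled() */
};

static struct fake_desc desc = {
	.lock		= PTHREAD_MUTEX_INITIALIZER,
	.inprogress	= true,
	.disabled	= false,
};

/* Mirrors the shape of irq_wait_on_inprogress(): called with the lock held */
static bool wait_on_inprogress(struct fake_desc *d)
{
	do {
		/* Drop the lock so the owner of INPROGRESS can take it and finish */
		pthread_mutex_unlock(&d->lock);
		while (atomic_load(&d->inprogress))
			sched_yield();		/* cpu_relax() equivalent here */
		pthread_mutex_lock(&d->lock);
		/* Recheck under the lock: the flag may have been set again */
	} while (atomic_load(&d->inprogress));

	/* Might have been disabled in the meantime */
	return !atomic_load(&d->disabled);
}

/* Simulates the remote CPU (e.g. the spurious-IRQ poller) clearing the flag */
static void *poller(void *arg)
{
	struct fake_desc *d = arg;
	struct timespec delay = { .tv_sec = 0, .tv_nsec = 10 * 1000 * 1000 };

	nanosleep(&delay, NULL);	/* give the waiter time to start spinning */
	pthread_mutex_lock(&d->lock);	/* the waiter does not hold the lock while it spins */
	atomic_store(&d->inprogress, false);
	pthread_mutex_unlock(&d->lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, poller, &desc);

	pthread_mutex_lock(&desc.lock);
	printf("handler may run: %s\n", wait_on_inprogress(&desc) ? "yes" : "no");
	pthread_mutex_unlock(&desc.lock);

	pthread_join(&t, NULL);
	return 0;
}

The do/while shape matters: the flag has to be rechecked after the lock is
retaken, because it can be set again between the lock-less spin and the lock
acquisition.
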
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 5bb26fc..290244c 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -457,11 +457,21 @@ void unmask_threaded_irq(struct irq_desc *desc)
	unmask_irq(desc);
}

-static bool irq_check_poll(struct irq_desc *desc)
-{
-	if (!(desc->istate & IRQS_POLL_INPROGRESS))
-		return false;
-	return irq_wait_for_poll(desc);
+/* Busy wait until INPROGRESS is cleared */
+static bool irq_wait_on_inprogress(struct irq_desc *desc)
+{
+	if (IS_ENABLED(CONFIG_SMP)) {
+		do {
+			raw_spin_unlock(&desc->lock);
+			while (irqd_irq_inprogress(&desc->irq_data))
+				cpu_relax();
+			raw_spin_lock(&desc->lock);
+		} while (irqd_irq_inprogress(&desc->irq_data));
+
+		/* Might have been disabled in meantime */
+		return !irqd_irq_disabled(&desc->irq_data) && desc->action;
+	}
+	return false;
}

static bool irq_can_handle_pm(struct irq_desc *desc)
@@ -481,10 +491,15 @@ static bool irq_can_handle_pm(struct irq_desc *desc)
	if (irq_pm_check_wakeup(desc))
		return false;

-	/*
-	 * Handle a potential concurrent poll on a different core.
-	 */
-	return irq_check_poll(desc);
+	/* Check whether the interrupt is polled on another CPU */
+	if (unlikely(desc->istate & IRQS_POLL_INPROGRESS)) {
+		if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
+			      "irq poll in progress on cpu %d for irq %d\n",
+			      smp_processor_id(), desc->irq_data.irq))
+			return false;
+		return irq_wait_on_inprogress(desc);
+	}
+	return false;
}

static inline bool irq_can_handle_actions(struct irq_desc *desc)
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index aebfe22..82b0d67 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -20,6 +20,7 @@
#define istate core_internal_state__do_not_mess_with_it

extern bool noirqdebug;
+extern int irq_poll_cpu;

extern struct irqaction chained_action;

@@ -112,7 +113,6 @@ irqreturn_t handle_irq_event(struct irq_desc *desc);
int check_irq_resend(struct irq_desc *desc, bool inject);
void clear_irq_resend(struct irq_desc *desc);
void irq_resend_init(struct irq_desc *desc);
-bool irq_wait_for_poll(struct irq_desc *desc);
void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);
void wake_threads_waitq(struct irq_desc *desc);

diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 8f26982..73280cc 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -19,45 +19,10 @@ static int irqfixup __read_mostly;
#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(struct timer_list *unused);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs);
-static int irq_poll_cpu;
+int irq_poll_cpu;
static atomic_t irq_poll_active;

/*
- * We wait here for a poller to finish.
- *
- * If the poll runs on this CPU, then we yell loudly and return
- * false. That will leave the interrupt line disabled in the worst
- * case, but it should never happen.
- *
- * We wait until the poller is done and then recheck disabled and
- * action (about to be disabled). Only if it's still active, we return
- * true and let the handler run.
- */
-bool irq_wait_for_poll(struct irq_desc *desc)
-{
-	lockdep_assert_held(&desc->lock);
-
-	if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
-		      "irq poll in progress on cpu %d for irq %d\n",
-		      smp_processor_id(), desc->irq_data.irq))
-		return false;
-
-#ifdef CONFIG_SMP
-	do {
-		raw_spin_unlock(&desc->lock);
-		while (irqd_irq_inprogress(&desc->irq_data))
-			cpu_relax();
-		raw_spin_lock(&desc->lock);
-	} while (irqd_irq_inprogress(&desc->irq_data));
-	/* Might have been disabled in meantime */
-	return !irqd_irq_disabled(&desc->irq_data) && desc->action;
-#else
-	return false;
-#endif
-}
-
-
-/*
* Recovery handler for misrouted interrupts.
*/
static bool try_one_irq(struct irq_desc *desc, bool force)
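
A related note on the guard that stays in irq_can_handle_pm(): the
WARN_ONCE(irq_poll_cpu == smp_processor_id(), ...) check exists because
busy-waiting for a poll that runs on the current CPU could never make
progress. Below is a rough standalone sketch of just that guard, with invented
names (poll_cpu, can_wait_for_poll()) and plain integers standing in for
smp_processor_id():

/* Illustrative sketch only -- invented names, plain ints instead of CPU ids */
#include <stdbool.h>
#include <stdio.h>

static int poll_cpu = -1;		/* stand-in for irq_poll_cpu */
static bool poll_in_progress;		/* stand-in for IRQS_POLL_INPROGRESS */

static bool can_wait_for_poll(int this_cpu)
{
	if (!poll_in_progress)
		return true;		/* nothing to wait for */

	/* Analogue of WARN_ONCE(irq_poll_cpu == smp_processor_id(), ...) */
	if (poll_cpu == this_cpu) {
		fprintf(stderr, "poll in progress on cpu %d, refusing to spin\n", this_cpu);
		return false;		/* spinning here could never terminate */
	}
	return true;			/* the poll runs elsewhere, waiting is safe */
}

int main(void)
{
	poll_cpu = 0;
	poll_in_progress = true;

	printf("cpu 0 may wait: %s\n", can_wait_for_poll(0) ? "yes" : "no");
	printf("cpu 1 may wait: %s\n", can_wait_for_poll(1) ? "yes" : "no");
	return 0;
}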