Message-ID: <20240216075948.131372-4-leobras@redhat.com>
Date: Fri, 16 Feb 2024 04:59:44 -0300
From: Leonardo Bras <leobras@...hat.com>
To: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
	Jiri Slaby <jirislaby@...nel.org>,
	Thomas Gleixner <tglx@...utronix.de>,
	Tony Lindgren <tony@...mide.com>,
	Andy Shevchenko <andriy.shevchenko@...ux.intel.com>,
	John Ogness <john.ogness@...utronix.de>,
	Ilpo Järvinen <ilpo.jarvinen@...ux.intel.com>,
	Uwe Kleine-König <u.kleine-koenig@...gutronix.de>,
	Leonardo Bras <leobras@...hat.com>,
	Florian Fainelli <florian.fainelli@...adcom.com>,
	Shanker Donthineni <sdonthineni@...dia.com>
Cc: linux-kernel@...r.kernel.org,
	linux-serial@...r.kernel.org
Subject: [RFC PATCH v2 2/4] irq/spurious: Account for multiple handles in note_interrupt

Currently, note_interrupt() checks threads_handled for changes and uses it to
mark an IRQ as handled, in order to keep a threaded interrupt from falsely
triggering the unhandled-IRQ detection.

This detection can still be falsely triggered if many handled IRQs are
accounted between two consecutive checks of threads_handled, since they are
all counted as a single handled IRQ by the unhandled-IRQ detection.

In order to fix this, subtract from irqs_unhandled the number of IRQs handled
since the last check (threads_handled - threads_handled_last).
Signed-off-by: Leonardo Bras <leobras@...hat.com>
---
 include/linux/irqdesc.h |  2 +-
 kernel/irq/spurious.c   | 53 ++++++++++++++++++++++++++---------------
 2 files changed, 35 insertions(+), 20 deletions(-)

diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 62aff209315fe..957ac02e9ec2b 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -67,7 +67,7 @@ struct irq_desc {
 	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
 	unsigned int		irqs_unhandled;
 	atomic_t		threads_handled;
-	int			threads_handled_last;
+	unsigned int		threads_handled_last;
 	raw_spinlock_t		lock;
 	struct cpumask		*percpu_enabled;
 	const struct cpumask	*percpu_affinity;
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index d92f33b2e31ee..4e8e2727b8beb 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -267,6 +267,28 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
 	return action && (action->flags & IRQF_IRQPOLL);
 }
 
+static inline int get_handled_diff(struct irq_desc *desc)
+{
+	unsigned int handled;
+	int diff;
+
+	handled = (unsigned int)atomic_read(&desc->threads_handled);
+	handled |= SPURIOUS_DEFERRED;
+
+	diff = handled - desc->threads_handled_last;
+	diff >>= SPURIOUS_DEFERRED_SHIFT;
+
+	/*
+	 * Note: We keep the SPURIOUS_DEFERRED bit set. We are handling the
+	 * previous invocation right now. Keep it for the current one, so the
+	 * next hardware interrupt will account for it.
+	 */
+	if (diff != 0)
+		desc->threads_handled_last = handled;
+
+	return diff;
+}
+
 void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
 {
 	unsigned int irq;
@@ -308,7 +330,7 @@ void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
 		 * interrupt.
 		 */
 		if (action_ret == IRQ_WAKE_THREAD) {
-			int handled;
+			int diff;
 			/*
 			 * We use bit 0 of thread_handled_last to
 			 * denote the deferred spurious detection
@@ -325,27 +347,20 @@ void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
 			 * Check whether one of the threaded handlers
 			 * returned IRQ_HANDLED since the last
 			 * interrupt happened.
-			 *
-			 * For simplicity we just set bit 0, as it is
-			 * set in threads_handled_last as well. So we
-			 * avoid extra masking. And we really do not
-			 * care about the high bits of the handled
-			 * count. We just care about the count being
-			 * different than the one we saw before.
 			 */
-			handled = atomic_read(&desc->threads_handled);
-			handled |= SPURIOUS_DEFERRED;
-			if (handled != desc->threads_handled_last) {
-				action_ret = IRQ_HANDLED;
+			diff = get_handled_diff(desc);
+			if (diff > 0) {
 				/*
-				 * Note: We keep the SPURIOUS_DEFERRED
-				 * bit set. We are handling the
-				 * previous invocation right now.
-				 * Keep it for the current one, so the
-				 * next hardware interrupt will
-				 * account for it.
+				 * Subtract from irqs_unhandled	the number of
+				 * IRQs handled since the last change in
+				 * threads_handled.
 				 */
-				desc->threads_handled_last = handled;
+				if (diff < desc->irqs_unhandled)
+					desc->irqs_unhandled -= diff;
+				else
+					desc->irqs_unhandled = 0;
+
+				action_ret = IRQ_HANDLED;
 			} else {
 				/*
 				 * None of the threaded handlers felt
-- 
2.43.2

