Message-ID: <tip-761ea388e8c4e3ac883a94e16bcc8c51fa419d4f@git.kernel.org>
Date:   Thu, 22 Jun 2017 10:08:02 -0700
From:   tip-bot for Thomas Gleixner <tipbot@...or.com>
To:     linux-tip-commits@...r.kernel.org
Cc:     hch@....de, linux-kernel@...r.kernel.org, mingo@...nel.org,
        mpe@...erman.id.au, hpa@...or.com, keith.busch@...el.com,
        axboe@...nel.dk, marc.zyngier@....com, peterz@...radead.org,
        tglx@...utronix.de
Subject: [tip:irq/core] genirq: Handle managed irqs gracefully in
 irq_startup()

Commit-ID:  761ea388e8c4e3ac883a94e16bcc8c51fa419d4f
Gitweb:     http://git.kernel.org/tip/761ea388e8c4e3ac883a94e16bcc8c51fa419d4f
Author:     Thomas Gleixner <tglx@...utronix.de>
AuthorDate: Tue, 20 Jun 2017 01:37:50 +0200
Committer:  Thomas Gleixner <tglx@...utronix.de>
CommitDate: Thu, 22 Jun 2017 18:21:24 +0200

genirq: Handle managed irqs gracefully in irq_startup()

Affinity-managed interrupts should keep their assigned affinity across CPU
hotplug. To avoid magic hackery in device drivers, the core code shall
manage them transparently and put these interrupts into a managed shutdown
state when the last CPU of the assigned affinity mask goes offline. The
interrupt will be restarted when one of the CPUs in the assigned affinity
mask comes back online.
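
For illustration only (not part of this patch): a minimal sketch of that
hotplug-online counterpart, which a separate patch in this series adds to
the CPU hotplug path. The helper name below is hypothetical; the calls are
assumptions based on the functions visible in this patch:

#include <linux/irq.h>
#include <linux/irqdesc.h>
#include "internals.h"	/* kernel/irq internal header: irq_startup() */

/*
 * Sketch: on CPU online, restart managed interrupts which were put into
 * managed shutdown state because their affinity mask had no online CPU
 * left.
 */
static void restart_managed_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *aff = irq_data_get_affinity_mask(d);

	/* Only requested, managed interrupts targeting this CPU qualify */
	if (!irqd_affinity_is_managed(d) || !desc->action ||
	    !cpumask_test_cpu(cpu, aff))
		return;

	if (irqd_is_managed_and_shutdown(d))
		irq_startup(desc, true /* resend */, false /* !force */);
}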

Add the necessary logic to irq_startup(). If an interrupt is requested and
started up, the code checks whether it is affinity managed and, if so,
whether a CPU in the interrupt's affinity mask is online. If not, it puts
the interrupt into the managed shutdown state.
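
For completeness, a driver-side illustration of how such a managed
interrupt gets requested in the first place (not part of this patch; the
device name and handler are hypothetical, PCI_IRQ_AFFINITY is the existing
way to get affinity-managed vectors):

#include <linux/pci.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>

/*
 * Hypothetical driver setup: with PCI_IRQ_AFFINITY the core spreads the
 * MSI-X vectors over the CPUs and manages their affinity across hotplug;
 * the driver never touches irq affinity itself.
 */
static int example_request_managed_irqs(struct pci_dev *pdev,
					irq_handler_t handler, void *data)
{
	int i, err, nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, num_possible_cpus(),
				     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		/* pci_irq_vector() maps a vector index to a Linux irq */
		err = request_irq(pci_irq_vector(pdev, i), handler, 0,
				  "example", data);
		if (err)
			goto out_free;
	}
	return nvec;

out_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), data);
	pci_free_irq_vectors(pdev);
	return err;
}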

Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Cc: Jens Axboe <axboe@...nel.dk>
Cc: Marc Zyngier <marc.zyngier@....com>
Cc: Michael Ellerman <mpe@...erman.id.au>
Cc: Keith Busch <keith.busch@...el.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Christoph Hellwig <hch@....de>
Link: http://lkml.kernel.org/r/20170619235447.189851170@linutronix.de

---
 include/linux/irq.h |  2 +-
 kernel/irq/chip.c   | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 62 insertions(+), 4 deletions(-)

diff --git a/include/linux/irq.h b/include/linux/irq.h
index 0e37276..807042b 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -346,7 +346,7 @@ static inline bool irqd_is_started(struct irq_data *d)
 	return __irqd_to_state(d) & IRQD_IRQ_STARTED;
 }
 
-static inline bool irqd_is_managed_shutdown(struct irq_data *d)
+static inline bool irqd_is_managed_and_shutdown(struct irq_data *d)
 {
 	return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;
 }
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index b7599e9..fc89eeb 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -195,6 +195,52 @@ static void irq_state_set_started(struct irq_desc *desc)
 	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
 }
 
+enum {
+	IRQ_STARTUP_NORMAL,
+	IRQ_STARTUP_MANAGED,
+	IRQ_STARTUP_ABORT,
+};
+
+#ifdef CONFIG_SMP
+static int
+__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
+{
+	struct irq_data *d = irq_desc_get_irq_data(desc);
+
+	if (!irqd_affinity_is_managed(d))
+		return IRQ_STARTUP_NORMAL;
+
+	irqd_clr_managed_shutdown(d);
+
+	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
+		/*
+		 * Catch code which fiddles with enable_irq() on a managed
+		 * and potentially shutdown IRQ. Chained interrupt
+		 * installation or irq auto probing should not happen on
+		 * managed irqs either. Emit a warning, break the affinity
+		 * and start it up as a normal interrupt.
+		 */
+		if (WARN_ON_ONCE(force))
+			return IRQ_STARTUP_NORMAL;
+		/*
+		 * The interrupt was requested, but there is no online CPU
+		 * in its affinity mask. Put it into managed shutdown
+		 * state and let the cpu hotplug mechanism start it up once
+		 * a CPU in the mask becomes available.
+		 */
+		irqd_set_managed_shutdown(d);
+		return IRQ_STARTUP_ABORT;
+	}
+	return IRQ_STARTUP_MANAGED;
+}
+#else
+static int
+__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
+{
+	return IRQ_STARTUP_NORMAL;
+}
+#endif
+
 static int __irq_startup(struct irq_desc *desc)
 {
 	struct irq_data *d = irq_desc_get_irq_data(desc);
@@ -214,15 +260,27 @@ static int __irq_startup(struct irq_desc *desc)
 
 int irq_startup(struct irq_desc *desc, bool resend, bool force)
 {
+	struct irq_data *d = irq_desc_get_irq_data(desc);
+	struct cpumask *aff = irq_data_get_affinity_mask(d);
 	int ret = 0;
 
 	desc->depth = 0;
 
-	if (irqd_is_started(&desc->irq_data)) {
+	if (irqd_is_started(d)) {
 		irq_enable(desc);
 	} else {
-		ret = __irq_startup(desc);
-		irq_setup_affinity(desc);
+		switch (__irq_startup_managed(desc, aff, force)) {
+		case IRQ_STARTUP_NORMAL:
+			ret = __irq_startup(desc);
+			irq_setup_affinity(desc);
+			break;
+		case IRQ_STARTUP_MANAGED:
+			ret = __irq_startup(desc);
+			irq_set_affinity_locked(d, aff, false);
+			break;
+		case IRQ_STARTUP_ABORT:
+			return 0;
+		}
 	}
 	if (resend)
 		check_irq_resend(desc);

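For context on the WARN_ON_ONCE(force) branch above: enable_irq() reaches
irq_startup() with the force argument set (added earlier in this series),
so a driver pattern like the hypothetical one below, run while all CPUs in
a managed interrupt's affinity mask are offline, would trigger the warning,
break the affinity and start the interrupt up as a normal one:

#include <linux/interrupt.h>

/*
 * Hypothetical misuse caught by the force/WARN path: open-coded
 * disable_irq()/enable_irq() on an affinity-managed interrupt. If every
 * CPU in the affinity mask went offline in between, the interrupt is in
 * managed shutdown state and the forced startup hits the warning.
 */
static void example_reset_device_irq(unsigned int irq)
{
	disable_irq(irq);	/* quiesce the device */
	/* ... reset hardware ... */
	enable_irq(irq);	/* force-starts a managed, shut down irq */
}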