Message-Id: <20220216090845.1278114-3-maz@kernel.org>
Date:   Wed, 16 Feb 2022 09:08:45 +0000
From:   Marc Zyngier <maz@...nel.org>
To:     linux-kernel@...r.kernel.org, netdev@...r.kernel.org
Cc:     Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        Marcin Wojtas <mw@...ihalf.com>,
        Russell King <linux@...linux.org.uk>,
        "David S. Miller" <davem@...emloft.net>,
        Jakub Kicinski <kuba@...nel.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        John Garry <john.garry@...wei.com>, kernel-team@...roid.com
Subject: [PATCH 2/2] net: mvpp2: Convert to managed interrupts to fix CPU HP issues

The MVPP2 driver uses a set of per-CPU interrupts and relies on
each particular interrupt to fire *only* on the CPU it has been
assigned to.

Although the affinity setting is restricted to prevent userspace
from moving interrupts around, this all falls apart when using CPU
hotplug, as this breaks the affinity. Depending on how lucky you
are, the interrupt will then scream on the wrong CPU, eventually
leading to an ugly crash.

Ideally, the interrupt assigned to a given CPU would simply be left
where it is, only masked when the CPU goes down, and brought back
up when the CPU is alive again. As it turns out, this is the model
used for most multi-queue devices, and we'd be better off using it
for the MVPP2 driver.

Drop the home-baked affinity settings in favour of the ready-made
irq_set_affinity_masks() helper, making things slightly simpler.
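
irq_set_affinity_masks() itself is not part of this patch; assuming it
follows the existing devm_platform_get_irqs_affinity() logic, it boils
down to something like the sketch below (an approximation, not the
actual helper):

	/*
	 * Assumed shape of the helper: build one spread affinity
	 * descriptor per vector and apply it to each interrupt, which
	 * also marks the interrupts as managed, so the core masks them
	 * when their CPU goes down and unmasks them when it comes back.
	 */
	static int irq_set_affinity_masks(struct irq_affinity *affd,
					  int *irqs, int nvec)
	{
		struct irq_affinity_desc *descs;
		int i, err = 0;

		descs = irq_create_affinity_masks(nvec, affd);
		if (!descs)
			return -ENOMEM;

		for (i = 0; i < nvec; i++) {
			err = irq_update_affinity_desc(irqs[i], &descs[i]);
			if (err)
				break;
		}

		kfree(descs);
		return err;
	}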

With this change, the driver is able to sustain CPUs being taken away.
What is still missing is a way to tell the device that it should
stop sending traffic to a given CPU.

Signed-off-by: Marc Zyngier <maz@...nel.org>
---
 drivers/net/ethernet/marvell/mvpp2/mvpp2.h    |  1 -
 .../net/ethernet/marvell/mvpp2/mvpp2_main.c   | 67 ++++++++++---------
 2 files changed, 34 insertions(+), 34 deletions(-)

diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index ad73a488fc5f..86f8feaf5350 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -1143,7 +1143,6 @@ struct mvpp2_queue_vector {
 	int nrxqs;
 	u32 pending_cause_rx;
 	struct mvpp2_port *port;
-	struct cpumask *mask;
 };
 
 /* Internal represention of a Flow Steering rule */
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 7cdbf8b8bbf6..cdc519583e86 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4674,49 +4674,54 @@ static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
 
 static int mvpp2_irqs_init(struct mvpp2_port *port)
 {
-	int err, i;
+	struct irq_affinity affd = {
+		/* No pre/post-vectors, single set */
+	};
+	int err, i, nvec, *irqs;
 
-	for (i = 0; i < port->nqvecs; i++) {
+	for (i = nvec = 0; i < port->nqvecs; i++) {
 		struct mvpp2_queue_vector *qv = port->qvecs + i;
 
-		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
-			qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
-			if (!qv->mask) {
-				err = -ENOMEM;
-				goto err;
-			}
+		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
+			nvec++;
+	}
 
-			irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
-		}
+	irqs = kmalloc(sizeof(*irqs) * nvec, GFP_KERNEL);
+	if (!irqs)
+		return -ENOMEM;
 
-		err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
-		if (err)
-			goto err;
+	for (i = 0; i < port->nqvecs; i++) {
+		struct mvpp2_queue_vector *qv = port->qvecs + i;
 
-		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
-			unsigned int cpu;
+		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
+			irqs[i] = qv->irq;
+	}
 
-			for_each_present_cpu(cpu) {
-				if (mvpp2_cpu_to_thread(port->priv, cpu) ==
-				    qv->sw_thread_id)
-					cpumask_set_cpu(cpu, qv->mask);
-			}
+	err = irq_set_affinity_masks(&affd, irqs, nvec);
+	if (err)
+		goto err;
 
-			irq_set_affinity_hint(qv->irq, qv->mask);
+	for (i = 0; i < port->nqvecs; i++) {
+		struct mvpp2_queue_vector *qv = port->qvecs + i;
+
+		err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
+		if (err) {
+			nvec = i;
+			break;
 		}
 	}
 
-	return 0;
-err:
-	for (i = 0; i < port->nqvecs; i++) {
-		struct mvpp2_queue_vector *qv = port->qvecs + i;
+	if (err) {
+		for (i = 0; i < nvec; i++) {
+			struct mvpp2_queue_vector *qv = port->qvecs + i;
 
-		irq_set_affinity_hint(qv->irq, NULL);
-		kfree(qv->mask);
-		qv->mask = NULL;
-		free_irq(qv->irq, qv);
+			free_irq(qv->irq, qv);
+		}
 	}
 
+err:
+	kfree(irqs);
+
 	return err;
 }
 
@@ -4727,10 +4732,6 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port)
 	for (i = 0; i < port->nqvecs; i++) {
 		struct mvpp2_queue_vector *qv = port->qvecs + i;
 
-		irq_set_affinity_hint(qv->irq, NULL);
-		kfree(qv->mask);
-		qv->mask = NULL;
-		irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
 		free_irq(qv->irq, qv);
 	}
 }
-- 
2.30.2
