Date:   Tue, 11 Aug 2020 14:17:49 +0800
From:   Yunfeng Ye <yeyunfeng@...wei.com>
To:     <tglx@...utronix.de>
CC:     Shiyuan Hu <hushiyuan@...wei.com>,
        Hewenliang <hewenliang4@...wei.com>,
        <linux-kernel@...r.kernel.org>
Subject: [PATCH] genirq/affinity: show managed irq affinity correctly

The "managed_irq" for isolcpus is supported after the commit
11ea68f553e2 ("genirq, sched/isolation: Isolate from handling managed
interrupts"), but the interrupt affinity shown in proc directory is
still the original affinity.

So modify the interrupt affinity correctly for managed_irq.
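
For illustration only (not part of the change itself), here is a minimal
user-space sketch of the masking that applies to a managed interrupt when
"isolcpus=managed_irq,..." is in effect. The CPU count and the hk_mask value
are made-up assumptions; the point is that the mask actually programmed is
the intersection with the housekeeping CPUs, which is what should be shown:

  #include <stdio.h>

  int main(void)
  {
  	/* Hypothetical 8-CPU system, one bit per CPU (bit 0 == CPU0). */
  	unsigned int requested = 0xff;	/* affinity requested for the IRQ */
  	unsigned int hk_mask   = 0x0f;	/* housekeeping CPUs, assuming
  					 * isolcpus=managed_irq,4-7 */

  	/*
  	 * Mirrors the logic in irq_do_set_affinity(): a managed IRQ is
  	 * restricted to the housekeeping CPUs unless the intersection
  	 * would be empty, in which case the requested mask is used as is.
  	 */
  	unsigned int programmed = requested & hk_mask;
  	if (!programmed)
  		programmed = requested;

  	printf("requested : 0x%02x\n", requested);
  	printf("programmed: 0x%02x\n", programmed);
  	/*
  	 * Before the patch the requested mask is what ends up in
  	 * desc->irq_common_data.affinity (and hence in proc); after the
  	 * patch the programmed mask is copied there instead.
  	 */
  	return 0;
  }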

Signed-off-by: yeyunfeng <yeyunfeng@...wei.com>
---
 kernel/irq/manage.c | 38 ++++++++++++++++++++++++--------------
 1 file changed, 24 insertions(+), 14 deletions(-)

diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index d55ba625d426..6ad4fe01942a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -218,8 +218,8 @@ static inline void irq_init_effective_affinity(struct irq_data *data,
 					       const struct cpumask *mask) { }
 #endif

-int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
-			bool force)
+static int irq_chip_set_affinity(struct irq_data *data,
+				const struct cpumask *mask, bool force)
 {
 	struct irq_desc *desc = irq_data_to_desc(data);
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
@@ -228,6 +228,26 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	if (!chip || !chip->irq_set_affinity)
 		return -EINVAL;

+	ret = chip->irq_set_affinity(data, mask, force);
+	switch (ret) {
+	case IRQ_SET_MASK_OK:
+	case IRQ_SET_MASK_OK_DONE:
+		cpumask_copy(desc->irq_common_data.affinity, mask);
+		/* fall through */
+	case IRQ_SET_MASK_OK_NOCOPY:
+		irq_validate_effective_affinity(data);
+		irq_set_thread_affinity(desc);
+		ret = 0;
+	}
+
+	return ret;
+}
+
+int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
+			bool force)
+{
+	int ret;
+
 	/*
 	 * If this is a managed interrupt and housekeeping is enabled on
 	 * it check whether the requested affinity mask intersects with
@@ -262,20 +282,10 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 			prog_mask = mask;
 		else
 			prog_mask = &tmp_mask;
-		ret = chip->irq_set_affinity(data, prog_mask, force);
+		ret = irq_chip_set_affinity(data, prog_mask, force);
 		raw_spin_unlock(&tmp_mask_lock);
 	} else {
-		ret = chip->irq_set_affinity(data, mask, force);
-	}
-	switch (ret) {
-	case IRQ_SET_MASK_OK:
-	case IRQ_SET_MASK_OK_DONE:
-		cpumask_copy(desc->irq_common_data.affinity, mask);
-		/* fall through */
-	case IRQ_SET_MASK_OK_NOCOPY:
-		irq_validate_effective_affinity(data);
-		irq_set_thread_affinity(desc);
-		ret = 0;
+		ret = irq_chip_set_affinity(data, mask, force);
 	}

 	return ret;
-- 
1.8.3.1
