Message-Id: <20181204155122.6327-3-douliyangs@gmail.com>
Date: Tue, 4 Dec 2018 23:51:21 +0800
From: Dou Liyang <douliyangs@...il.com>
To: linux-kernel@...r.kernel.org, linux-pci@...r.kernel.org
Cc: tglx@...utronix.de, kashyap.desai@...adcom.com,
shivasharan.srikanteshwara@...adcom.com, sumit.saxena@...adcom.com,
ming.lei@...hat.com, hch@....de, bhelgaas@...gle.com,
douliyang1@...wei.com, Dou Liyang <douliyangs@...il.com>
Subject: [PATCH 2/3] irq/affinity: Add is_managed into struct irq_affinity_desc

Now, Linux uses struct irq_affinity_desc to convey the interrupt
affinity information.

As Kashyap and Sumit reported, in the MSI(-X) subsystem the pre/post
vectors may be used for some extra reply queues to improve performance:
https://marc.info/?l=linux-kernel&m=153543887027997&w=2

Their affinity masks are not NULL, but they should be mapped as
unmanaged interrupts. So, transferring only the irq affinity
assignments is not enough.
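
For context, a driver asks for such a layout via struct irq_affinity
when it allocates its vectors. A minimal sketch, assuming a
hypothetical driver (the vector counts and example_setup_irqs() are
made up; pci_alloc_irq_vectors_affinity() and the pre_vectors /
post_vectors fields are the existing API):

  #include <linux/interrupt.h>
  #include <linux/pci.h>

  /*
   * Vector 0 (e.g. an admin queue) and the last two vectors (extra
   * reply queues) are excluded from spreading; only the vectors in
   * between get a managed affinity spread across the CPUs.
   */
  static int example_setup_irqs(struct pci_dev *pdev, int max_vecs)
  {
          struct irq_affinity affd = {
                  .pre_vectors  = 1,
                  .post_vectors = 2,
          };

          return pci_alloc_irq_vectors_affinity(pdev, 1, max_vecs,
                          PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &affd);
  }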

Add a new bit "is_managed" to struct irq_affinity_desc to convey that
information and use it in alloc_descs().
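
With the bit in place, irq_create_affinity_masks() marks only the
vectors inside [pre_vectors, nvecs - post_vectors) as managed, and
alloc_descs() turns that into IRQD_AFFINITY_MANAGED |
IRQD_MANAGED_SHUTDOWN per descriptor, so existing checks keep working.
A sketch (example_is_managed() is made up; irqd_affinity_is_managed()
is the existing helper that tests IRQD_AFFINITY_MANAGED):

  #include <linux/irq.h>
  #include <linux/irqdesc.h>

  /* True only for descriptors that were marked managed above. */
  static bool example_is_managed(struct irq_desc *desc)
  {
          return irqd_affinity_is_managed(&desc->irq_data);
  }
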
Reported-by: Kashyap Desai <kashyap.desai@...adcom.com>
Reported-by: Sumit Saxena <sumit.saxena@...adcom.com>
Signed-off-by: Dou Liyang <douliyangs@...il.com>
---
 include/linux/interrupt.h | 1 +
 kernel/irq/affinity.c     | 7 +++++++
 kernel/irq/irqdesc.c      | 9 +++++++--
 3 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 71be303231e9..a12b3dbbc45e 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -263,6 +263,7 @@ struct irq_affinity {
  */
 struct irq_affinity_desc {
 	struct cpumask	mask;
+	unsigned int	is_managed : 1;
 };
 
 #if defined(CONFIG_SMP)
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 1562a36e7c0f..d122575ba1b4 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -289,6 +289,13 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	for (; curvec < nvecs; curvec++)
 		cpumask_copy(&masks[curvec].mask, irq_default_affinity);
 
+	/* Setup complementary information */
+	for (i = 0; i < nvecs; i++) {
+		if (i >= affd->pre_vectors && i < nvecs - affd->post_vectors)
+			masks[i].is_managed = 1;
+		else
+			masks[i].is_managed = 0;
+	}
 outnodemsk:
 	free_node_to_cpumask(node_to_cpumask);
 	return masks;
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index f87fa2b9935a..6b0821c144c0 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -455,7 +455,7 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
 	const struct irq_affinity_desc *cur_affinity = affinity;
 	const struct cpumask *mask = NULL;
 	struct irq_desc *desc;
-	unsigned int flags;
+	unsigned int flags = 0;
 	int i;
 
 	/* Validate affinity mask(s) */
@@ -468,11 +468,16 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
 		}
 	}
 
-	flags = affinity ? IRQD_AFFINITY_MANAGED | IRQD_MANAGED_SHUTDOWN : 0;
 	mask = NULL;
 
 	for (i = 0; i < cnt; i++) {
 		if (affinity) {
+			if (affinity->is_managed) {
+				flags = IRQD_AFFINITY_MANAGED |
+					IRQD_MANAGED_SHUTDOWN;
+			} else {
+				flags = 0;
+			}
 			mask = &affinity->mask;
 			node = cpu_to_node(cpumask_first(mask));
 			affinity++;
--
2.17.2