Message-Id: <20180907082509.11083-2-dou_liyang@163.com>
Date: Fri, 7 Sep 2018 16:25:09 +0800
From: Dou Liyang <dou_liyang@....com>
To: linux-kernel@...r.kernel.org, x86@...nel.org
Cc: tglx@...utronix.de, mingo@...hat.com, hpa@...or.com,
douly.fnst@...fujitsu.com
Subject: [PATCH 2/2] irq/matrix: Spread managed interrupts on allocation
From: Dou Liyang <douly.fnst@...fujitsu.com>
Linux spreads out non-managed interrupts across the possible target CPUs to
avoid vector space exhaustion. However, the same exhaustion can happen with
managed interrupts.

Spread managed interrupts on allocation as well.
Fixes: a0c9259dc4e1 ("irq/matrix: Spread interrupts on allocation")
Signed-off-by: Dou Liyang <douly.fnst@...fujitsu.com>
---
arch/x86/kernel/apic/vector.c | 8 +++-----
include/linux/irq.h | 3 ++-
kernel/irq/matrix.c | 32 ++++++++++++++++++++------------
3 files changed, 25 insertions(+), 18 deletions(-)
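
Note (not for the changelog): the stand-alone sketch below only illustrates
the selection policy the patch relies on: among the CPUs allowed by the mask,
pick the one with the fewest vectors already allocated and allocate there.
The names used here (cpumap_sketch, find_best_cpu, NR_CPUS as a constant) are
simplified illustrations for this mail, not the kernel's irq_matrix API; the
real matrix code uses per-CPU bitmaps rather than a plain counter.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct cpumap_sketch {
	unsigned int allocated;		/* vectors already handed out on this CPU */
	bool online;
};

static struct cpumap_sketch maps[NR_CPUS] = {
	{ .allocated = 5, .online = true  },
	{ .allocated = 2, .online = true  },
	{ .allocated = 7, .online = true  },
	{ .allocated = 2, .online = false },
};

/* Pick the online CPU in @msk with the lowest allocation count. */
static int find_best_cpu(const bool msk[NR_CPUS])
{
	unsigned int cpu, lowest = UINT_MAX;
	int best_cpu = -1;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!msk[cpu] || !maps[cpu].online)
			continue;
		if (maps[cpu].allocated < lowest) {
			lowest = maps[cpu].allocated;
			best_cpu = (int)cpu;
		}
	}
	return best_cpu;
}

int main(void)
{
	bool msk[NR_CPUS] = { true, true, true, true };
	int cpu = find_best_cpu(msk);

	if (cpu < 0) {
		puts("no suitable CPU in the mask");
		return 1;
	}
	/* Allocate on the least loaded CPU, as the patch does per managed vector. */
	maps[cpu].allocated++;
	printf("allocated vector on CPU %d\n", cpu);
	return 0;
}
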
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 9f148e3d45b4..b7fc290b4b98 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -313,14 +313,12 @@ assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
struct apic_chip_data *apicd = apic_chip_data(irqd);
int vector, cpu;
- cpumask_and(vector_searchmask, vector_searchmask, affmsk);
- cpu = cpumask_first(vector_searchmask);
- if (cpu >= nr_cpu_ids)
- return -EINVAL;
+ cpumask_and(vector_searchmask, dest, affmsk);
+
/* set_affinity might call here for nothing */
if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
return 0;
- vector = irq_matrix_alloc_managed(vector_matrix, cpu);
+ vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask, &cpu);
trace_vector_alloc_managed(irqd->irq, vector, vector);
if (vector < 0)
return vector;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 201de12a9957..c9bffda04a45 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1151,7 +1151,8 @@ void irq_matrix_offline(struct irq_matrix *m);
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu);
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+ unsigned int *mapped_cpu);
void irq_matrix_reserve(struct irq_matrix *m);
void irq_matrix_remove_reserved(struct irq_matrix *m);
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 5eb0c8b857f0..b449a749b354 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -259,21 +259,29 @@ void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
* @m: Matrix pointer
* @cpu: On which CPU the interrupt should be allocated
*/
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+ unsigned int *mapped_cpu)
{
- struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
unsigned int bit, end = m->alloc_end;
+ unsigned int best_cpu = UINT_MAX;
+ struct cpumap *cm;
- /* Get managed bit which are not allocated */
- bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
- bit = find_first_bit(m->scratch_map, end);
- if (bit >= end)
- return -ENOSPC;
- set_bit(bit, cm->alloc_map);
- cm->allocated++;
- m->total_allocated++;
- trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
- return bit;
+ if (matrix_find_best_cpu(m, msk, &best_cpu)) {
+ cm = per_cpu_ptr(m->maps, best_cpu);
+ end = m->alloc_end;
+ /* Get managed bit which are not allocated */
+ bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
+ bit = find_first_bit(m->scratch_map, end);
+ if (bit >= end)
+ return -ENOSPC;
+ set_bit(bit, cm->alloc_map);
+ cm->allocated++;
+ m->total_allocated++;
+ *mapped_cpu = best_cpu;
+ trace_irq_matrix_alloc_managed(bit, best_cpu, m, cm);
+ return bit;
+ }
+ return -ENOSPC;
}
/**
--
2.14.3