Message-Id: <20200316115433.9017-2-maz@kernel.org>
Date: Mon, 16 Mar 2020 11:54:32 +0000
From: Marc Zyngier <maz@...nel.org>
To: linux-kernel@...r.kernel.org, linux-arm-kernel@...ts.infradead.org
Cc: John Garry <john.garry@...wei.com>,
chenxiang <chenxiang66@...ilicon.com>,
Zhou Wang <wangzhou1@...ilicon.com>,
Ming Lei <ming.lei@...hat.com>,
Jason Cooper <jason@...edaemon.net>,
Thomas Gleixner <tglx@...utronix.de>
Subject: [PATCH v3 1/2] irqchip/gic-v3-its: Track LPI distribution on a per CPU basis

In order to improve the distribution of LPIs among CPUs, let's start by
tracking the number of LPIs assigned to each CPU, both for managed and
non-managed interrupts (as separate counters).
Signed-off-by: Marc Zyngier <maz@...nel.org>
---
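For readers who want to experiment with the counting scheme outside the
kernel, here is a minimal stand-alone user-space sketch of the counters
this patch introduces. It is an illustration only, not part of the
patch: the fixed NR_CPUS, the plain "managed" flag standing in for
irqd_affinity_is_managed(), and the C11 atomics replacing the kernel's
atomic_t and per-CPU machinery are all simplifications made so the
example compiles on its own.

/*
 * Stand-alone user-space sketch of the per-CPU LPI counters
 * (illustration only, not part of the patch).
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS	8

struct cpu_lpi_count {
	atomic_int managed;
	atomic_int unmanaged;
};

/* One pair of counters per CPU, modelling DEFINE_PER_CPU() */
static struct cpu_lpi_count cpu_lpi_count[NR_CPUS];

static int read_lpi_count(bool managed, int cpu)
{
	return managed ? atomic_load(&cpu_lpi_count[cpu].managed)
		       : atomic_load(&cpu_lpi_count[cpu].unmanaged);
}

static void inc_lpi_count(bool managed, int cpu)
{
	atomic_fetch_add(managed ? &cpu_lpi_count[cpu].managed
				 : &cpu_lpi_count[cpu].unmanaged, 1);
}

static void dec_lpi_count(bool managed, int cpu)
{
	atomic_fetch_sub(managed ? &cpu_lpi_count[cpu].managed
				 : &cpu_lpi_count[cpu].unmanaged, 1);
}

int main(void)
{
	inc_lpi_count(false, 0);	/* activate an unmanaged LPI on CPU0 */
	inc_lpi_count(false, 1);	/* activate another one on CPU1 */
	dec_lpi_count(false, 0);	/* deactivate the first one */

	printf("cpu0: %d unmanaged, cpu1: %d unmanaged\n",
	       read_lpi_count(false, 0), read_lpi_count(false, 1));
	return 0;
}

Keeping managed and unmanaged interrupts in separate counters means any
later placement logic can weigh the two classes independently when
choosing a target CPU.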
drivers/irqchip/irq-gic-v3-its.c | 35 ++++++++++++++++++++++++++++++++
1 file changed, 35 insertions(+)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 34e5a06ec874..941786e1e8f7 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -173,6 +173,13 @@ static struct {
int next_victim;
} vpe_proxy;

+struct cpu_lpi_count {
+ atomic_t managed;
+ atomic_t unmanaged;
+};
+
+static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);
+
static LIST_HEAD(its_nodes);
static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
@@ -1500,6 +1507,30 @@ static void its_unmask_irq(struct irq_data *d)
lpi_update_config(d, 0, LPI_PROP_ENABLED);
}

+static u32 its_read_lpi_count(struct irq_data *d, int cpu)
+{
+ if (irqd_affinity_is_managed(d))
+ return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+
+ return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
+static void its_inc_lpi_count(struct irq_data *d, int cpu)
+{
+ if (irqd_affinity_is_managed(d))
+ atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+ else
+ atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
+static void its_dec_lpi_count(struct irq_data *d, int cpu)
+{
+ if (irqd_affinity_is_managed(d))
+ atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+ else
+ atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
@@ -1529,6 +1560,8 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
/* don't set the affinity when the target cpu is same as current one */
if (cpu != its_dev->event_map.col_map[id]) {
+ its_inc_lpi_count(d, cpu);
+ its_dec_lpi_count(d, its_dev->event_map.col_map[id]);
target_col = &its_dev->its->collections[cpu];
its_send_movi(its_dev, target_col, id);
its_dev->event_map.col_map[id] = cpu;
@@ -3438,6 +3471,7 @@ static int its_irq_domain_activate(struct irq_domain *domain,
cpu = cpumask_first(cpu_online_mask);
}
+ its_inc_lpi_count(d, cpu);
its_dev->event_map.col_map[event] = cpu;
irq_data_update_effective_affinity(d, cpumask_of(cpu));
@@ -3452,6 +3486,7 @@ static void its_irq_domain_deactivate(struct irq_domain *domain,
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
+ its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
/* Stop the delivery of interrupts */
its_send_discard(its_dev, event);
}
--
2.20.1