Message-ID: <20230122081607.959474-2-vigneshr@ti.com>
Date: Sun, 22 Jan 2023 13:46:06 +0530
From: Vignesh Raghavendra <vigneshr@...com>
To: Nishanth Menon <nm@...com>, Tero Kristo <kristo@...nel.org>,
Santosh Shilimkar <ssantosh@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Marc Zyngier <maz@...nel.org>
CC: <linux-arm-kernel@...ts.infradead.org>,
<linux-kernel@...r.kernel.org>,
Vignesh Raghavendra <vigneshr@...com>
Subject: [RFC PATCH 1/2] irqchip: irq-ti-sci-inta: Don't aggregate MSI events until necessary

INTA on K3 SoCs converts DMA global events (MSIs) into wired interrupts
(VINTs). Currently the driver maps multiple events onto a single wired
interrupt line, which makes setting IRQ affinity impossible: migrating a
wired interrupt to a different core migrates every event aggregated onto
it as well. Two DMA channels sharing one VINT, for example, cannot have
their completion interrupts serviced by different CPUs.

Instead, allocate a dedicated VINT for each DMA event request until the
VINTs are exhausted, and only then start aggregating events onto shared
VINTs. This provides a 1:1 DMA event to VINT mapping, and thus a unique
IRQ line, for as long as free VINTs remain. High performance DMA
channels can then be given dedicated IRQs and pinned to particular CPUs
using IRQ affinity.

Signed-off-by: Vignesh Raghavendra <vigneshr@...com>
---
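Note: with the 1:1 mapping in place, a channel's dedicated VINT interrupt
can be steered from userspace through the standard /proc/irq interface.
Minimal illustrative sketch below (IRQ 250 is hypothetical; the real
number comes from /proc/interrupts):

    #include <stdio.h>

    int main(void)
    {
            /* Pin hypothetical IRQ 250 (a dedicated VINT) to CPU2 */
            FILE *f = fopen("/proc/irq/250/smp_affinity", "w");

            if (!f)
                    return 1;
            fputs("4\n", f);        /* affinity mask 0x4 selects CPU2 */
            fclose(f);
            return 0;
    }
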
 drivers/irqchip/irq-ti-sci-inta.c | 49 +++++++++++++++++++++++++++++--------------------
 1 file changed, 29 insertions(+), 20 deletions(-)

diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index a6ecc53d055c..f1419d24568e 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -202,7 +202,8 @@ static int ti_sci_inta_xlate_irq(struct ti_sci_inta_irq_domain *inta,
  *
  * Return 0 if all went well else corresponding error value.
  */
-static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_domain *domain)
+static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_domain *domain,
+                                                                  u16 vint_id)
 {
         struct ti_sci_inta_irq_domain *inta = domain->host_data;
         struct ti_sci_inta_vint_desc *vint_desc;
@@ -210,11 +211,6 @@ static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_dom
         struct device_node *parent_node;
         unsigned int parent_virq;
         int p_hwirq, ret;
-        u16 vint_id;
-
-        vint_id = ti_sci_get_free_resource(inta->vint);
-        if (vint_id == TI_SCI_RESOURCE_NULL)
-                return ERR_PTR(-EINVAL);
 
         p_hwirq = ti_sci_inta_xlate_irq(inta, vint_id);
         if (p_hwirq < 0) {
@@ -328,29 +324,42 @@ static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_irq(struct irq_domain *d
         struct ti_sci_inta_vint_desc *vint_desc = NULL;
         struct ti_sci_inta_event_desc *event_desc;
         u16 free_bit;
+        u16 vint_id;
 
         mutex_lock(&inta->vint_mutex);
-        list_for_each_entry(vint_desc, &inta->vint_list, list) {
+        /*
+         * Allocate a new VINT each time until we run out, then start
+         * aggregating events.
+         */
+        vint_id = ti_sci_get_free_resource(inta->vint);
+        if (vint_id == TI_SCI_RESOURCE_NULL) {
+                list_for_each_entry(vint_desc, &inta->vint_list, list) {
+                        free_bit = find_first_zero_bit(vint_desc->event_map,
+                                                       MAX_EVENTS_PER_VINT);
+                        if (free_bit != MAX_EVENTS_PER_VINT) {
+                                set_bit(free_bit, vint_desc->event_map);
+                                break;
+                        }
+                }
+        } else {
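+                /* A free VINT is available: give this event its own line */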
+                vint_desc = ti_sci_inta_alloc_parent_irq(domain, vint_id);
+                if (IS_ERR(vint_desc)) {
+                        event_desc = ERR_CAST(vint_desc);
+                        goto unlock;
+                }
+
                 free_bit = find_first_zero_bit(vint_desc->event_map,
                                                MAX_EVENTS_PER_VINT);
-                if (free_bit != MAX_EVENTS_PER_VINT) {
-                        set_bit(free_bit, vint_desc->event_map);
-                        goto alloc_event;
-                }
+                set_bit(free_bit, vint_desc->event_map);
         }
 
-        /* No free bits available. Allocate a new vint */
-        vint_desc = ti_sci_inta_alloc_parent_irq(domain);
-        if (IS_ERR(vint_desc)) {
-                event_desc = ERR_CAST(vint_desc);
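+        /* Neither a free VINT nor a free event slot remains: give up */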
+        if (free_bit == MAX_EVENTS_PER_VINT) {
+                event_desc = ERR_PTR(-EINVAL);
                 goto unlock;
         }
 
-        free_bit = find_first_zero_bit(vint_desc->event_map,
-                                       MAX_EVENTS_PER_VINT);
-        set_bit(free_bit, vint_desc->event_map);
-
-alloc_event:
         event_desc = ti_sci_inta_alloc_event(vint_desc, free_bit, hwirq);
         if (IS_ERR(event_desc))
                 clear_bit(free_bit, vint_desc->event_map);
--
2.39.0