[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1603448245-79429-3-git-send-email-guoren@kernel.org>
Date: Fri, 23 Oct 2020 10:17:25 +0000
From: guoren@...nel.org
To: palmerdabbelt@...gle.com, paul.walmsley@...ive.com,
anup@...infault.org, greentime.hu@...ive.com, zong.li@...ive.com,
atish.patra@....com, tglx@...utronix.de, jason@...edaemon.net,
maz@...nel.org, wesley@...ive.com, yash.shah@...ive.com, hch@....de
Cc: linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org,
guoren@...nel.org, Guo Ren <guoren@...ux.alibaba.com>
Subject: [PATCH 3/3] irqchip/irq-sifive-plic: Fix set_affinity unexpectedly enabling the irq
From: Guo Ren <guoren@...ux.alibaba.com>
For the PLIC, we only have enable registers to control each hart's irq
routing, and irq_set_affinity calls plic_irq_toggle to enable the
IRQ's routing. So we must not enable the irq in irq_domain_map before
request_irq; otherwise a device whose driver has not yet initialized
it could raise an unexpected interrupt.
The solution is to check whether the irq has already been enabled,
just as irq-gic-v3 does in gic_set_affinity.
Signed-off-by: Guo Ren <guoren@...ux.alibaba.com>
---
drivers/irqchip/irq-sifive-plic.c | 45 ++++++++++++++++++++++++++++++++++++---
1 file changed, 42 insertions(+), 3 deletions(-)
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 0003322..1a63859 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -130,6 +130,36 @@ static void plic_irq_mask(struct irq_data *d)
}
#ifdef CONFIG_SMP
+static inline bool plic_toggle_is_enabled(struct plic_handler *handler, /* Test the per-hart enable bit for @hwirq in @handler's enable registers. */
+ int hwirq)
+{
+ u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32); /* 32 enable bits per u32 register */
+ u32 hwirq_mask = 1 << (hwirq % 32); /* NOTE(review): 1U << ... would avoid a signed shift into bit 31 */
+
+ if (readl(reg) & hwirq_mask)
+ return true;
+ else
+ return false;
+}
+
+static inline bool plic_irq_is_enabled(const struct cpumask *mask, /* Return true if @d's hwirq is enabled on any present hart in @mask. */
+ struct irq_data *d)
+{
+ int cpu;
+
+ for_each_cpu(cpu, mask) {
+ struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
+
+ if (!handler->present) /* hart has no usable PLIC context; skip it */
+ continue;
+
+ if (plic_toggle_is_enabled(handler, d->hwirq))
+ return true;
+ }
+
+ return false;
+}
+
static int plic_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
{
@@ -141,8 +171,10 @@ static int plic_set_affinity(struct irq_data *d,
irq_data_update_effective_affinity(d, &amask);
- plic_irq_toggle(&priv->lmask, d, 0);
- plic_irq_toggle(&amask, d, 1);
+ if (plic_irq_is_enabled(&priv->lmask, d)) {
+ plic_irq_toggle(&priv->lmask, d, 0);
+ plic_irq_toggle(&amask, d, 1);
+ }
return IRQ_SET_MASK_OK_DONE;
}
@@ -168,12 +200,19 @@ static struct irq_chip plic_chip = {
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
 irq_hw_number_t hwirq)
{
+ unsigned int cpu;
 struct plic_priv *priv = d->host_data;
 irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
 handle_fasteoi_irq, NULL, NULL);
 irq_set_noprobe(irq);
- irq_set_affinity(irq, &priv->lmask);
+
+ cpu = cpumask_any_and(&priv->lmask, cpu_online_mask); /* pick one online hart from this PLIC's hart mask */
+ if (WARN_ON_ONCE(cpu >= nr_cpu_ids)) /* no online hart can take this irq */
+ return -EINVAL;
+
+ irq_set_affinity(irq, cpumask_of(cpu)); /* route to a single hart instead of the whole mask */
+
 return 0;
}
--
2.7.4
Powered by blists - more mailing lists