Message-Id: <20181022114517.22748-3-anup@brainfault.org>
Date: Mon, 22 Oct 2018 17:15:15 +0530
From: Anup Patel <anup@...infault.org>
To: Palmer Dabbelt <palmer@...ive.com>,
Albert Ou <aou@...s.berkeley.edu>,
Daniel Lezcano <daniel.lezcano@...aro.org>,
Thomas Gleixner <tglx@...utronix.de>,
Jason Cooper <jason@...edaemon.net>,
Marc Zyngier <marc.zyngier@....com>
Cc: Atish Patra <atish.patra@....com>,
Christoph Hellwig <hch@...radead.org>,
linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org,
Anup Patel <anup@...infault.org>
Subject: [PATCH 2/4] irqchip: sifive-plic: More flexible plic_irq_toggle()

We make plic_irq_toggle() more generic so that it can enable/disable a
given hwirq for a given cpumask. This generic plic_irq_toggle() will
eventually be used to implement set_affinity for the PLIC driver.

Signed-off-by: Anup Patel <anup@...infault.org>
---
 drivers/irqchip/irq-sifive-plic.c | 89 ++++++++++++++++---------------
 1 file changed, 47 insertions(+), 42 deletions(-)
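
Note for reviewers (not part of this patch): with plic_irq_toggle() now
taking an explicit cpumask and hwirq, a follow-up irq_set_affinity callback
only needs to pick one target CPU and re-route the line through the same
helper. Below is a rough, purely illustrative sketch; the function name,
return convention and exact checks are assumptions, not the final
implementation:

static int plic_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;

	/* Pick a single target CPU for this interrupt line. */
	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* Disable the line everywhere, then enable it on the chosen CPU. */
	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}

Such a callback would then be wired into plic_chip as .irq_set_affinity
(presumably under CONFIG_SMP) by a later patch in this series.
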
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index f93ec83eaff4..869355d2a713 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -55,19 +55,25 @@
 #define CONTEXT_THRESHOLD	0x00
 #define CONTEXT_CLAIM		0x04
 
-static void __iomem *plic_regs;
-
 struct plic_handler {
 	bool			present;
-	int			ctxid;
 	void __iomem		*hart_base;
 	raw_spinlock_t		enable_lock;
 	void __iomem		*enable_base;
 };
-static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
 
-static inline void plic_toggle(struct plic_handler *handler,
-				int hwirq, int enable)
+struct plic_hw {
+	u32			nr_irqs;
+	u32			nr_handlers;
+	u32			nr_mapped;
+	void __iomem		*regs;
+	struct plic_handler	*handlers;
+	struct irq_domain	*irqdomain;
+};
+
+static struct plic_hw plic;
+
+static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
 {
 	u32 __iomem *reg = handler->enable_base + (hwirq / 32);
 	u32 hwirq_mask = 1 << (hwirq % 32);
@@ -80,27 +86,23 @@ static inline void plic_toggle(struct plic_handler *handler,
 	raw_spin_unlock(&handler->enable_lock);
 }
 
-static inline void plic_irq_toggle(struct irq_data *d, int enable)
+static void plic_irq_toggle(const struct cpumask *mask, int hwirq, int enable)
 {
 	int cpu;
 
-	writel(enable, plic_regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
-	for_each_cpu(cpu, irq_data_get_affinity_mask(d)) {
-		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
-
-		if (handler->present)
-			plic_toggle(handler, d->hwirq, enable);
-	}
+	writel(enable, plic.regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
+	for_each_cpu(cpu, mask)
+		plic_toggle(per_cpu_ptr(plic.handlers, cpu), hwirq, enable);
 }
 
 static void plic_irq_enable(struct irq_data *d)
 {
-	plic_irq_toggle(d, 1);
+	plic_irq_toggle(irq_data_get_affinity_mask(d), d->hwirq, 1);
 }
 
 static void plic_irq_disable(struct irq_data *d)
 {
-	plic_irq_toggle(d, 0);
+	plic_irq_toggle(irq_data_get_affinity_mask(d), d->hwirq, 0);
 }
 
 static struct irq_chip plic_chip = {
@@ -127,8 +129,6 @@ static const struct irq_domain_ops plic_irqdomain_ops = {
 	.xlate		= irq_domain_xlate_onecell,
 };
 
-static struct irq_domain *plic_irqdomain;
-
 /*
  * Handling an interrupt is a two-step process: first you claim the interrupt
  * by reading the claim register, then you complete the interrupt by writing
@@ -137,7 +137,7 @@ static struct irq_domain *plic_irqdomain;
  */
 static void plic_handle_irq(struct pt_regs *regs)
 {
-	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+	struct plic_handler *handler = this_cpu_ptr(plic.handlers);
 	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
 	irq_hw_number_t hwirq;
 
@@ -145,7 +145,7 @@ static void plic_handle_irq(struct pt_regs *regs)
 
 	csr_clear(sie, SIE_SEIE);
 	while ((hwirq = readl(claim))) {
-		int irq = irq_find_mapping(plic_irqdomain, hwirq);
+		int irq = irq_find_mapping(plic.irqdomain, hwirq);
 
 		if (unlikely(irq <= 0))
 			pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
@@ -174,36 +174,39 @@ static int plic_find_hart_id(struct device_node *node)
 static int __init plic_init(struct device_node *node,
 		struct device_node *parent)
 {
-	int error = 0, nr_handlers, nr_mapped = 0, i;
-	u32 nr_irqs;
+	int error = 0, i;
 
-	if (plic_regs) {
+	if (plic.regs) {
 		pr_warn("PLIC already present.\n");
 		return -ENXIO;
 	}
 
-	plic_regs = of_iomap(node, 0);
-	if (WARN_ON(!plic_regs))
+	plic.regs = of_iomap(node, 0);
+	if (WARN_ON(!plic.regs))
 		return -EIO;
 
 	error = -EINVAL;
-	of_property_read_u32(node, "riscv,ndev", &nr_irqs);
-	if (WARN_ON(!nr_irqs))
+	of_property_read_u32(node, "riscv,ndev", &plic.nr_irqs);
+	if (WARN_ON(!plic.nr_irqs))
 		goto out_iounmap;
 
-	nr_handlers = of_irq_count(node);
-	if (WARN_ON(!nr_handlers))
+	plic.nr_handlers = of_irq_count(node);
+	if (WARN_ON(!plic.nr_handlers))
 		goto out_iounmap;
-	if (WARN_ON(nr_handlers < num_possible_cpus()))
+	if (WARN_ON(plic.nr_handlers < num_possible_cpus()))
 		goto out_iounmap;
 
 	error = -ENOMEM;
-	plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
-			&plic_irqdomain_ops, NULL);
-	if (WARN_ON(!plic_irqdomain))
+	plic.handlers = alloc_percpu(struct plic_handler);
+	if (!plic.handlers)
 		goto out_iounmap;
 
-	for (i = 0; i < nr_handlers; i++) {
+	plic.irqdomain = irq_domain_add_linear(node, plic.nr_irqs + 1,
+			&plic_irqdomain_ops, NULL);
+	if (WARN_ON(!plic.irqdomain))
+		goto out_free_handlers;
+
+	for (i = 0; i < plic.nr_handlers; i++) {
 		struct of_phandle_args parent;
 		struct plic_handler *handler;
 		irq_hw_number_t hwirq;
@@ -225,29 +228,31 @@ static int __init plic_init(struct device_node *node,
 		}
 
 		cpu = riscv_hartid_to_cpuid(hartid);
-		handler = per_cpu_ptr(&plic_handlers, cpu);
+		handler = per_cpu_ptr(plic.handlers, cpu);
 		handler->present = true;
-		handler->ctxid = i;
 		handler->hart_base =
-			plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+			plic.regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
 		raw_spin_lock_init(&handler->enable_lock);
 		handler->enable_base =
-			plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
+			plic.regs + ENABLE_BASE + i * ENABLE_PER_HART;
 
 		/* priority must be > threshold to trigger an interrupt */
 		writel(0, handler->hart_base + CONTEXT_THRESHOLD);
-		for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
+		for (hwirq = 1; hwirq <= plic.nr_irqs; hwirq++)
 			plic_toggle(handler, hwirq, 0);
-		nr_mapped++;
+
+		plic.nr_mapped++;
 	}
 
 	pr_info("mapped %d interrupts to %d (out of %d) handlers.\n",
-		nr_irqs, nr_mapped, nr_handlers);
+		plic.nr_irqs, plic.nr_mapped, plic.nr_handlers);
 	set_handle_irq(plic_handle_irq);
 	return 0;
 
+out_free_handlers:
+	free_percpu(plic.handlers);
 out_iounmap:
-	iounmap(plic_regs);
+	iounmap(plic.regs);
 	return error;
 }
 
--
2.17.1