[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251016195902.338629-1-cmirabil@redhat.com>
Date: Thu, 16 Oct 2025 15:58:57 -0400
From: Charles Mirabile <cmirabil@...hat.com>
To: tglx@...utronix.de
Cc: alex@...ti.fr,
aou@...s.berkeley.edu,
cmirabil@...hat.com,
conor+dt@...nel.org,
devicetree@...r.kernel.org,
dramforever@...e.com,
krzk+dt@...nel.org,
linux-kernel@...r.kernel.org,
linux-riscv@...ts.infradead.org,
lzampier@...hat.com,
palmer@...belt.com,
paul.walmsley@...ive.com,
robh@...nel.org,
samuel.holland@...ive.com,
zhangxincheng@...rarisc.com
Subject: Re: [PATCH v5 3/3] irqchip/plic: add support for UltraRISC DP1000 PLIC
Hi Thomas -
On Thu, Oct 16, 2025 at 07:53:26PM +0200, Thomas Gleixner wrote:
> On Thu, Oct 16 2025 at 12:52, Charles Mirabile wrote:
> > On Thu, Oct 16, 2025 at 12:12 PM Thomas Gleixner <tglx@...utronix.de> wrote:
> >> >> bit = ffs(pending) - 1;
> >> >> handler->enabled_clear[group] |= BIT(bit);
> >> >> for (int i = 0; i < nr_irq_groups; i++)
> >> >> writel_relaxed(handler->enabled_clear[i], enable + i * sizeof(u32));
> >> >> handler->enabled_clear[group] = 0;
> >> >>
> >> >> No?
> >> >
> >> > Sure that would also work, but why are we using ffs (slow) only to
> >> > shift the result back to make a new mask when (x & -x) is faster and
> >> > skips the intermediate step delivering immediately the mask of the
> >> > lowest bit.
> >>
> >> Because I did not spend time thinking about it.
> >
> > Sorry, did you mean "because I had not considered the original
> > approach carefully enough" or "because this other approach, while
> > slower, is more self evidently correct."
>
> I did not think about x & -x :)
>
> >> It's a pointer in struct plic_handler (or whatever it's named) and you
> >> can allocate it when the quirk is required. The pointer is definitely
> >> not a burden for anyone else.
> >
> > This I still don't understand how this is particuarly helpful. Since
> > we are doing mmio, this is going to be an explicit loop and not a
> > memcpy. The code is branchless in either case (set equal for the check
> > of i against j negate and and with mask before loading into the mmio).
>
> Fair enough. I did not think in RISC ASM :)
What do you think about the attached patch (it should be not corrupt :^)
I think I addressed your concerns, and I ran it through clang-format too.
I folded everything into one diff for ease of review, but when we send it
officially there will be a separate patch for the caching refactor.
Best - Charlie
---
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index cbd7697bc148..f8f3384ecaab 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -49,6 +49,8 @@
#define CONTEXT_ENABLE_BASE 0x2000
#define CONTEXT_ENABLE_SIZE 0x80
+#define PENDING_BASE 0x1000
+
/*
* Each hart context has a set of control registers associated with it. Right
* now there's only two: a source priority threshold over which the hart will
@@ -63,6 +65,7 @@
#define PLIC_ENABLE_THRESHOLD 0
#define PLIC_QUIRK_EDGE_INTERRUPT 0
+#define PLIC_QUIRK_CP100_CLAIM_REGISTER_ERRATUM 1
struct plic_priv {
struct fwnode_handle *fwnode;
@@ -94,15 +97,22 @@ static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
static int plic_irq_set_type(struct irq_data *d, unsigned int type);
-static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
+static void __plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
- u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32);
+ u32 __iomem *base = handler->enable_base;
u32 hwirq_mask = 1 << (hwirq % 32);
+ int group = hwirq / 32;
+ u32 value;
+
+ value = readl(base + group);
if (enable)
- writel(readl(reg) | hwirq_mask, reg);
+ value |= hwirq_mask;
else
- writel(readl(reg) & ~hwirq_mask, reg);
+ value &= ~hwirq_mask;
+
+ handler->enable_save[group] = value;
+ writel(value, base + group);
}
static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
@@ -110,7 +120,7 @@ static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
unsigned long flags;
raw_spin_lock_irqsave(&handler->enable_lock, flags);
- __plic_toggle(handler->enable_base, hwirq, enable);
+ __plic_toggle(handler, hwirq, enable);
raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}
@@ -247,33 +257,16 @@ static int plic_irq_set_type(struct irq_data *d, unsigned int type)
static int plic_irq_suspend(void)
{
- unsigned int i, cpu;
- unsigned long flags;
- u32 __iomem *reg;
struct plic_priv *priv;
priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;
/* irq ID 0 is reserved */
- for (i = 1; i < priv->nr_irqs; i++) {
+ for (unsigned int i = 1; i < priv->nr_irqs; i++) {
__assign_bit(i, priv->prio_save,
readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID));
}
- for_each_present_cpu(cpu) {
- struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
-
- if (!handler->present)
- continue;
-
- raw_spin_lock_irqsave(&handler->enable_lock, flags);
- for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
- reg = handler->enable_base + i * sizeof(u32);
- handler->enable_save[i] = readl(reg);
- }
- raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
- }
-
return 0;
}
@@ -398,6 +391,85 @@ static void plic_handle_irq(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+static bool cp100_isolate_pending_irq(int nr_irq_groups, u32 ie[],
+ u32 __iomem *pending,
+ u32 __iomem *enable)
+{
+ u32 pending_irqs = 0;
+ int i, j;
+
+ /* Look for first pending interrupt */
+ for (i = 0; !pending_irqs && i < nr_irq_groups; i++)
+ pending_irqs = ie[i] & readl_relaxed(pending + i);
+
+ if (!pending_irqs)
+ return false;
+
+ /* Isolate lowest set bit */
+ pending_irqs &= -pending_irqs;
+
+ /* Disable all interrupts but the first pending one */
+ for (j = 0; j < nr_irq_groups; j++)
+ writel_relaxed(j == i ? pending_irqs : 0, enable + j);
+
+ return true;
+}
+
+static irq_hw_number_t cp100_get_hwirq(struct plic_handler *handler,
+ void __iomem *claim)
+{
+ int nr_irq_groups = DIV_ROUND_UP(handler->priv->nr_irqs, 32);
+ u32 __iomem *pending = handler->priv->regs + PENDING_BASE;
+ u32 __iomem *enable = handler->enable_base;
+ irq_hw_number_t hwirq = 0;
+ int i;
+
+ guard(raw_spinlock)(&handler->enable_lock);
+
+ /* Existing enable state is already cached in enable_save */
+ if (!cp100_isolate_pending_irq(nr_irq_groups, handler->enable_save, pending, enable))
+ return 0;
+
+ /*
+ * Interrupts delivered to hardware still become pending, but only
+ * interrupts that are both pending and enabled can be claimed.
+ * Clearing enable bit for all interrupts but the first pending one
+ * avoids hardware bug that occurs during read from claim register
+ * with more than one eligible interrupt.
+ */
+
+ hwirq = readl(claim);
+
+ /* Restore previous state */
+ for (i = 0; i < nr_irq_groups; i++)
+ writel_relaxed(handler->enable_save[i], enable + i);
+
+ return hwirq;
+}
+
+static void plic_handle_irq_cp100(struct irq_desc *desc)
+{
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
+ irq_hw_number_t hwirq;
+
+ WARN_ON_ONCE(!handler->present);
+
+ chained_irq_enter(chip, desc);
+
+ while ((hwirq = cp100_get_hwirq(handler, claim))) {
+ int err = generic_handle_domain_irq(handler->priv->irqdomain, hwirq);
+
+ if (unlikely(err)) {
+ pr_warn_ratelimited("%pfwP: can't find mapping for hwirq %lu\n",
+ handler->priv->fwnode, hwirq);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
/* priority must be > threshold to trigger an interrupt */
@@ -434,6 +506,8 @@ static const struct of_device_id plic_match[] = {
.data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
{ .compatible = "thead,c900-plic",
.data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
+ { .compatible = "ultrarisc,cp100-plic",
+ .data = (const void *)BIT(PLIC_QUIRK_CP100_CLAIM_REGISTER_ERRATUM) },
{}
};
@@ -668,12 +742,17 @@ static int plic_probe(struct fwnode_handle *fwnode)
}
if (global_setup) {
+ void (*handler_fn)(struct irq_desc *) = plic_handle_irq;
+
+ if (test_bit(PLIC_QUIRK_CP100_CLAIM_REGISTER_ERRATUM, &handler->priv->plic_quirks))
+ handler_fn = plic_handle_irq_cp100;
+
/* Find parent domain and register chained handler */
domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
if (domain)
plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
if (plic_parent_irq)
- irq_set_chained_handler(plic_parent_irq, plic_handle_irq);
+ irq_set_chained_handler(plic_parent_irq, handler_fn);
cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
"irqchip/sifive/plic:starting",
--
2.51.0
Powered by blists - more mailing lists