Message-ID: <CAOOg__AcRVPRXsDdPPe3QkJybiTYSRCLLHR59qVnH2burfRaNw@mail.gmail.com>
Date: Fri, 24 Oct 2025 09:34:47 +0100
From: Lucas Zampieri <lzampier@...hat.com>
To: Charles Mirabile <cmirabil@...hat.com>
Cc: tglx@...utronix.de, alex@...ti.fr, aou@...s.berkeley.edu,
conor+dt@...nel.org, devicetree@...r.kernel.org, dramforever@...e.com,
krzk+dt@...nel.org, linux-kernel@...r.kernel.org,
linux-riscv@...ts.infradead.org, palmer@...belt.com, paul.walmsley@...ive.com,
robh@...nel.org, samuel.holland@...ive.com
Subject: Re: [PATCH v6 0/4] Add UltraRISC DP1000 PLIC support
Hi Thomas and Charles,
Yes, I missed the cc list on that one. I'm sending the v6 series again
with the correct headers.
Sorry about that.
Lucas Zampieri
Platform Enablement Team
On Thu, Oct 23, 2025 at 9:17 PM Charles Mirabile <cmirabil@...hat.com> wrote:
>
> Hi Thomas—
>
> On Thu, Oct 23, 2025 at 09:29:44PM +0200, Thomas Gleixner wrote:
> > On Thu, Oct 23 2025 at 15:00, Lucas Zampieri wrote:
> > > This series adds support for the PLIC implementation in the UltraRISC
> > > DP1000 SoC. The UR-CP100 cores used in the DP1000 have a hardware bug in
> > > their PLIC claim register where reading it while multiple interrupts are
> > > pending can return the wrong interrupt ID. The workaround temporarily
> > > disables all interrupts except the first pending one before reading the
> > > claim register, then restores the previous state.
> > >
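> > > Roughly, the claim-side workaround looks like this (an illustrative
> > > sketch only: the function name and the pending_of() accessor are
> > > placeholders, locking against plic_toggle() is elided, and the real
> > > quirk handler lives in patch 4/4, which is not quoted here):
> > >
> > > static irq_hw_number_t cp100_claim(struct plic_handler *handler, int nr_words)
> > > {
> > > 	irq_hw_number_t hwirq;
> > > 	int i, first = -1;
> > >
> > > 	/* Leave only the first pending-and-enabled source unmasked. */
> > > 	for (i = 0; i < nr_words; i++) {
> > > 		u32 ready = pending_of(handler, i) & handler->enable_save[i];
> > >
> > > 		if (first < 0 && ready)
> > > 			first = i * 32 + __ffs(ready);
> > >
> > > 		writel((first >= 0 && first / 32 == i) ? BIT(first % 32) : 0,
> > > 		       handler->enable_base + i * sizeof(u32));
> > > 	}
> > >
> > > 	/* With a single candidate left, the claim read cannot return a bogus ID. */
> > > 	hwirq = readl(handler->hart_base + CONTEXT_CLAIM);
> > >
> > > 	/* Restore the enable state that patch 3/4 keeps cached. */
> > > 	for (i = 0; i < nr_words; i++)
> > > 		writel(handler->enable_save[i],
> > > 		       handler->enable_base + i * sizeof(u32));
> > >
> > > 	return hwirq;
> > > }
> > >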
> > > The driver matches on "ultrarisc,cp100-plic" (CPU core compatible), allowing
> > > the quirk to apply to all SoCs using UR-CP100 cores (currently DP1000,
> > > potentially future SoCs).
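> > >
> > > If it follows the quirk plumbing the driver already uses for the
> > > StarFive and Canaan parts, the new compatible would just carry a quirk
> > > flag in the match table, along these lines (PLIC_QUIRK_BROKEN_CLAIM is
> > > a made-up name for illustration):
> > >
> > > static const struct of_device_id plic_match[] = {
> > > 	{ .compatible = "sifive,plic-1.0.0" },
> > > 	/* ... existing entries ... */
> > > 	{ .compatible = "ultrarisc,cp100-plic",
> > > 	  .data = (const void *)BIT(PLIC_QUIRK_BROKEN_CLAIM) },
> > > 	{}
> > > };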
> > >
> > > Charles Mirabile (3):
> > > dt-bindings: interrupt-controller: add UltraRISC DP1000 PLIC
> > > irqchip/plic: enable optimization of interrupt enable state
> >
> > That one never showed up. Neither in my inbox nor on lore.
>
> Looks like the CC list was somehow missing from that patch. I didn't notice because the patch still reached my inbox via my Signed-off-by.
>
> The indexing on the patches was slightly wrong anyway, so we will resend tomorrow. Sorry for the noise.
>
> I have attached it here in case you want to take a look.
>
> >
> -- >8 --
> From: Charles Mirabile <cmirabil@...hat.com>
> Subject: [PATCH v6 3/4] irqchip/plic: enable optimization of interrupt enable state
>
> Optimize the PLIC driver by maintaining the interrupt enable state in
> the handler's enable_save array during normal operation rather than only
> during suspend/resume. This eliminates the need to read enable registers
> during suspend and makes the enable state immediately available for
> other optimizations.
>
> Modify __plic_toggle() to take a handler pointer instead of enable_base,
> allowing it to update both the hardware registers and the cached
> enable_save state atomically within the existing enable_lock protection.
>
> Remove the suspend-time enable register reading since enable_save now
> always reflects the current state.
>
> Signed-off-by: Charles Mirabile <cmirabil@...hat.com>
>
> ---
> drivers/irqchip/irq-sifive-plic.c | 36 +++++++++++--------------------
> 1 file changed, 13 insertions(+), 23 deletions(-)
>
> diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
> index cbd7697bc1481..d518a8b468742 100644
> --- a/drivers/irqchip/irq-sifive-plic.c
> +++ b/drivers/irqchip/irq-sifive-plic.c
> @@ -94,15 +94,22 @@ static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
>
> static int plic_irq_set_type(struct irq_data *d, unsigned int type);
>
> -static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
> +static void __plic_toggle(struct plic_handler *handler, int hwirq, int enable)
> {
> - u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32);
> + u32 __iomem *base = handler->enable_base;
> u32 hwirq_mask = 1 << (hwirq % 32);
> + int group = hwirq / 32;
> + u32 value;
> +
> + value = readl(base + group);
>
> if (enable)
> - writel(readl(reg) | hwirq_mask, reg);
> + value |= hwirq_mask;
> else
> - writel(readl(reg) & ~hwirq_mask, reg);
> + value &= ~hwirq_mask;
> +
> + handler->enable_save[group] = value;
> + writel(value, base + group);
> }
>
> static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
> @@ -110,7 +117,7 @@ static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
> unsigned long flags;
>
> raw_spin_lock_irqsave(&handler->enable_lock, flags);
> - __plic_toggle(handler->enable_base, hwirq, enable);
> + __plic_toggle(handler, hwirq, enable);
> raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
> }
>
> @@ -247,33 +254,16 @@ static int plic_irq_set_type(struct irq_data *d, unsigned int type)
>
> static int plic_irq_suspend(void)
> {
> - unsigned int i, cpu;
> - unsigned long flags;
> - u32 __iomem *reg;
> struct plic_priv *priv;
>
> priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;
>
> /* irq ID 0 is reserved */
> - for (i = 1; i < priv->nr_irqs; i++) {
> + for (unsigned int i = 1; i < priv->nr_irqs; i++) {
> __assign_bit(i, priv->prio_save,
> readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID));
> }
>
> - for_each_present_cpu(cpu) {
> - struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
> -
> - if (!handler->present)
> - continue;
> -
> - raw_spin_lock_irqsave(&handler->enable_lock, flags);
> - for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
> - reg = handler->enable_base + i * sizeof(u32);
> - handler->enable_save[i] = readl(reg);
> - }
> - raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
> - }
> -
> return 0;
> }
>
> --
> 2.51.0
>
>