Message-ID: <8735euzq60.wl-maz@kernel.org>
Date: Thu, 21 Jul 2022 12:00:23 +0100
From: Marc Zyngier <maz@...nel.org>
To: Anup Patel <apatel@...tanamicro.com>
Cc: Palmer Dabbelt <palmer@...belt.com>,
Paul Walmsley <paul.walmsley@...ive.com>,
Thomas Gleixner <tglx@...utronix.de>,
Daniel Lezcano <daniel.lezcano@...aro.org>,
Atish Patra <atishp@...shpatra.org>,
Alistair Francis <Alistair.Francis@....com>,
Anup Patel <anup@...infault.org>,
linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v7 3/7] genirq: Add mechanism to multiplex a single HW IPI

On Wed, 20 Jul 2022 16:23:44 +0100,
Anup Patel <apatel@...tanamicro.com> wrote:
>
> All RISC-V platforms have a single HW IPI provided by the INTC local
> interrupt controller. The HW method to trigger INTC IPI can be through
> external irqchip (e.g. RISC-V AIA), through platform specific device
> (e.g. SiFive CLINT timer), or through firmware (e.g. SBI IPI call).
>
> To support multiple IPIs on RISC-V, we add a generic IPI multiplexing
> mechanism which helps us create multiple virtual IPIs using a single
> HW IPI. This generic IPI multiplexing is shared among various RISC-V
> irqchip drivers.
>
> Signed-off-by: Anup Patel <apatel@...tanamicro.com>
> ---
> include/linux/irq.h | 16 ++++
> kernel/irq/Kconfig | 4 +
> kernel/irq/Makefile | 1 +
> kernel/irq/ipi-mux.c | 199 +++++++++++++++++++++++++++++++++++++++++++
> 4 files changed, 220 insertions(+)
> create mode 100644 kernel/irq/ipi-mux.c
>
> diff --git a/include/linux/irq.h b/include/linux/irq.h
> index 505308253d23..a97bf13a8965 100644
> --- a/include/linux/irq.h
> +++ b/include/linux/irq.h
> @@ -1249,6 +1249,22 @@ int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
> int ipi_send_single(unsigned int virq, unsigned int cpu);
> int ipi_send_mask(unsigned int virq, const struct cpumask *dest);
>
> +/**
> + * struct ipi_mux_ops - IPI multiplex operations
> + *
> + * @ipi_mux_clear: Optional function to clear parent IPI
> + * @ipi_mux_send: Trigger parent IPI on target CPUs
> + */
> +struct ipi_mux_ops {
> + void (*ipi_mux_clear)(unsigned int parent_virq);
> + void (*ipi_mux_send)(unsigned int parent_virq,
> + const struct cpumask *mask);
> +};
> +
> +void ipi_mux_process(void);
> +int ipi_mux_create(unsigned int parent_virq, unsigned int nr_ipi,
> + const struct ipi_mux_ops *ops);
> +
> #ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
> /*
> * Registers a generic IRQ handling function as the top-level IRQ handler in
> diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
> index 10929eda9825..2388e7d40ed3 100644
> --- a/kernel/irq/Kconfig
> +++ b/kernel/irq/Kconfig
> @@ -84,6 +84,10 @@ config GENERIC_IRQ_IPI
> bool
> select IRQ_DOMAIN_HIERARCHY
>
> +# Generic IRQ IPI Mux support
> +config GENERIC_IRQ_IPI_MUX
> + bool
> +
> # Generic MSI interrupt support
> config GENERIC_MSI_IRQ
> bool
> diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
> index b4f53717d143..f19d3080bf11 100644
> --- a/kernel/irq/Makefile
> +++ b/kernel/irq/Makefile
> @@ -15,6 +15,7 @@ obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o
> obj-$(CONFIG_PM_SLEEP) += pm.o
> obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
> obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o
> +obj-$(CONFIG_GENERIC_IRQ_IPI_MUX) += ipi-mux.o
> obj-$(CONFIG_SMP) += affinity.o
> obj-$(CONFIG_GENERIC_IRQ_DEBUGFS) += debugfs.o
> obj-$(CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR) += matrix.o
> diff --git a/kernel/irq/ipi-mux.c b/kernel/irq/ipi-mux.c
> new file mode 100644
> index 000000000000..bd6b31ca588b
> --- /dev/null
> +++ b/kernel/irq/ipi-mux.c
> @@ -0,0 +1,199 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * Multiplex several virtual IPIs over a single HW IPI.
> + *
> + * Copyright (c) 2022 Ventana Micro Systems Inc.
> + */
> +
> +#define pr_fmt(fmt) "ipi-mux: " fmt
> +#include <linux/cpu.h>
> +#include <linux/init.h>
> +#include <linux/irq.h>
> +#include <linux/irqchip.h>
> +#include <linux/irqchip/chained_irq.h>
> +#include <linux/irqdomain.h>
> +#include <linux/smp.h>
> +
> +static unsigned int ipi_mux_nr;
> +static unsigned int ipi_mux_parent_virq;
> +static struct irq_domain *ipi_mux_domain;
> +static const struct ipi_mux_ops *ipi_mux_ops;
> +static DEFINE_PER_CPU(unsigned long, ipi_mux_bits);
> +
> +static void ipi_mux_send_mask(struct irq_data *d, const struct cpumask *mask)
> +{
> + int cpu;
> +
> + /* Barrier before doing atomic bit update to IPI bits */
> + smp_mb__before_atomic();
> +
> + for_each_cpu(cpu, mask)
> + set_bit(d->hwirq, per_cpu_ptr(&ipi_mux_bits, cpu));
> +
> + /* Barrier after doing atomic bit update to IPI bits */
> + smp_mb__after_atomic();
> +
> + /* Trigger the parent IPI */
> + ipi_mux_ops->ipi_mux_send(ipi_mux_parent_virq, mask);
> +}
> +
> +static const struct irq_chip ipi_mux_chip = {
> + .name = "IPI Mux",
> + .ipi_send_mask = ipi_mux_send_mask,

I've given this a bit more thought, and I came to the conclusion that
we really should have the full masking semantics here, even if Linux
currently doesn't really use it.

It makes the handling a bit more complex, and unmasking a pending IPI
must be handled gracefully, but we have already implemented most of
that code in the irq-apple-aic driver.

And if we go down this road, such a driver should be very easy to move
over to this infrastructure, making the change a lot more palatable.
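
Something along these lines, purely as a sketch (the names are
illustrative, not the actual apple-aic code): keep a per-CPU enable
mask next to the pending mask, and resend the parent IPI when a
pending vIPI gets unmasked:

static DEFINE_PER_CPU(unsigned long, ipi_mux_enable);

static void ipi_mux_mask(struct irq_data *d)
{
	/* Stop handling this vIPI on the local CPU */
	clear_bit(d->hwirq, this_cpu_ptr(&ipi_mux_enable));
}

static void ipi_mux_unmask(struct irq_data *d)
{
	set_bit(d->hwirq, this_cpu_ptr(&ipi_mux_enable));

	/*
	 * A vIPI that became pending while masked must not be
	 * lost: kick the parent IPI at ourselves so that
	 * ipi_mux_process() picks it up again.
	 */
	smp_mb__after_atomic();
	if (test_bit(d->hwirq, this_cpu_ptr(&ipi_mux_bits)))
		ipi_mux_ops->ipi_mux_send(ipi_mux_parent_virq,
					  cpumask_of(smp_processor_id()));
}

ipi_mux_process() would then only act on bits that are both pending
and enabled, and the send path stays as it is.
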
M.
--
Without deviation from the norm, progress is not possible.