Message-ID: <20250116-boxy-handoff-2f2790b5388e@spud>
Date: Thu, 16 Jan 2025 13:58:29 +0000
From: Conor Dooley <conor@...nel.org>
To: Clément Léger <cleger@...osinc.com>
Cc: Paul Walmsley <paul.walmsley@...ive.com>,
Palmer Dabbelt <palmer@...belt.com>,
linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org,
Himanshu Chauhan <hchauhan@...tanamicro.com>,
Anup Patel <apatel@...tanamicro.com>,
Xu Lu <luxu.kernel@...edance.com>,
Atish Patra <atishp@...shpatra.org>
Subject: Re: [PATCH v3 3/4] drivers: firmware: add riscv SSE support
On Fri, Dec 06, 2024 at 05:30:59PM +0100, Clément Léger wrote:
> Add a driver-level interface to use the RISC-V SSE arch support. This
> interface allows registering SSE handlers and receiving SSE events. It
> will be used by the PMU and GHES drivers.
>
> Signed-off-by: Himanshu Chauhan <hchauhan@...tanamicro.com>
> Co-developed-by: Himanshu Chauhan <hchauhan@...tanamicro.com>
> Signed-off-by: Clément Léger <cleger@...osinc.com>
> ---
> MAINTAINERS | 14 +
> drivers/firmware/Kconfig | 1 +
> drivers/firmware/Makefile | 1 +
> drivers/firmware/riscv/Kconfig | 15 +
> drivers/firmware/riscv/Makefile | 3 +
> drivers/firmware/riscv/riscv_sse.c | 691 +++++++++++++++++++++++++++++
> include/linux/riscv_sse.h | 56 +++
> 7 files changed, 781 insertions(+)
> create mode 100644 drivers/firmware/riscv/Kconfig
> create mode 100644 drivers/firmware/riscv/Makefile
> create mode 100644 drivers/firmware/riscv/riscv_sse.c
> create mode 100644 include/linux/riscv_sse.h
>
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 686109008d8e..a3ddde7fe9fb 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -20125,6 +20125,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git
> F: Documentation/devicetree/bindings/iommu/riscv,iommu.yaml
> F: drivers/iommu/riscv/
>
> +RISC-V FIRMWARE DRIVERS
> +M: Conor Dooley <conor@...nel.org>
> +L: linux-riscv@...ts.infradead.org
> +S: Maintained
> +T: git git://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git
> +F: drivers/firmware/riscv/*
Acked-by: Conor Dooley <conor.dooley@...rochip.com>
(I've got some, mostly minor, comments below)
> diff --git a/drivers/firmware/riscv/Makefile b/drivers/firmware/riscv/Makefile
> new file mode 100644
> index 000000000000..4ccfcbbc28ea
> --- /dev/null
> +++ b/drivers/firmware/riscv/Makefile
> @@ -0,0 +1,3 @@
> +# SPDX-License-Identifier: GPL-2.0
> +
> +obj-$(CONFIG_RISCV_SSE) += riscv_sse.o
> diff --git a/drivers/firmware/riscv/riscv_sse.c b/drivers/firmware/riscv/riscv_sse.c
> new file mode 100644
> index 000000000000..c165e32cc9a5
> --- /dev/null
> +++ b/drivers/firmware/riscv/riscv_sse.c
> @@ -0,0 +1,691 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * Copyright (C) 2024 Rivos Inc.
> + */
> +
> +#define pr_fmt(fmt) "sse: " fmt
> +
> +#include <linux/cpu.h>
> +#include <linux/cpuhotplug.h>
> +#include <linux/cpu_pm.h>
> +#include <linux/hardirq.h>
> +#include <linux/list.h>
> +#include <linux/percpu-defs.h>
> +#include <linux/reboot.h>
> +#include <linux/riscv_sse.h>
> +#include <linux/slab.h>
> +
> +#include <asm/sbi.h>
> +#include <asm/sse.h>
> +
> +struct sse_event {
> + struct list_head list;
> + u32 evt;
> + u32 priority;
> + sse_event_handler *handler;
> + void *handler_arg;
> + bool is_enabled;
> + /* Only valid for global events */
> + unsigned int cpu;
> +
> + union {
> + struct sse_registered_event *global;
> + struct sse_registered_event __percpu *local;
> + };
> +};
> +
> +static int sse_hp_state;
> +static bool sse_available;
> +static DEFINE_SPINLOCK(events_list_lock);
> +static LIST_HEAD(events);
> +static DEFINE_MUTEX(sse_mutex);
> +
> +struct sse_registered_event {
> + struct sse_event_arch_data arch;
> + struct sse_event *evt;
> + unsigned long attr_buf;
> +};
> +
> +void sse_handle_event(struct sse_event_arch_data *arch_event,
> + struct pt_regs *regs)
> +{
> + int ret;
> + struct sse_registered_event *reg_evt =
> + container_of(arch_event, struct sse_registered_event, arch);
> + struct sse_event *evt = reg_evt->evt;
> +
> + ret = evt->handler(evt->evt, evt->handler_arg, regs);
Is it possible to get here with a null handler? Or will !registered
events not lead to the handler getting called?
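If it is possible, something along these lines (completely untested, just
to illustrate what I mean) might be safer before invoking the handler:

	if (unlikely(!evt->handler)) {
		pr_warn("event %x received with no handler\n", evt->evt);
		return;
	}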
> + if (ret)
> + pr_warn("event %x handler failed with error %d\n", evt->evt,
> + ret);
> +}
> +
> +static bool sse_event_is_global(u32 evt)
> +{
> + return !!(evt & SBI_SSE_EVENT_GLOBAL);
> +}
> +
> +static
> +struct sse_event *sse_event_get(u32 evt)
nit: Could you shift this into one line?
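i.e. just:

	static struct sse_event *sse_event_get(u32 evt)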
> +{
> + struct sse_event *tmp;
> +
> + scoped_guard(spinlock, &events_list_lock) {
> + list_for_each_entry(tmp, &events, list) {
> + if (tmp->evt == evt) {
> + return tmp;
> + }
> + }
> + }
> +
> + return NULL;
> +}
> +
> +static phys_addr_t sse_event_get_phys(struct sse_registered_event *reg_evt,
> + void *addr)
> +{
> + phys_addr_t phys;
> +
> + if (sse_event_is_global(reg_evt->evt->evt))
> + phys = virt_to_phys(addr);
> + else
> + phys = per_cpu_ptr_to_phys(addr);
> +
> + return phys;
> +}
> +
> +static int sse_sbi_event_func(struct sse_event *event, unsigned long func)
> +{
> + struct sbiret ret;
> + u32 evt = event->evt;
> +
> + ret = sbi_ecall(SBI_EXT_SSE, func, evt, 0, 0, 0, 0, 0);
> + if (ret.error)
> + pr_debug("Failed to execute func %lx, event %x, error %ld\n",
> + func, evt, ret.error);
Why's this only at a debug level?
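Unless callers are expected to handle and report the failure themselves,
something like the below (untested) would make failures visible:

	if (ret.error)
		pr_warn("Failed to execute func %lx, event %x, error %ld\n",
			func, evt, ret.error);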
> +
> + return sbi_err_map_linux_errno(ret.error);
> +}
> +
> +static int sse_sbi_disable_event(struct sse_event *event)
> +{
> + return sse_sbi_event_func(event, SBI_SSE_EVENT_DISABLE);
> +}
> +
> +static int sse_sbi_enable_event(struct sse_event *event)
> +{
> + return sse_sbi_event_func(event, SBI_SSE_EVENT_ENABLE);
> +}
> +
> +static int sse_event_attr_get_no_lock(struct sse_registered_event *reg_evt,
> + unsigned long attr_id, unsigned long *val)
> +{
> + struct sbiret sret;
> + u32 evt = reg_evt->evt->evt;
> + unsigned long phys;
> +
> + phys = sse_event_get_phys(reg_evt, &reg_evt->attr_buf);
> +
> + sret = sbi_ecall(SBI_EXT_SSE, SBI_SSE_EVENT_ATTR_READ, evt,
> + attr_id, 1, phys, 0, 0);
> + if (sret.error) {
> + pr_debug("Failed to get event %x attr %lx, error %ld\n", evt,
> + attr_id, sret.error);
> + return sbi_err_map_linux_errno(sret.error);
> + }
> +
> + *val = reg_evt->attr_buf;
> +
> + return 0;
> +}
> +
> +static int sse_event_attr_set_nolock(struct sse_registered_event *reg_evt,
> + unsigned long attr_id, unsigned long val)
> +{
> + struct sbiret sret;
> + u32 evt = reg_evt->evt->evt;
> + unsigned long phys;
> +
> + reg_evt->attr_buf = val;
> + phys = sse_event_get_phys(reg_evt, &reg_evt->attr_buf);
> +
> + sret = sbi_ecall(SBI_EXT_SSE, SBI_SSE_EVENT_ATTR_WRITE, evt,
> + attr_id, 1, phys, 0, 0);
> + if (sret.error && sret.error != SBI_ERR_INVALID_STATE) {
Why's the invalid state error not treated as an error?
> + pr_debug("Failed to set event %x attr %lx, error %ld\n", evt,
> + attr_id, sret.error);
> + return sbi_err_map_linux_errno(sret.error);
> + }
> +
> + return 0;
> +}
> +
> +static int sse_event_set_target_cpu_nolock(struct sse_event *event,
> + unsigned int cpu)
> +{
> + unsigned int hart_id = cpuid_to_hartid_map(cpu);
> + struct sse_registered_event *reg_evt = event->global;
> + u32 evt = event->evt;
> + bool was_enabled;
> + int ret;
> +
> + if (!sse_event_is_global(evt))
> + return -EINVAL;
> +
> + was_enabled = event->is_enabled;
> + if (was_enabled)
> + sse_sbi_disable_event(event);
> + do {
> + ret = sse_event_attr_set_nolock(reg_evt,
> + SBI_SSE_ATTR_PREFERRED_HART,
> + hart_id);
> + } while (ret == -EINVAL);
> +
> + if (ret == 0)
> + event->cpu = cpu;
> +
> + if (was_enabled)
> + sse_sbi_enable_event(event);
> +
> + return 0;
> +}
> +
> +int sse_event_set_target_cpu(struct sse_event *event, unsigned int cpu)
> +{
> + int ret;
> +
> + scoped_guard(mutex, &sse_mutex) {
> + cpus_read_lock();
> +
> + if (!cpu_online(cpu))
> + return -EINVAL;
> +
> + ret = sse_event_set_target_cpu_nolock(event, cpu);
> +
> + cpus_read_unlock();
> + }
> +
> + return ret;
> +}
> +
> +static int sse_event_init_registered(unsigned int cpu,
> + struct sse_registered_event *reg_evt,
> + struct sse_event *event)
> +{
> + reg_evt->evt = event;
> + arch_sse_init_event(&reg_evt->arch, event->evt, cpu);
> +
> + return 0;
> +}
> +
> +static void sse_event_free_registered(struct sse_registered_event *reg_evt)
> +{
> + arch_sse_free_event(&reg_evt->arch);
> +}
> +
> +static int sse_event_alloc_global(struct sse_event *event)
> +{
> + int err;
> + struct sse_registered_event *reg_evt;
> +
> + reg_evt = kzalloc(sizeof(*reg_evt), GFP_KERNEL);
> + if (!reg_evt)
> + return -ENOMEM;
> +
> + event->global = reg_evt;
> + err = sse_event_init_registered(smp_processor_id(), reg_evt,
> + event);
> + if (err)
> + kfree(reg_evt);
> +
> + return err;
> +}
> +
> +static int sse_event_alloc_local(struct sse_event *event)
> +{
> + int err;
> + unsigned int cpu, err_cpu;
> + struct sse_registered_event *reg_evt;
> + struct sse_registered_event __percpu *reg_evts;
> +
> + reg_evts = alloc_percpu(struct sse_registered_event);
> + if (!reg_evts)
> + return -ENOMEM;
> +
> + event->local = reg_evts;
> +
> + for_each_possible_cpu(cpu) {
> + reg_evt = per_cpu_ptr(reg_evts, cpu);
> + err = sse_event_init_registered(cpu, reg_evt, event);
> + if (err) {
> + err_cpu = cpu;
> + goto err_free_per_cpu;
> + }
> + }
> +
> + return 0;
> +
> +err_free_per_cpu:
> + for_each_possible_cpu(cpu) {
> + if (cpu == err_cpu)
> + break;
> + reg_evt = per_cpu_ptr(reg_evts, cpu);
> + sse_event_free_registered(reg_evt);
> + }
> +
> + free_percpu(reg_evts);
> +
> + return err;
> +}
> +
> +static struct sse_event *sse_event_alloc(u32 evt,
> + u32 priority,
> + sse_event_handler *handler, void *arg)
> +{
> + int err;
> + struct sse_event *event;
> +
> + event = kzalloc(sizeof(*event), GFP_KERNEL);
> + if (!event)
> + return ERR_PTR(-ENOMEM);
> +
> + event->evt = evt;
> + event->priority = priority;
> + event->handler_arg = arg;
> + event->handler = handler;
> +
> + if (sse_event_is_global(evt)) {
> + err = sse_event_alloc_global(event);
> + if (err)
> + goto err_alloc_reg_evt;
> + } else {
> + err = sse_event_alloc_local(event);
> + if (err)
> + goto err_alloc_reg_evt;
> + }
> +
> + return event;
> +
> +err_alloc_reg_evt:
> + kfree(event);
> +
> + return ERR_PTR(err);
> +}
> +
> +static int sse_sbi_register_event(struct sse_event *event,
> + struct sse_registered_event *reg_evt)
> +{
> + int ret;
> +
> + ret = sse_event_attr_set_nolock(reg_evt, SBI_SSE_ATTR_PRIO,
> + event->priority);
> + if (ret)
> + return ret;
> +
> + return arch_sse_register_event(&reg_evt->arch);
> +}
> +
> +static int sse_event_register_local(struct sse_event *event)
> +{
> + int ret;
> + struct sse_registered_event *reg_evt = per_cpu_ptr(event->local,
> + smp_processor_id());
> +
> + ret = sse_sbi_register_event(event, reg_evt);
> + if (ret)
> + pr_debug("Failed to register event %x: err %d\n", event->evt,
> + ret);
Same here I guess, why's a registration failure only a debug print?
> +
> + return ret;
> +}
> +
> +
> +static int sse_sbi_unregister_event(struct sse_event *event)
> +{
> + return sse_sbi_event_func(event, SBI_SSE_EVENT_UNREGISTER);
> +}
> +
> +struct sse_per_cpu_evt {
> + struct sse_event *event;
> + unsigned long func;
> + atomic_t error;
> +};
> +
> +static void sse_event_per_cpu_func(void *info)
> +{
> + int ret;
> + struct sse_per_cpu_evt *cpu_evt = info;
> +
> + if (cpu_evt->func == SBI_SSE_EVENT_REGISTER)
> + ret = sse_event_register_local(cpu_evt->event);
> + else
> + ret = sse_sbi_event_func(cpu_evt->event, cpu_evt->func);
> +
> + if (ret)
> + atomic_set(&cpu_evt->error, ret);
> +}
> +
> +static void sse_event_free(struct sse_event *event)
> +{
> + unsigned int cpu;
> + struct sse_registered_event *reg_evt;
> +
> + if (sse_event_is_global(event->evt)) {
> + sse_event_free_registered(event->global);
> + kfree(event->global);
> + } else {
> + for_each_possible_cpu(cpu) {
> + reg_evt = per_cpu_ptr(event->local, cpu);
> + sse_event_free_registered(reg_evt);
> + }
> + free_percpu(event->local);
> + }
> +
> + kfree(event);
> +}
> +
> +int sse_event_enable(struct sse_event *event)
> +{
> + int ret = 0;
> + struct sse_per_cpu_evt cpu_evt;
> +
> + scoped_guard(mutex, &sse_mutex) {
> + cpus_read_lock();
> + if (sse_event_is_global(event->evt)) {
> + ret = sse_sbi_enable_event(event);
> + } else {
> + cpu_evt.event = event;
> + atomic_set(&cpu_evt.error, 0);
> + cpu_evt.func = SBI_SSE_EVENT_ENABLE;
> + on_each_cpu(sse_event_per_cpu_func, &cpu_evt, 1);
> + ret = atomic_read(&cpu_evt.error);
> + if (ret) {
> + cpu_evt.func = SBI_SSE_EVENT_DISABLE;
> + on_each_cpu(sse_event_per_cpu_func, &cpu_evt,
> + 1);
nit: this should fit on one line, no?
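i.e.:

	on_each_cpu(sse_event_per_cpu_func, &cpu_evt, 1);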
> + }
> + }
> + cpus_read_unlock();
> +
> + if (ret == 0)
> + event->is_enabled = true;
> + }
> +
> + return ret;
> +}
> 2.45.2
>