[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CAOnJCUKy9FqrWBqQRE8okjcKNZ1NZ1d1hm0sBK_QRQyziHMPew@mail.gmail.com>
Date: Thu, 26 Oct 2023 12:52:22 -0700
From: Atish Patra <atishp@...shpatra.org>
To: Clément Léger <cleger@...osinc.com>
Cc: Paul Walmsley <paul.walmsley@...ive.com>,
Palmer Dabbelt <palmer@...belt.com>,
Albert Ou <aou@...s.berkeley.edu>,
Anup Patel <apatel@...tanamicro.com>,
linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org,
Himanshu Chauhan <hchauhan@...tanamicro.com>,
Xu Lu <luxu.kernel@...edance.com>
Subject: Re: [RFC PATCH 3/3] perf: RISC-V: add support for SSE event
On Thu, Oct 26, 2023 at 7:31 AM Clément Léger <cleger@...osinc.com> wrote:
>
> In order to use SSE within PMU drivers, register a SSE handler for the
> local PMU event. There is not a lot of specific code needed to handle
> the SSE event, just reuse the existing overflow IRQ handler and pass
> appropriate pt_regs.
>
> Signed-off-by: Clément Léger <cleger@...osinc.com>
> ---
> arch/riscv/include/asm/sbi.h | 2 ++
> drivers/perf/riscv_pmu_sbi.c | 51 +++++++++++++++++++++++++++++-------
> 2 files changed, 44 insertions(+), 9 deletions(-)
>
> diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
> index 2e99cafe7fed..13b01cd3a814 100644
> --- a/arch/riscv/include/asm/sbi.h
> +++ b/arch/riscv/include/asm/sbi.h
> @@ -121,6 +121,8 @@ enum sbi_ext_pmu_fid {
> SBI_EXT_PMU_COUNTER_START,
> SBI_EXT_PMU_COUNTER_STOP,
> SBI_EXT_PMU_COUNTER_FW_READ,
> + SBI_EXT_PMU_COUNTER_FW_READ_HI,
> + SBI_EXT_PMU_COUNTER_IRQ_CLEAR,
FWIW: This is not part of the SBI v2.0 or any proposed improvements.
As the SSE spec is in the draft state,
we need to evaluate if this is required or if there are better ways to
request the M-mode to clear the interrupt pending bit.
With counter delegation extensions in place, this requires a bit more
thought as it won't use the SBI PMU at all.
In addition to that, there may be other use cases where the irq needs to
be cleared as the first thing in the handler.
There are a few options which we are looking at:
1. Move the irq processing to the workqueue and call SSE complete
which can clear the irq
2. Define a generic irq clear function in the SSE extension itself to
achieve this if we have to rely on the SBI call.
> };
>
> union sbi_pmu_ctr_info {
> diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
> index 96c7f670c8f0..3fca70b13304 100644
> --- a/drivers/perf/riscv_pmu_sbi.c
> +++ b/drivers/perf/riscv_pmu_sbi.c
> @@ -17,6 +17,7 @@
> #include <linux/irqdomain.h>
> #include <linux/of_irq.h>
> #include <linux/of.h>
> +#include <linux/riscv_sse.h>
> #include <linux/cpu_pm.h>
> #include <linux/sched/clock.h>
>
> @@ -625,6 +626,12 @@ static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
> cpu_hw_evt->used_hw_ctrs[0], 0, 0, 0, 0);
> }
>
> +static void pmu_sbi_irq_clear(void)
> +{
> + /* No need to check the error here as we can't do anything about the error */
> + sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_IRQ_CLEAR, 0, 0, 0, 0, 0, 0);
> +}
> +
> /*
> * This function starts all the used counters in two step approach.
> * Any counter that did not overflow can be start in a single step
> @@ -670,10 +677,10 @@ static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
> }
> }
>
> -static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
> +static irqreturn_t pmu_sbi_ovf_handler(struct cpu_hw_events *cpu_hw_evt,
> + struct pt_regs *regs, bool from_sse)
> {
> struct perf_sample_data data;
> - struct pt_regs *regs;
> struct hw_perf_event *hw_evt;
> union sbi_pmu_ctr_info *info;
> int lidx, hidx, fidx;
> @@ -681,7 +688,6 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
> struct perf_event *event;
> unsigned long overflow;
> unsigned long overflowed_ctrs = 0;
> - struct cpu_hw_events *cpu_hw_evt = dev;
> u64 start_clock = sched_clock();
>
> if (WARN_ON_ONCE(!cpu_hw_evt))
> @@ -691,7 +697,10 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
> fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
> event = cpu_hw_evt->events[fidx];
> if (!event) {
> - csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
> + if (from_sse)
> + pmu_sbi_irq_clear();
> + else
> + csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
> return IRQ_NONE;
> }
>
> @@ -703,16 +712,16 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
>
> /*
> * Overflow interrupt pending bit should only be cleared after stopping
> - * all the counters to avoid any race condition.
> + * all the counters to avoid any race condition. When using SSE,
> + * interrupt is cleared when stopping counters.
> */
> - csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
> + if (!from_sse)
> + csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
>
> /* No overflow bit is set */
> if (!overflow)
> return IRQ_NONE;
>
> - regs = get_irq_regs();
> -
> for_each_set_bit(lidx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) {
> struct perf_event *event = cpu_hw_evt->events[lidx];
>
> @@ -758,6 +767,22 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
> return IRQ_HANDLED;
> }
>
> +static irqreturn_t pmu_sbi_ovf_irq_handler(int irq, void *dev)
> +{
> + return pmu_sbi_ovf_handler(dev, get_irq_regs(), false);
> +}
> +
> +static int pmu_sbi_ovf_sse_handler(uint32_t evt, void *arg,
> + struct pt_regs *regs)
> +{
> + struct cpu_hw_events __percpu *hw_events = arg;
> + struct cpu_hw_events *hw_event = raw_cpu_ptr(hw_events);
> +
> + pmu_sbi_ovf_handler(hw_event, regs, true);
> +
> + return 0;
> +}
> +
> static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
> {
> struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node);
> @@ -801,9 +826,17 @@ static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
> static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pdev)
> {
> int ret;
> + struct sse_event *evt;
> struct cpu_hw_events __percpu *hw_events = pmu->hw_events;
> struct irq_domain *domain = NULL;
>
> + evt = sse_event_register(SBI_SSE_EVENT_LOCAL_PMU, 0,
> + pmu_sbi_ovf_sse_handler, hw_events);
> + if (!IS_ERR(evt)) {
> + sse_event_enable(evt);
> + return 0;
> + }
> +
> if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
> riscv_pmu_irq_num = RV_IRQ_PMU;
> riscv_pmu_use_irq = true;
> @@ -831,7 +864,7 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
> return -ENODEV;
> }
>
> - ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events);
> + ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_irq_handler, "riscv-pmu", hw_events);
> if (ret) {
> pr_err("registering percpu irq failed [%d]\n", ret);
> return ret;
> --
> 2.42.0
>
--
Regards,
Atish
Powered by blists - more mailing lists