[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CANr2M1_Jzw1Z6A2Gw4DgcKiZqUd1-Bzn-GM+g1CCHgQP597FBQ@mail.gmail.com>
Date: Sun, 17 May 2020 14:39:13 +0800
From: Lecopzer Chen <lecopzer@...il.com>
To: linux-kernel@...r.kernel.org
Cc: Jian-Lin Chen <lecopzer.chen@...iatek.com>,
linux-arm-kernel@...ts.infradead.org, matthias.bgg@...il.com,
catalin.marinas@....com, will@...nel.org, mark.rutland@....com,
mingo@...hat.com, acme@...nel.org, jolsa@...hat.com,
namhyung@...nel.org, linux-mediatek@...ts.infradead.org,
alexander.shishkin@...ux.intel.com, peterz@...radead.org,
yj.chiang@...iatek.com
Subject: Re: [PATCH 1/3] arm_pmu: Add support for perf NMI interrupts registration
There were some mistakes when merging this patch.
The free nmi part is not present :(
The following part will be added in V2 next weekend.
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index fa37b72d19e2..aa9ed09e5303 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -544,6 +544,38 @@ static int armpmu_count_irq_users(const int irq)
return count;
}
+static void armpmu_teardown_percpu_nmi_other(void *info)
+{
+ /*
+ * We don't need to disable preemption since smp_call_function()
+ * did this for us.
+ */
+ teardown_percpu_nmi((uintptr_t) info);
+}
+
+static void _armpmu_free_irq(unsigned int irq, void *dev_id)
+{
+ if (armpmu_support_nmi())
+ free_nmi(irq, dev_id);
+ else
+ free_irq(irq, dev_id);
+}
+
+static void _armpmu_free_percpu_irq(unsigned int irq, void __percpu *dev_id)
+{
+ if (armpmu_support_nmi()) {
+ preempt_disable();
+ teardown_percpu_nmi(irq);
+ smp_call_function(armpmu_teardown_percpu_nmi_other,
+ (void *)(uintptr_t) irq, true);
+ preempt_enable();
+
+ free_percpu_nmi(irq, dev_id);
+ }
+ else
+ free_percpu_irq(irq, dev_id);
+}
+
void armpmu_free_irq(int irq, int cpu)
{
if (per_cpu(cpu_irq, cpu) == 0)
@@ -552,9 +584,9 @@ void armpmu_free_irq(int irq, int cpu)
return;
if (!irq_is_percpu_devid(irq))
- free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
+ _armpmu_free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
else if (armpmu_count_irq_users(irq) == 1)
- free_percpu_irq(irq, &cpu_armpmu);
+ _armpmu_free_percpu_irq(irq, &cpu_armpmu);
per_cpu(cpu_irq, cpu) = 0;
}
Thanks,
Lecopzer
Lecopzer Chen <lecopzer@...il.com> 於 2020年5月16日 週六 下午8:50寫道:
>
> Register perf interrupts by request_nmi()/percpu_nmi() when both
> ARM64_PSEUDO_NMI and ARM64_PSEUDO_NMI_PERF are enabled and nmi
> cpufreature is active.
>
> Signed-off-by: Lecopzer Chen <lecopzer.chen@...iatek.com>
> ---
> drivers/perf/arm_pmu.c | 51 +++++++++++++++++++++++++++++++-----
> include/linux/perf/arm_pmu.h | 6 +++++
> 2 files changed, 51 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
> index df352b334ea7..fa37b72d19e2 100644
> --- a/drivers/perf/arm_pmu.c
> +++ b/drivers/perf/arm_pmu.c
> @@ -559,6 +559,48 @@ void armpmu_free_irq(int irq, int cpu)
> per_cpu(cpu_irq, cpu) = 0;
> }
>
> +static void armpmu_prepare_percpu_nmi_other(void *info)
> +{
> + /*
> + * We don't need to disable preemption since smp_call_function()
> + * did this for us.
> + */
> + prepare_percpu_nmi((uintptr_t) info);
> +}
> +
> +static int _armpmu_request_irq(unsigned int irq, irq_handler_t handler,
> + unsigned long flags, int cpu)
> +{
> + if (armpmu_support_nmi())
> + return request_nmi(irq, handler, flags, "arm-pmu",
> + per_cpu_ptr(&cpu_armpmu, cpu));
> + return request_irq(irq, handler, flags, "arm-pmu",
> + per_cpu_ptr(&cpu_armpmu, cpu));
> +}
> +
> +static int _armpmu_request_percpu_irq(unsigned int irq, irq_handler_t handler)
> +{
> + if (armpmu_support_nmi()) {
> + int err;
> +
> + err = request_percpu_nmi(irq, handler, "arm-pmu",
> + &cpu_armpmu);
> + if (err)
> + return err;
> +
> + preempt_disable();
> + err = prepare_percpu_nmi(irq);
> + if (err) {
> + preempt_enable();
> + return err;
> + }
> + smp_call_function(armpmu_prepare_percpu_nmi_other,
> + (void *)(uintptr_t) irq, true);
> + preempt_enable();
> + return 0;
> + }
> + return request_percpu_irq(irq, handler, "arm-pmu", &cpu_armpmu);
> +}
> +
> int armpmu_request_irq(int irq, int cpu)
> {
> int err = 0;
> @@ -582,12 +624,9 @@ int armpmu_request_irq(int irq, int cpu)
> IRQF_NO_THREAD;
>
> irq_set_status_flags(irq, IRQ_NOAUTOEN);
> - err = request_irq(irq, handler, irq_flags, "arm-pmu",
> - per_cpu_ptr(&cpu_armpmu, cpu));
> - } else if (armpmu_count_irq_users(irq) == 0) {
> - err = request_percpu_irq(irq, handler, "arm-pmu",
> - &cpu_armpmu);
> - }
> + err = _armpmu_request_irq(irq, handler, irq_flags, cpu);
> + } else if (armpmu_count_irq_users(irq) == 0)
> + err = _armpmu_request_percpu_irq(irq, handler);
>
> if (err)
> goto err_out;
> diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
> index 5b616dde9a4c..5b878b5a22aa 100644
> --- a/include/linux/perf/arm_pmu.h
> +++ b/include/linux/perf/arm_pmu.h
> @@ -160,6 +160,12 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
> static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
> #endif
>
> +static inline bool armpmu_support_nmi(void)
> +{
> + return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI_PERF) &&
> + system_uses_irq_prio_masking();
> +}
> +
> /* Internal functions only for core arm_pmu code */
> struct arm_pmu *armpmu_alloc(void);
> struct arm_pmu *armpmu_alloc_atomic(void);
> --
> 2.25.1
>
Powered by blists - more mailing lists