Message-ID: <20200921135510.GM2139@willie-the-truck>
Date:   Mon, 21 Sep 2020 14:55:11 +0100
From:   Will Deacon <will@...nel.org>
To:     Alexandru Elisei <alexandru.elisei@....com>
Cc:     linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
        mark.rutland@....com, maz@...nel.org, catalin.marinas@....com,
        swboyd@...omium.org, sumit.garg@...aro.org,
        Julien Thierry <julien.thierry@....com>,
        Julien Thierry <julien.thierry.kdev@...il.com>,
        Will Deacon <will.deacon@....com>
Subject: Re: [PATCH v6 6/7] arm_pmu: Introduce pmu_irq_ops

On Wed, Aug 19, 2020 at 02:34:18PM +0100, Alexandru Elisei wrote:
> From: Julien Thierry <julien.thierry@....com>
> 
> Currently the PMU interrupt can either be a normal irq or a percpu irq.
> Supporting NMI will introduce two cases for each existing one, and
> managing the interrupt becomes a mess of 'if's.
> 
> Define sets of callbacks for operations commonly done on the interrupt. The
> appropriate set of callbacks is selected at interrupt request time and
> simplifies interrupt enabling/disabling and freeing.
> 
> Cc: Julien Thierry <julien.thierry.kdev@...il.com>
> Cc: Will Deacon <will.deacon@....com>
> Cc: Mark Rutland <mark.rutland@....com>
> Signed-off-by: Julien Thierry <julien.thierry@....com>
> Signed-off-by: Alexandru Elisei <alexandru.elisei@....com>
> ---
>  drivers/perf/arm_pmu.c | 86 ++++++++++++++++++++++++++++++++++--------
>  1 file changed, 70 insertions(+), 16 deletions(-)
> 
> diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
> index df352b334ea7..17e5952d21e4 100644
> --- a/drivers/perf/arm_pmu.c
> +++ b/drivers/perf/arm_pmu.c
> @@ -26,8 +26,46 @@
>  
>  #include <asm/irq_regs.h>
>  
> +static int armpmu_count_irq_users(const int irq);
> +
> +struct pmu_irq_ops {
> +	void (*enable_pmuirq)(unsigned int irq);
> +	void (*disable_pmuirq)(unsigned int irq);
> +	void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid);
> +};
> +
> +static void armpmu_free_pmuirq(unsigned int irq, int cpu, void __percpu *devid)
> +{
> +	free_irq(irq, per_cpu_ptr(devid, cpu));
> +}
> +
> +static const struct pmu_irq_ops pmuirq_ops = {
> +	.enable_pmuirq = enable_irq,
> +	.disable_pmuirq = disable_irq_nosync,
> +	.free_pmuirq = armpmu_free_pmuirq
> +};
> +
> +static void armpmu_enable_percpu_pmuirq(unsigned int irq)
> +{
> +	enable_percpu_irq(irq, IRQ_TYPE_NONE);
> +}
> +
> +static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
> +				   void __percpu *devid)
> +{
> +	if (armpmu_count_irq_users(irq) == 1)
> +		free_percpu_irq(irq, devid);
> +}
> +
> +static const struct pmu_irq_ops percpu_pmuirq_ops = {
> +	.enable_pmuirq = armpmu_enable_percpu_pmuirq,
> +	.disable_pmuirq = disable_percpu_irq,
> +	.free_pmuirq = armpmu_free_percpu_pmuirq
> +};
> +
>  static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
>  static DEFINE_PER_CPU(int, cpu_irq);
> +static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);

Would it make sense to put this in a structure alongside the irq?
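Something like this, perhaps (completely untested, and the struct name is
made up):

	/* hypothetical: fold cpu_irq and cpu_irq_ops into one per-CPU struct */
	struct pmu_hw_irq {
		int				irq;
		const struct pmu_irq_ops	*ops;
	};

	static DEFINE_PER_CPU(struct pmu_hw_irq, cpu_irq);

so that the irq number and its ops are always looked up together with a
single per_cpu() access.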

>  
>  static inline u64 arm_pmu_event_max_period(struct perf_event *event)
>  {
> @@ -544,6 +582,19 @@ static int armpmu_count_irq_users(const int irq)
>  	return count;
>  }
>  
> +static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
> +{
> +	int cpu;
> +
> +	for_each_possible_cpu(cpu) {
> +		if (per_cpu(cpu_irq, cpu) == irq
> +		    && per_cpu(cpu_irq_ops, cpu))
> +			return per_cpu(cpu_irq_ops, cpu);
> +	}

nit, but you could make this a bit more readable:

	const struct pmu_irq_ops *ops = NULL;

	for_each_possible_cpu(cpu) {
		if (per_cpu(cpu_irq, cpu) != irq)
			continue;

		ops = per_cpu(cpu_irq_ops, cpu);
		if (ops)
			break;
	}

	return ops;

Will
