Message-ID: <20180619104323.huwx5n2z53lmtnux@lakrids.cambridge.arm.com>
Date:   Tue, 19 Jun 2018 11:43:23 +0100
From:   Mark Rutland <mark.rutland@....com>
To:     Suzuki K Poulose <suzuki.poulose@....com>
Cc:     linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
        will.deacon@....com, robin.murphy@....com, julien.thierry@....com
Subject: Re: [PATCH v3 6/7] arm64: perf: Disable PMU while processing counter
 overflows

On Tue, Jun 19, 2018 at 11:15:41AM +0100, Suzuki K Poulose wrote:
> The arm64 PMU updates the event counters and reprograms the
> counters in the overflow IRQ handler without disabling the
> PMU. This could potentially cause skews in group counters,
> where the overflowed counters may lose some event counts
> while they are reprogrammed. To prevent this, disable the
> PMU while we process the counter overflows and enable it
> back when we are done.
> 
> This patch also moves the PMU stop/start routines to avoid a
> forward declaration.
> 
> Suggested-by: Mark Rutland <mark.rutland@....com>
> Cc: Will Deacon <will.deacon@....com>
> Signed-off-by: Suzuki K Poulose <suzuki.poulose@....com>

Acked-by: Mark Rutland <mark.rutland@....com>

This makes me realise that we could remove the pmu_lock, but that's not
a new problem, and we can address that separately.
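As an illustration only (not part of this patch, and just a sketch of the
idea): if the perf core already guarantees that these helpers run on the
owning CPU with interrupts disabled, dropping the pmu_lock would plausibly
reduce the start path to a bare PMCR read-modify-write, e.g.:

	static void armv8pmu_start(struct arm_pmu *cpu_pmu)
	{
		/*
		 * Enable all counters. Hypothetical lock-free variant:
		 * relies on the PMU being strictly per-CPU and on the
		 * caller serialising accesses with IRQs disabled.
		 */
		armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
	}

with armv8pmu_stop() shrinking the same way.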

Thanks,
Mark.

> ---
>  arch/arm64/kernel/perf_event.c | 50 +++++++++++++++++++++++-------------------
>  1 file changed, 28 insertions(+), 22 deletions(-)
> 
> diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
> index 9ce3729..eebc635 100644
> --- a/arch/arm64/kernel/perf_event.c
> +++ b/arch/arm64/kernel/perf_event.c
> @@ -678,6 +678,28 @@ static void armv8pmu_disable_event(struct perf_event *event)
>  	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
>  }
>  
> +static void armv8pmu_start(struct arm_pmu *cpu_pmu)
> +{
> +	unsigned long flags;
> +	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
> +
> +	raw_spin_lock_irqsave(&events->pmu_lock, flags);
> +	/* Enable all counters */
> +	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
> +	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
> +}
> +
> +static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
> +{
> +	unsigned long flags;
> +	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
> +
> +	raw_spin_lock_irqsave(&events->pmu_lock, flags);
> +	/* Disable all counters */
> +	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
> +	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
> +}
> +
>  static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
>  {
>  	u32 pmovsr;
> @@ -702,6 +724,11 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
>  	 */
>  	regs = get_irq_regs();
>  
> +	/*
> +	 * Stop the PMU while processing the counter overflows
> +	 * to prevent skews in group events.
> +	 */
> +	armv8pmu_stop(cpu_pmu);
>  	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
>  		struct perf_event *event = cpuc->events[idx];
>  		struct hw_perf_event *hwc;
> @@ -726,6 +753,7 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
>  		if (perf_event_overflow(event, &data, regs))
>  			cpu_pmu->disable(event);
>  	}
> +	armv8pmu_start(cpu_pmu);
>  
>  	/*
>  	 * Handle the pending perf events.
> @@ -739,28 +767,6 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
>  	return IRQ_HANDLED;
>  }
>  
> -static void armv8pmu_start(struct arm_pmu *cpu_pmu)
> -{
> -	unsigned long flags;
> -	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
> -
> -	raw_spin_lock_irqsave(&events->pmu_lock, flags);
> -	/* Enable all counters */
> -	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
> -	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
> -}
> -
> -static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
> -{
> -	unsigned long flags;
> -	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
> -
> -	raw_spin_lock_irqsave(&events->pmu_lock, flags);
> -	/* Disable all counters */
> -	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
> -	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
> -}
> -
>  static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
>  				  struct perf_event *event)
>  {
> -- 
> 2.7.4
> 
