Message-ID: <CAL_JsqKT3gXJQLNr+H8D8nmEeGuDxS8iOje6yaCs+ne-4FcCvg@mail.gmail.com>
Date: Thu, 24 Oct 2024 11:41:11 -0500
From: Rob Herring <robh@...nel.org>
To: Will Deacon <will@...nel.org>
Cc: Marc Zyngier <maz@...nel.org>, Russell King <linux@...linux.org.uk>,
Catalin Marinas <catalin.marinas@....com>, Mark Rutland <mark.rutland@....com>,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH] perf/arm_pmuv3: Add PMUv3.9 per counter EL0 access control
On Thu, Oct 24, 2024 at 5:40 AM Will Deacon <will@...nel.org> wrote:
>
> On Wed, Oct 02, 2024 at 01:43:24PM -0500, Rob Herring (Arm) wrote:
> > Armv8.9/9.4 PMUv3.9 adds per counter EL0 access controls. Per counter
> > access is enabled with the UEN bit in PMUSERENR_EL1 register. Individual
> > counters are enabled/disabled in the PMUACR_EL1 register. When UEN is
> > set, the CR/ER bits control EL0 write access and must be set to disable
> > write access.
> >
> > With the access controls, the clearing of unused counters can be
> > skipped.
> >
> > KVM also configures PMUSERENR_EL1 in order to trap to EL2. UEN does not
> > need to be set for it since only PMUv3.5 is exposed to guests.
> >
> > Signed-off-by: Rob Herring (Arm) <robh@...nel.org>
> > ---
> > arch/arm/include/asm/arm_pmuv3.h | 6 ++++++
> > arch/arm64/include/asm/arm_pmuv3.h | 10 ++++++++++
> > arch/arm64/tools/sysreg | 8 ++++++++
> > drivers/perf/arm_pmuv3.c | 29 +++++++++++++++++++----------
> > include/linux/perf/arm_pmuv3.h | 1 +
> > 5 files changed, 44 insertions(+), 10 deletions(-)
> >
> > diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h
> > index f63ba8986b24..d242b5e1ca0d 100644
> > --- a/arch/arm/include/asm/arm_pmuv3.h
> > +++ b/arch/arm/include/asm/arm_pmuv3.h
> > @@ -231,6 +231,7 @@ static inline void kvm_vcpu_pmu_resync_el0(void) {}
> > #define ARMV8_PMU_DFR_VER_V3P1 0x4
> > #define ARMV8_PMU_DFR_VER_V3P4 0x5
> > #define ARMV8_PMU_DFR_VER_V3P5 0x6
> > +#define ARMV8_PMU_DFR_VER_V3P9 0x9
> > #define ARMV8_PMU_DFR_VER_IMP_DEF 0xF
> >
> > static inline bool pmuv3_implemented(int pmuver)
> > @@ -249,6 +250,11 @@ static inline bool is_pmuv3p5(int pmuver)
> > return pmuver >= ARMV8_PMU_DFR_VER_V3P5;
> > }
> >
> > +static inline bool is_pmuv3p9(int pmuver)
> > +{
> > + return pmuver >= ARMV8_PMU_DFR_VER_V3P9;
> > +}
> > +
> > static inline u64 read_pmceid0(void)
> > {
> > u64 val = read_sysreg(PMCEID0);
> > diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h
> > index 468a049bc63b..8a777dec8d88 100644
> > --- a/arch/arm64/include/asm/arm_pmuv3.h
> > +++ b/arch/arm64/include/asm/arm_pmuv3.h
> > @@ -152,6 +152,11 @@ static inline void write_pmuserenr(u32 val)
> > write_sysreg(val, pmuserenr_el0);
> > }
> >
> > +static inline void write_pmuacr(u64 val)
> > +{
> > + write_sysreg_s(val, SYS_PMUACR_EL1);
> > +}
> > +
> > static inline u64 read_pmceid0(void)
> > {
> > return read_sysreg(pmceid0_el0);
> > @@ -178,4 +183,9 @@ static inline bool is_pmuv3p5(int pmuver)
> > return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5;
> > }
> >
> > +static inline bool is_pmuv3p9(int pmuver)
> > +{
> > + return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P9;
> > +}
> > +
> > #endif
> > diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
> > index 8d637ac4b7c6..74fb5af91d4f 100644
> > --- a/arch/arm64/tools/sysreg
> > +++ b/arch/arm64/tools/sysreg
> > @@ -1238,6 +1238,7 @@ UnsignedEnum 11:8 PMUVer
> > 0b0110 V3P5
> > 0b0111 V3P7
> > 0b1000 V3P8
> > + 0b1001 V3P9
> > 0b1111 IMP_DEF
> > EndEnum
> > UnsignedEnum 7:4 TraceVer
> > @@ -2178,6 +2179,13 @@ Field 4 P
> > Field 3:0 ALIGN
> > EndSysreg
> >
> > +Sysreg PMUACR_EL1 3 0 9 14 4
> > +Res0 63:33
> > +Field 32 F0
> > +Field 31 C
> > +Field 30:0 P
> > +EndSysreg
> > +
> > Sysreg PMSELR_EL0 3 3 9 12 5
> > Res0 63:5
> > Field 4:0 SEL
> > diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
> > index 0afe02f879b4..bb93d32b86ea 100644
> > --- a/drivers/perf/arm_pmuv3.c
> > +++ b/drivers/perf/arm_pmuv3.c
> > @@ -770,18 +770,27 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
> > int i;
> > struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
> >
> > - /* Clear any unused counters to avoid leaking their contents */
> > - for_each_andnot_bit(i, cpu_pmu->cntr_mask, cpuc->used_mask,
> > - ARMPMU_MAX_HWEVENTS) {
> > - if (i == ARMV8_PMU_CYCLE_IDX)
> > - write_pmccntr(0);
> > - else if (i == ARMV8_PMU_INSTR_IDX)
> > - write_pmicntr(0);
> > - else
> > - armv8pmu_write_evcntr(i, 0);
> > + if (is_pmuv3p9(cpu_pmu->pmuver)) {
> > + u64 mask = 0;
> > + for_each_set_bit(i, cpuc->used_mask, ARMPMU_MAX_HWEVENTS) {
> > + if (armv8pmu_event_has_user_read(cpuc->events[i]))
> > + mask |= BIT(i);
> > + }
> > + write_pmuacr(mask);
>
> Since this is a new register, should we be zeroing it as part of our
> reset callback?
That should not be necessary since EL0 access is gated off in PMUSERENR
in general, and enabling this register additionally requires setting
the UEN bit. That's only done right after this.
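Roughly, the tail of armv8pmu_enable_user_access() below the quoted
hunk ends up doing something like this (rough sketch, not the literal
diff; the exact set of bits is in the full patch):

	/*
	 * Sketch only: EL0 access only becomes possible once UEN is set
	 * here, and CR/ER stay set so EL0 writes remain disabled as the
	 * architecture requires when UEN is in use.
	 */
	update_pmuserenr(ARMV8_PMU_USERENR_UEN |
			 ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR);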
But if the policy is to initialize all registers whose reset state is
UNKNOWN, then yes, I can add that.
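For reference, that would just be something along the lines of
(untested sketch, guarded so we don't touch the register on pre-v3.9
PMUs):

	/* Untested sketch for the reset callback: put PMUACR in a known state */
	if (is_pmuv3p9(cpu_pmu->pmuver))
		write_pmuacr(0);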
Rob