Message-ID: <ZmbfE_0n65lMxRb1@J2N7QTR9R3>
Date: Mon, 10 Jun 2024 12:10:11 +0100
From: Mark Rutland <mark.rutland@....com>
To: "Rob Herring (Arm)" <robh@...nel.org>
Cc: Russell King <linux@...linux.org.uk>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...nel.org>, Ian Rogers <irogers@...gle.com>,
Adrian Hunter <adrian.hunter@...el.com>,
Will Deacon <will@...nel.org>, Marc Zyngier <maz@...nel.org>,
Oliver Upton <oliver.upton@...ux.dev>,
James Morse <james.morse@....com>,
Suzuki K Poulose <suzuki.poulose@....com>,
Zenghui Yu <yuzenghui@...wei.com>,
Catalin Marinas <catalin.marinas@....com>,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
linux-perf-users@...r.kernel.org, kvmarm@...ts.linux.dev
Subject: Re: [PATCH 6/9] KVM: arm64: pmu: Use generated define for
PMSELR_EL0.SEL access
On Fri, Jun 07, 2024 at 02:31:31PM -0600, Rob Herring (Arm) wrote:
> ARMV8_PMU_COUNTER_MASK is really a mask for the PMSELR_EL0.SEL register
> field. Make that clear by adding a standard sysreg definition for the
> register, and using it instead.
>
> Signed-off-by: Rob Herring (Arm) <robh@...nel.org>
> ---
> arch/arm64/include/asm/sysreg.h | 1 -
> arch/arm64/kvm/sys_regs.c | 10 +++++-----
> arch/arm64/tools/sysreg | 5 +++++
> include/linux/perf/arm_pmuv3.h | 1 -
> 4 files changed, 10 insertions(+), 7 deletions(-)
This looks good to me; I checked that the reg values match those in the
latest ARM ARM (ARM DDI 0487K.a pages 9016 to 9018), and that they also
match the existing values in the kernel. The changes to use the new mask
name and to use SYS_FIELD_GET() also look fine.
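For anyone else following along, a rough sketch of why the old and new
forms end up equivalent (my own expansion, not part of the patch):

  /* SYS_FIELD_GET() in <asm/sysreg.h> is (roughly): */
  #define SYS_FIELD_GET(reg, field, val)	FIELD_GET(reg##_##field##_MASK, val)

  /*
   * The new "Field 4:0 SEL" entry gives a PMSELR_EL0_SEL_MASK covering
   * bits [4:0] with a zero shift, so FIELD_GET() reduces to "val & 0x1f",
   * which is exactly what masking with the old ARMV8_PMU_COUNTER_MASK
   * (ARMV8_PMU_MAX_COUNTERS - 1 == 0x1f) produced.
   */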
Reviewed-by: Mark Rutland <mark.rutland@....com>
Acked-by: Mark Rutland <mark.rutland@....com>
Mark.
>
> diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
> index af3b206fa423..b0d6c33f9ecc 100644
> --- a/arch/arm64/include/asm/sysreg.h
> +++ b/arch/arm64/include/asm/sysreg.h
> @@ -403,7 +403,6 @@
> #define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
> #define SYS_PMOVSCLR_EL0 sys_reg(3, 3, 9, 12, 3)
> #define SYS_PMSWINC_EL0 sys_reg(3, 3, 9, 12, 4)
> -#define SYS_PMSELR_EL0 sys_reg(3, 3, 9, 12, 5)
> #define SYS_PMCEID0_EL0 sys_reg(3, 3, 9, 12, 6)
> #define SYS_PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7)
> #define SYS_PMCCNTR_EL0 sys_reg(3, 3, 9, 13, 0)
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index 22b45a15d068..f8b5db48ea8a 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -880,7 +880,7 @@ static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> {
> reset_unknown(vcpu, r);
> - __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
> + __vcpu_sys_reg(vcpu, r->reg) &= PMSELR_EL0_SEL_MASK;
>
> return __vcpu_sys_reg(vcpu, r->reg);
> }
> @@ -972,7 +972,7 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> else
> /* return PMSELR.SEL field */
> p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
> - & ARMV8_PMU_COUNTER_MASK;
> + & PMSELR_EL0_SEL_MASK;
>
> return true;
> }
> @@ -1040,8 +1040,8 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
> if (pmu_access_event_counter_el0_disabled(vcpu))
> return false;
>
> - idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
> - & ARMV8_PMU_COUNTER_MASK;
> + idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
> + __vcpu_sys_reg(vcpu, PMSELR_EL0));
> } else if (r->Op2 == 0) {
> /* PMCCNTR_EL0 */
> if (pmu_access_cycle_counter_el0_disabled(vcpu))
> @@ -1091,7 +1091,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
>
> if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
> /* PMXEVTYPER_EL0 */
> - idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
> + idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0));
> reg = PMEVTYPER0_EL0 + idx;
> } else if (r->CRn == 14 && (r->CRm & 12) == 12) {
> idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
> diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
> index a4c1dd4741a4..231817a379b5 100644
> --- a/arch/arm64/tools/sysreg
> +++ b/arch/arm64/tools/sysreg
> @@ -2153,6 +2153,11 @@ Field 4 P
> Field 3:0 ALIGN
> EndSysreg
>
> +Sysreg PMSELR_EL0 3 3 9 12 5
> +Res0 63:5
> +Field 4:0 SEL
> +EndSysreg
> +
> SysregFields CONTEXTIDR_ELx
> Res0 63:32
> Field 31:0 PROCID
> diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h
> index 46377e134d67..caa09241ad4f 100644
> --- a/include/linux/perf/arm_pmuv3.h
> +++ b/include/linux/perf/arm_pmuv3.h
> @@ -7,7 +7,6 @@
> #define __PERF_ARM_PMUV3_H
>
> #define ARMV8_PMU_MAX_COUNTERS 32
> -#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1)
>
> /*
> * Common architectural and microarchitectural event numbers.
>
> --
> 2.43.0
>