Message-ID: <8450a182-5c62-4546-ab91-5d39eb252254@linaro.org>
Date: Mon, 24 Mar 2025 14:52:26 +0000
From: James Clark <james.clark@...aro.org>
To: Colton Lewis <coltonlewis@...gle.com>, kvm@...r.kernel.org
Cc: Russell King <linux@...linux.org.uk>,
Catalin Marinas <catalin.marinas@....com>, Will Deacon <will@...nel.org>,
Marc Zyngier <maz@...nel.org>, Oliver Upton <oliver.upton@...ux.dev>,
Joey Gouly <joey.gouly@....com>, Suzuki K Poulose <suzuki.poulose@....com>,
Zenghui Yu <yuzenghui@...wei.com>, Mark Rutland <mark.rutland@....com>,
Paolo Bonzini <pbonzini@...hat.com>, Shuah Khan <shuah@...nel.org>,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
kvmarm@...ts.linux.dev, linux-perf-users@...r.kernel.org,
linux-kselftest@...r.kernel.org
Subject: Re: [RFC PATCH v3 7/8] perf: arm_pmuv3: Keep out of guest counter
partition
On 13/02/2025 6:03 pm, Colton Lewis wrote:
> If the PMU is partitioned, keep the driver out of the guest counter
> partition and only use the host counter partition. Partitioning is
> defined by the MDCR_EL2.HPMN register field and saved in
> cpu_pmu->hpmn. The range 0..HPMN-1 is accessible by EL1 and EL0 while
> HPMN..PMCR.N is reserved for EL2.
>
> Define some macros that take HPMN as an argument and construct
> mutually exclusive bitmaps for testing which partition a particular
> counter is in. Note that despite their different position in the
> bitmap, the cycle and instruction counters are always in the guest
> partition.
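
For anyone reading along without the rest of the series in front of
them: the ARMV8_PMU_*_CNT_PART() macros used in the hunks below are
presumably the new lines in include/linux/perf/arm_pmuv3.h from the
diffstat. My reading of the scheme described above is roughly the
sketch below (my own paraphrase with made-up names, so the real macros
will differ in detail):

	/*
	 * Sketch only, not the patch: the guest partition is the
	 * programmable counters 0..HPMN-1 plus the fixed cycle (bit 31)
	 * and instruction (bit 32) counters; the host partition is
	 * everything else that can overflow. Assumes hpmn > 0.
	 */
	#define GUEST_CNT_PART_SKETCH(hpmn)	(GENMASK_ULL((hpmn) - 1, 0) | \
						 BIT_ULL(31) | BIT_ULL(32))
	#define HOST_CNT_PART_SKETCH(hpmn)	(ARMV8_PMU_OVERFLOWED_MASK & \
						 ~GUEST_CNT_PART_SKETCH(hpmn))

That at least makes the two masks mutually exclusive by construction,
matching the "mutually exclusive bitmaps" wording above.
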
>
> Signed-off-by: Colton Lewis <coltonlewis@...gle.com>
> ---
>  arch/arm/include/asm/arm_pmuv3.h |  2 +
>  arch/arm64/include/asm/kvm_pmu.h |  5 +++
>  arch/arm64/kvm/pmu-part.c        | 16 +++++++
>  drivers/perf/arm_pmuv3.c         | 73 +++++++++++++++++++++++++++-----
>  include/linux/perf/arm_pmuv3.h   |  8 ++++
>  5 files changed, 94 insertions(+), 10 deletions(-)
>
> diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h
> index 2ec0e5e83fc9..dadd4ddf51af 100644
> --- a/arch/arm/include/asm/arm_pmuv3.h
> +++ b/arch/arm/include/asm/arm_pmuv3.h
> @@ -227,6 +227,8 @@ static inline bool kvm_set_pmuserenr(u64 val)
> }
>
> static inline void kvm_vcpu_pmu_resync_el0(void) {}
> +static inline void kvm_pmu_host_counters_enable(void) {}
> +static inline void kvm_pmu_host_counters_disable(void) {}
>
> /* PMU Version in DFR Register */
> #define ARMV8_PMU_DFR_VER_NI 0
> diff --git a/arch/arm64/include/asm/kvm_pmu.h b/arch/arm64/include/asm/kvm_pmu.h
> index 174b7f376d95..8f25754fde47 100644
> --- a/arch/arm64/include/asm/kvm_pmu.h
> +++ b/arch/arm64/include/asm/kvm_pmu.h
> @@ -25,6 +25,8 @@ void kvm_host_pmu_init(struct arm_pmu *pmu);
> u8 kvm_pmu_get_reserved_counters(void);
> u8 kvm_pmu_hpmn(u8 nr_counters);
> void kvm_pmu_partition(struct arm_pmu *pmu);
> +void kvm_pmu_host_counters_enable(void);
> +void kvm_pmu_host_counters_disable(void);
>
> #else
>
> @@ -37,6 +39,9 @@ static inline bool kvm_set_pmuserenr(u64 val)
> static inline void kvm_vcpu_pmu_resync_el0(void) {}
> static inline void kvm_host_pmu_init(struct arm_pmu *pmu) {}
>
> +static inline void kvm_pmu_host_counters_enable(void) {}
> +static inline void kvm_pmu_host_counters_disable(void) {}
> +
> #endif
>
> #endif
> diff --git a/arch/arm64/kvm/pmu-part.c b/arch/arm64/kvm/pmu-part.c
> index e74fecc67e37..51da65c678f9 100644
> --- a/arch/arm64/kvm/pmu-part.c
> +++ b/arch/arm64/kvm/pmu-part.c
> @@ -45,3 +45,19 @@ void kvm_pmu_partition(struct arm_pmu *pmu)
>  		pmu->partitioned = false;
>  	}
>  }
> +
> +void kvm_pmu_host_counters_enable(void)
> +{
> +	u64 mdcr = read_sysreg(mdcr_el2);
> +
> +	mdcr |= MDCR_EL2_HPME;
> +	write_sysreg(mdcr, mdcr_el2);
> +}
> +
> +void kvm_pmu_host_counters_disable(void)
> +{
> +	u64 mdcr = read_sysreg(mdcr_el2);
> +
> +	mdcr &= ~MDCR_EL2_HPME;
> +	write_sysreg(mdcr, mdcr_el2);
> +}
> diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
> index 0e360feb3432..442dcff56d5b 100644
> --- a/drivers/perf/arm_pmuv3.c
> +++ b/drivers/perf/arm_pmuv3.c
> @@ -730,15 +730,19 @@ static void armv8pmu_disable_event_irq(struct perf_event *event)
>  	armv8pmu_disable_intens(BIT(event->hw.idx));
>  }
>
> -static u64 armv8pmu_getreset_flags(void)
> +static u64 armv8pmu_getreset_flags(struct arm_pmu *cpu_pmu)
>  {
>  	u64 value;
>
>  	/* Read */
>  	value = read_pmovsclr();
>
> +	if (cpu_pmu->partitioned)
> +		value &= ARMV8_PMU_HOST_CNT_PART(cpu_pmu->hpmn);
> +	else
> +		value &= ARMV8_PMU_OVERFLOWED_MASK;
> +
>  	/* Write to clear flags */
> -	value &= ARMV8_PMU_OVERFLOWED_MASK;
>  	write_pmovsclr(value);
>
>  	return value;
> @@ -765,6 +769,18 @@ static void armv8pmu_disable_user_access(void)
>  	update_pmuserenr(0);
>  }
>
> +static bool armv8pmu_is_guest_part(struct arm_pmu *cpu_pmu, u8 idx)
> +{
> +	return cpu_pmu->partitioned &&
> +	       (BIT(idx) & ARMV8_PMU_GUEST_CNT_PART(cpu_pmu->hpmn));
> +}
> +
> +static bool armv8pmu_is_host_part(struct arm_pmu *cpu_pmu, u8 idx)
> +{
> +	return !cpu_pmu->partitioned ||
> +	       (BIT(idx) & ARMV8_PMU_HOST_CNT_PART(cpu_pmu->hpmn));
> +}
> +
>  static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
>  {
>  	int i;
> @@ -773,6 +789,8 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
>  	if (is_pmuv3p9(cpu_pmu->pmuver)) {
>  		u64 mask = 0;
>  		for_each_set_bit(i, cpuc->used_mask, ARMPMU_MAX_HWEVENTS) {
> +			if (armv8pmu_is_guest_part(cpu_pmu, i))
> +				continue;
Hi Colton,

Is it possible to keep the guest bits out of used_mask and cntr_mask in
the first place? Then all these loops don't need to have the logic for
is_guest_part()/is_host_part().
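
Something along these lines is roughly what I had in mind, done once
when the partition is decided (e.g. from kvm_pmu_partition()). It's a
completely untested sketch and the helper name is made up:

	/*
	 * Untested sketch: drop the guest partition from cntr_mask so
	 * the generic allocation paths never hand out those indexes,
	 * instead of filtering them out in every loop.
	 */
	static void armv8pmu_clear_guest_counters(struct arm_pmu *cpu_pmu)
	{
		DECLARE_BITMAP(guest, ARMPMU_MAX_HWEVENTS);

		if (!cpu_pmu->partitioned)
			return;

		bitmap_from_u64(guest, ARMV8_PMU_GUEST_CNT_PART(cpu_pmu->hpmn));
		bitmap_andnot(cpu_pmu->cntr_mask, cpu_pmu->cntr_mask, guest,
			      ARMPMU_MAX_HWEVENTS);
	}
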
That leads me to wonder about updating the printout:

  hw perfevents: enabled with armv8_pmuv3_0 PMU driver, 7 (0,8000003f)
  counters available

It might be a bit confusing if that doesn't quite reflect reality anymore.

Thanks
James