Message-ID: <864j0psuas.wl-maz@kernel.org>
Date: Wed, 19 Feb 2025 17:44:59 +0000
From: Marc Zyngier <maz@...nel.org>
To: Oliver Upton <oliver.upton@...ux.dev>
Cc: kvmarm@...ts.linux.dev,
	Joey Gouly <joey.gouly@....com>,
	Suzuki K Poulose <suzuki.poulose@....com>,
	Zenghui Yu <yuzenghui@...wei.com>,
	Mingwei Zhang <mizhang@...gle.com>,
	Colton Lewis <coltonlewis@...gle.com>,
	Raghavendra Rao Ananta <rananta@...gle.com>,
	Catalin Marinas <catalin.marinas@....com>,
	Will Deacon <will@...nel.org>,
	Mark Rutland <mark.rutland@....com>,
	linux-arm-kernel@...ts.infradead.org,
	linux-kernel@...r.kernel.org,
	Janne Grunau <j@...nau.net>
Subject: Re: [PATCH v2 07/14] KVM: arm64: Use a cpucap to determine if system supports FEAT_PMUv3

On Mon, 03 Feb 2025 18:31:04 +0000,
Oliver Upton <oliver.upton@...ux.dev> wrote:
> 
> KVM is about to learn some new tricks to virtualize PMUv3 on IMPDEF
> hardware. As part of that, we now need to differentiate host support
> from guest support for PMUv3.
> 
> Add a cpucap to determine if an architectural PMUv3 is present to guard
> host usage of PMUv3 controls.
> 
> Tested-by: Janne Grunau <j@...nau.net>
> Signed-off-by: Oliver Upton <oliver.upton@...ux.dev>
> ---
>  arch/arm64/include/asm/cpufeature.h     |  5 +++++
>  arch/arm64/kernel/cpufeature.c          | 19 +++++++++++++++++++
>  arch/arm64/kvm/hyp/include/hyp/switch.h |  4 ++--
>  arch/arm64/kvm/pmu.c                    | 10 +++++-----
>  arch/arm64/tools/cpucaps                |  1 +
>  include/kvm/arm_pmu.h                   |  2 +-
>  6 files changed, 33 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
> index e0e4478f5fb5..0eff048848b8 100644
> --- a/arch/arm64/include/asm/cpufeature.h
> +++ b/arch/arm64/include/asm/cpufeature.h
> @@ -866,6 +866,11 @@ static __always_inline bool system_supports_mpam_hcr(void)
>  	return alternative_has_cap_unlikely(ARM64_MPAM_HCR);
>  }
>  
> +static inline bool system_supports_pmuv3(void)
> +{
> +	return cpus_have_final_cap(ARM64_HAS_PMUV3);
> +}
> +
>  int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
>  bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
>  
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index 4eb7c6698ae4..6886d2875fac 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -1898,6 +1898,19 @@ static bool has_lpa2(const struct arm64_cpu_capabilities *entry, int scope)
>  }
>  #endif
>  
> +static bool has_pmuv3(const struct arm64_cpu_capabilities *entry, int scope)
> +{
> +	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
> +	unsigned int pmuver;
> +
> +	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
> +						      ID_AA64DFR0_EL1_PMUVer_SHIFT);
> +	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
> +		return false;
> +
> +	return pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP;

Given that PMUVer is a signed field, how about using
cpuid_feature_extract_signed_field() and doing a signed comparison
instead?
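
A minimal sketch of that shape (untested; it reuses the existing
cpuid_feature_extract_signed_field() helper from
arch/arm64/include/asm/cpufeature.h):

	static bool has_pmuv3(const struct arm64_cpu_capabilities *entry,
			      int scope)
	{
		u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
		int pmuver;

		pmuver = cpuid_feature_extract_signed_field(dfr0,
					ID_AA64DFR0_EL1_PMUVer_SHIFT);

		/*
		 * IMP_DEF (0xf) extracts as a negative value, so the
		 * single signed comparison rejects it without an
		 * explicit check.
		 */
		return pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP;
	}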

> +}
> +
>  #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
>  #define KPTI_NG_TEMP_VA		(-(1UL << PMD_SHIFT))
>  
> @@ -2999,6 +3012,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
>  		ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, GCS, IMP)
>  	},
>  #endif
> +	{
> +		.desc = "PMUv3",
> +		.capability = ARM64_HAS_PMUV3,
> +		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
> +		.matches = has_pmuv3,
> +	},

This cap is probed unconditionally (without any configuration
dependency)...

>  	{},
>  };
>  
> diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
> index f838a45665f2..0edc7882bedb 100644
> --- a/arch/arm64/kvm/hyp/include/hyp/switch.h
> +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
> @@ -244,7 +244,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
>  	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
>  	 * EL1 instead of being trapped to EL2.
>  	 */
> -	if (kvm_arm_support_pmu_v3()) {
> +	if (system_supports_pmuv3()) {

... but kvm_arm_support_pmu_v3() is conditional on
CONFIG_HW_PERF_EVENTS.  Doesn't this create some sort of new code path
that we didn't expect?
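
For reference, the pre-existing helper is shaped roughly like this
(paraphrasing include/kvm/arm_pmu.h, not verbatim):

	#ifdef CONFIG_HW_PERF_EVENTS
	static __always_inline bool kvm_arm_support_pmu_v3(void)
	{
		/* true only once a host PMU driver has probed */
		return static_branch_likely(&kvm_arm_pmu_available);
	}
	#else
	#define kvm_arm_support_pmu_v3()	(false)
	#endif

so with CONFIG_HW_PERF_EVENTS=n the old check was constant false,
while the new cpucap-based one can still fire.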

Thanks,

	M.

-- 
Without deviation from the norm, progress is not possible.
