Message-ID: <48aeb132-82bb-1f13-1c12-abf924dbb1d2@arm.com>
Date:   Thu, 5 Oct 2023 17:48:54 +0100
From:   Suzuki K Poulose <suzuki.poulose@....com>
To:     James Clark <james.clark@....com>, coresight@...ts.linaro.org,
        linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.linux.dev,
        broonie@...nel.org, maz@...nel.org
Cc:     Oliver Upton <oliver.upton@...ux.dev>,
        James Morse <james.morse@....com>,
        Zenghui Yu <yuzenghui@...wei.com>,
        Catalin Marinas <catalin.marinas@....com>,
        Will Deacon <will@...nel.org>,
        Mike Leach <mike.leach@...aro.org>,
        Leo Yan <leo.yan@...aro.org>,
        Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
        Anshuman Khandual <anshuman.khandual@....com>,
        Rob Herring <robh@...nel.org>,
        Jintack Lim <jintack.lim@...aro.org>,
        Akihiko Odaki <akihiko.odaki@...nix.com>,
        Fuad Tabba <tabba@...gle.com>, Joey Gouly <joey.gouly@....com>,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 3/6] arm64: KVM: Move SPE and trace registers to the
 sysreg array

On 05/10/2023 13:57, James Clark wrote:
> pmscr_el1 and trfcr_el1 are currently special-cased in the
> host_debug_state struct, but they're just registers after all, so give
> them entries in the sysreg array and refer to them through the host
> context.
> 
> Signed-off-by: James Clark <james.clark@....com>

Looks good to me.

Reviewed-by: Suzuki K Poulose <suzuki.poulose@....com>


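Just to spell out the shape of the change for anyone skimming: the saved
host values move from dedicated host_debug_state fields to slots in the
per-context sysreg array, so accesses go through the generic accessor.
Roughly (illustrative only, not taken verbatim from the patch):

	/* before: dedicated field on the vcpu */
	vcpu->arch.host_debug_state.pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1);

	/* after: generic sysreg array slot on the host context */
	ctxt_sys_reg(host_ctxt, PMSCR_EL1) = read_sysreg_s(SYS_PMSCR_EL1);
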
> ---
>   arch/arm64/include/asm/kvm_host.h  |  6 ++--
>   arch/arm64/include/asm/kvm_hyp.h   |  4 +--
>   arch/arm64/kvm/hyp/nvhe/debug-sr.c | 44 +++++++++++++++---------------
>   arch/arm64/kvm/hyp/nvhe/switch.c   |  4 +--
>   4 files changed, 28 insertions(+), 30 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index e36f7e8a76ce..b5200f199692 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -439,6 +439,8 @@ enum vcpu_sysreg {
>   	CNTHP_CVAL_EL2,
>   	CNTHV_CTL_EL2,
>   	CNTHV_CVAL_EL2,
> +	PMSCR_EL1,	/* Statistical profiling extension */
> +	TRFCR_EL1,	/* Self-hosted trace filters */
>   
>   	NR_SYS_REGS	/* Nothing after this line! */
>   };
> @@ -572,10 +574,6 @@ struct kvm_vcpu_arch {
>   	struct {
>   		/* {Break,watch}point registers */
>   		struct kvm_guest_debug_arch regs;
> -		/* Statistical profiling extension */
> -		u64 pmscr_el1;
> -		/* Self-hosted trace */
> -		u64 trfcr_el1;
>   	} host_debug_state;
>   
>   	/* VGIC state */
> diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
> index b7238c72a04c..37e238f526d7 100644
> --- a/arch/arm64/include/asm/kvm_hyp.h
> +++ b/arch/arm64/include/asm/kvm_hyp.h
> @@ -103,8 +103,8 @@ void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
>   void __debug_switch_to_host(struct kvm_vcpu *vcpu);
>   
>   #ifdef __KVM_NVHE_HYPERVISOR__
> -void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
> -void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
> +void __debug_save_host_buffers_nvhe(struct kvm_cpu_context *host_ctxt);
> +void __debug_restore_host_buffers_nvhe(struct kvm_cpu_context *host_ctxt);
>   #endif
>   
>   void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
> diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
> index 89c208112eb7..128a57dddabf 100644
> --- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c
> +++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
> @@ -14,12 +14,12 @@
>   #include <asm/kvm_hyp.h>
>   #include <asm/kvm_mmu.h>
>   
> -static void __debug_save_spe(u64 *pmscr_el1)
> +static void __debug_save_spe(struct kvm_cpu_context *host_ctxt)
>   {
>   	u64 reg;
>   
>   	/* Clear pmscr in case of early return */
> -	*pmscr_el1 = 0;
> +	ctxt_sys_reg(host_ctxt, PMSCR_EL1) = 0;
>   
>   	/*
>   	 * At this point, we know that this CPU implements
> @@ -31,7 +31,7 @@ static void __debug_save_spe(u64 *pmscr_el1)
>   		return;
>   
>   	/* Yes; save the control register and disable data generation */
> -	*pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1);
> +	ctxt_sys_reg(host_ctxt, PMSCR_EL1) = read_sysreg_s(SYS_PMSCR_EL1);
>   	write_sysreg_s(0, SYS_PMSCR_EL1);
>   	isb();
>   
> @@ -39,21 +39,21 @@ static void __debug_save_spe(u64 *pmscr_el1)
>   	psb_csync();
>   }
>   
> -static void __debug_restore_spe(u64 pmscr_el1)
> +static void __debug_restore_spe(struct kvm_cpu_context *host_ctxt)
>   {
> -	if (!pmscr_el1)
> +	if (!ctxt_sys_reg(host_ctxt, PMSCR_EL1))
>   		return;
>   
>   	/* The host page table is installed, but not yet synchronised */
>   	isb();
>   
>   	/* Re-enable data generation */
> -	write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
> +	write_sysreg_s(ctxt_sys_reg(host_ctxt, PMSCR_EL1), SYS_PMSCR_EL1);
>   }
>   
> -static void __debug_save_trace(u64 *trfcr_el1)
> +static void __debug_save_trace(struct kvm_cpu_context *host_ctxt)
>   {
> -	*trfcr_el1 = 0;
> +	ctxt_sys_reg(host_ctxt, TRFCR_EL1) = 0;
>   
>   	/* Check if the TRBE is enabled */
>   	if (!(read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_EL1_E))
> @@ -63,30 +63,30 @@ static void __debug_save_trace(u64 *trfcr_el1)
>   	 * Since access to TRFCR_EL1 is trapped, the guest can't
>   	 * modify the filtering set by the host.
>   	 */
> -	*trfcr_el1 = read_sysreg_s(SYS_TRFCR_EL1);
> +	ctxt_sys_reg(host_ctxt, TRFCR_EL1) = read_sysreg_s(SYS_TRFCR_EL1);
>   	write_sysreg_s(0, SYS_TRFCR_EL1);
>   	isb();
>   	/* Drain the trace buffer to memory */
>   	tsb_csync();
>   }
>   
> -static void __debug_restore_trace(u64 trfcr_el1)
> +static void __debug_restore_trace(struct kvm_cpu_context *host_ctxt)
>   {
> -	if (!trfcr_el1)
> +	if (!ctxt_sys_reg(host_ctxt, TRFCR_EL1))
>   		return;
>   
>   	/* Restore trace filter controls */
> -	write_sysreg_s(trfcr_el1, SYS_TRFCR_EL1);
> +	write_sysreg_s(ctxt_sys_reg(host_ctxt, TRFCR_EL1), SYS_TRFCR_EL1);
>   }
>   
> -void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
> +void __debug_save_host_buffers_nvhe(struct kvm_cpu_context *host_ctxt)
>   {
>   	/* Disable and flush SPE data generation */
> -	if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_SPE))
> -		__debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1);
> +	if (vcpu_get_flag(host_ctxt->__hyp_running_vcpu, DEBUG_STATE_SAVE_SPE))
> +		__debug_save_spe(host_ctxt);
>   	/* Disable and flush Self-Hosted Trace generation */
> -	if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_TRFCR))
> -		__debug_save_trace(&vcpu->arch.host_debug_state.trfcr_el1);
> +	if (vcpu_get_flag(host_ctxt->__hyp_running_vcpu, DEBUG_STATE_SAVE_TRFCR))
> +		__debug_save_trace(host_ctxt);
>   }
>   
>   void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
> @@ -94,12 +94,12 @@ void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
>   	__debug_switch_to_guest_common(vcpu);
>   }
>   
> -void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
> +void __debug_restore_host_buffers_nvhe(struct kvm_cpu_context *host_ctxt)
>   {
> -	if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_SPE))
> -		__debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
> -	if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_TRFCR))
> -		__debug_restore_trace(vcpu->arch.host_debug_state.trfcr_el1);
> +	if (vcpu_get_flag(host_ctxt->__hyp_running_vcpu, DEBUG_STATE_SAVE_SPE))
> +		__debug_restore_spe(host_ctxt);
> +	if (vcpu_get_flag(host_ctxt->__hyp_running_vcpu, DEBUG_STATE_SAVE_TRFCR))
> +		__debug_restore_trace(host_ctxt);
>   }
>   
>   void __debug_switch_to_host(struct kvm_vcpu *vcpu)
> diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
> index c353a06ee7e6..c8f15e4dab19 100644
> --- a/arch/arm64/kvm/hyp/nvhe/switch.c
> +++ b/arch/arm64/kvm/hyp/nvhe/switch.c
> @@ -276,7 +276,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
>   	 * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
>   	 * before we load guest Stage1.
>   	 */
> -	__debug_save_host_buffers_nvhe(vcpu);
> +	__debug_save_host_buffers_nvhe(host_ctxt);
>   
>   	/*
>   	 * We're about to restore some new MMU state. Make sure
> @@ -343,7 +343,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
>   	 * This must come after restoring the host sysregs, since a non-VHE
>   	 * system may enable SPE here and make use of the TTBRs.
>   	 */
> -	__debug_restore_host_buffers_nvhe(vcpu);
> +	__debug_restore_host_buffers_nvhe(host_ctxt);
>   
>   	if (pmu_switch_needed)
>   		__pmu_switch_to_host(vcpu);

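For completeness, a rough sketch of how the nVHE run loop ends up handing
the host context to these helpers (simplified, assuming the existing
per-CPU host context lookup in __kvm_vcpu_run):

	struct kvm_cpu_context *host_ctxt;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	host_ctxt->__hyp_running_vcpu = vcpu;

	/* save host SPE/TRBE state into the host context's sysreg array */
	__debug_save_host_buffers_nvhe(host_ctxt);

	/* ... run the guest ... */

	/* restore host SPE/TRBE state from the host context */
	__debug_restore_host_buffers_nvhe(host_ctxt);
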