Message-ID: <a258f712549eb42266a8109c51032fc77e297f49.camel@redhat.com>
Date:   Mon, 24 May 2021 20:50:55 +0300
From:   Maxim Levitsky <mlevitsk@...hat.com>
To:     Ilias Stamatis <ilstam@...zon.com>, kvm@...r.kernel.org,
        linux-kernel@...r.kernel.org, pbonzini@...hat.com
Cc:     seanjc@...gle.com, vkuznets@...hat.com, wanpengli@...cent.com,
        jmattson@...gle.com, joro@...tes.org, zamsden@...il.com,
        mtosatti@...hat.com, dwmw@...zon.co.uk
Subject: Re: [PATCH v3 06/12] KVM: X86: Add functions for retrieving L2 TSC
 fields from common code

On Fri, 2021-05-21 at 11:24 +0100, Ilias Stamatis wrote:
> In order to implement as much of the nested TSC scaling logic as
> possible in common code, we need these vendor callbacks for retrieving
> the TSC offset and the TSC multiplier that L1 has set for L2.
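
As context: a rough sketch (illustrative only, not code from this patch or
series) of how common code can combine L1's value with what these callbacks
return. mul_u64_u64_shr() is the existing math64 helper,
kvm_tsc_scaling_ratio_frac_bits is the vendor-set fractional width of the
ratio, and the static_call name follows the KVM_X86_OP convention; the
function name itself is made up here:

	static u64 calc_l02_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier)
	{
		u64 l2_multiplier = static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu);

		/* A ratio of 1.0 (the default) leaves L1's ratio as-is. */
		if (l2_multiplier == kvm_default_tsc_scaling_ratio)
			return l1_multiplier;

		/* Fixed-point multiply; both ratios carry frac_bits fraction bits. */
		return mul_u64_u64_shr(l1_multiplier, l2_multiplier,
				       kvm_tsc_scaling_ratio_frac_bits);
	}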
> 
> Signed-off-by: Ilias Stamatis <ilstam@...zon.com>
> ---
>  arch/x86/include/asm/kvm-x86-ops.h |  2 ++
>  arch/x86/include/asm/kvm_host.h    |  2 ++
>  arch/x86/kvm/svm/svm.c             | 14 ++++++++++++++
>  arch/x86/kvm/vmx/vmx.c             | 23 +++++++++++++++++++++++
>  arch/x86/kvm/vmx/vmx.h             |  3 +++
>  5 files changed, 44 insertions(+)
> 
> diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
> index 323641097f63..2063616fba1c 100644
> --- a/arch/x86/include/asm/kvm-x86-ops.h
> +++ b/arch/x86/include/asm/kvm-x86-ops.h
> @@ -87,6 +87,8 @@ KVM_X86_OP(set_identity_map_addr)
>  KVM_X86_OP(get_mt_mask)
>  KVM_X86_OP(load_mmu_pgd)
>  KVM_X86_OP_NULL(has_wbinvd_exit)
> +KVM_X86_OP(get_l2_tsc_offset)
> +KVM_X86_OP(get_l2_tsc_multiplier)
>  KVM_X86_OP(write_l1_tsc_offset)
>  KVM_X86_OP(get_exit_info)
>  KVM_X86_OP(check_intercept)
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index b14c2b2b2e21..0f2cf5d1240c 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1305,6 +1305,8 @@ struct kvm_x86_ops {
>  
>  	bool (*has_wbinvd_exit)(void);
>  
> +	u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
> +	u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu);
>  	/* Returns actual tsc_offset set in active VMCS */
>  	u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
>  
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 05eca131eaf2..ca70e46f9194 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -1082,6 +1082,18 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
>  	seg->base = 0;
>  }
>  
> +static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
> +{
> +	struct vcpu_svm *svm = to_svm(vcpu);
> +
> +	return svm->nested.ctl.tsc_offset;
> +}
> +
> +static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
> +{
> +	return kvm_default_tsc_scaling_ratio;
I guess this makes sense as long as we don't support nested TSC scaling on SVM.
Or maybe put a WARN_ON here instead. Either way it doesn't matter much, as I'll
implement nested TSC scaling for SVM right after this series is done, which
should be very easy.
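
Something like this, perhaps (illustrative only; the WARN condition is just a
guess at a check that would flag the gap until SVM gains nested TSC scaling):

	static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
	{
		/*
		 * Nested TSC scaling isn't implemented on SVM yet, so L1 has
		 * no way to program a TSC ratio for L2; scream if the guest
		 * was nevertheless told it has TSCRATEMSR.
		 */
		WARN_ON_ONCE(guest_cpuid_has(vcpu, X86_FEATURE_TSCRATEMSR));

		return kvm_default_tsc_scaling_ratio;
	}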

> +}
> +
>  static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
>  {
>  	struct vcpu_svm *svm = to_svm(vcpu);
> @@ -4526,6 +4538,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
>  
>  	.has_wbinvd_exit = svm_has_wbinvd_exit,
>  
> +	.get_l2_tsc_offset = svm_get_l2_tsc_offset,
> +	.get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier,
>  	.write_l1_tsc_offset = svm_write_l1_tsc_offset,
>  
>  	.load_mmu_pgd = svm_load_mmu_pgd,
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 3e4dda8177bb..1c83605eccc1 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -1787,6 +1787,27 @@ static void setup_msrs(struct vcpu_vmx *vmx)
>  	vmx->guest_uret_msrs_loaded = false;
>  }
>  
> +u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
> +{
> +	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
> +
> +	if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING))
> +		return vmcs12->tsc_offset;
> +
> +	return 0;
> +}
> +
> +u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
> +{
> +	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
> +
> +	if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING) &&
> +	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
> +		return vmcs12->tsc_multiplier;
> +
> +	return kvm_default_tsc_scaling_ratio;
> +}
> +
>  static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
>  {
>  	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
> @@ -7700,6 +7721,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
>  
>  	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
>  
> +	.get_l2_tsc_offset = vmx_get_l2_tsc_offset,
> +	.get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier,
>  	.write_l1_tsc_offset = vmx_write_l1_tsc_offset,
>  
>  	.load_mmu_pgd = vmx_load_mmu_pgd,
> diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
> index 16e4e457ba23..aa97c82e3451 100644
> --- a/arch/x86/kvm/vmx/vmx.h
> +++ b/arch/x86/kvm/vmx/vmx.h
> @@ -404,6 +404,9 @@ void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
>  void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
>  void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
>  
> +u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
> +u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
> +
>  static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
>  					     int type, bool value)
>  {

Looks great.
Reviewed-by: Maxim Levitsky <mlevitsk@...hat.com>

Best regards,
	Maxim Levitsky
