Date:   Tue, 25 Feb 2020 15:16:46 +0100
From:   Vitaly Kuznetsov <vkuznets@...hat.com>
To:     Sean Christopherson <sean.j.christopherson@...el.com>
Cc:     Paolo Bonzini <pbonzini@...hat.com>,
        Wanpeng Li <wanpengli@...cent.com>,
        Jim Mattson <jmattson@...gle.com>,
        Joerg Roedel <joro@...tes.org>, kvm@...r.kernel.org,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH 55/61] KVM: VMX: Directly query Intel PT mode when refreshing PMUs

Sean Christopherson <sean.j.christopherson@...el.com> writes:

> Use vmx_pt_mode_is_host_guest() in intel_pmu_refresh() instead of
> bouncing through kvm_x86_ops->pt_supported, and remove ->pt_supported()
> as the PMU code was the last remaining user.
>
> Opportunistically clean up the wording of a comment that referenced
> kvm_x86_ops->pt_supported().
>
> No functional change intended.
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@...el.com>
> ---
>  arch/x86/include/asm/kvm_host.h | 2 --
>  arch/x86/kvm/svm.c              | 7 -------
>  arch/x86/kvm/vmx/pmu_intel.c    | 2 +-
>  arch/x86/kvm/vmx/vmx.c          | 6 ------
>  arch/x86/kvm/x86.c              | 7 +++----
>  5 files changed, 4 insertions(+), 20 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 1dd5ac8a2136..a8bae9d88bce 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1162,8 +1162,6 @@ struct kvm_x86_ops {
>  	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu,
>  		enum exit_fastpath_completion *exit_fastpath);
>  
> -	bool (*pt_supported)(void);
> -
>  	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
>  	void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
>  
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 6dd9c810c0dc..a27f83f7521c 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -6074,11 +6074,6 @@ static int svm_get_lpage_level(void)
>  	return PT_PDPE_LEVEL;
>  }
>  
> -static bool svm_pt_supported(void)
> -{
> -	return false;
> -}
> -
>  static bool svm_has_wbinvd_exit(void)
>  {
>  	return true;
> @@ -7438,8 +7433,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
>  
>  	.cpuid_update = svm_cpuid_update,
>  
> -	.pt_supported = svm_pt_supported,
> -
>  	.set_supported_cpuid = svm_set_supported_cpuid,
>  
>  	.has_wbinvd_exit = svm_has_wbinvd_exit,
> diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
> index 34a3a17bb6d7..d8f5cb312b9d 100644
> --- a/arch/x86/kvm/vmx/pmu_intel.c
> +++ b/arch/x86/kvm/vmx/pmu_intel.c
> @@ -330,7 +330,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
>  	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
>  			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
>  			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
> -	if (kvm_x86_ops->pt_supported())
> +	if (vmx_pt_mode_is_host_guest())
>  		pmu->global_ovf_ctrl_mask &=
>  				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
>  
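A side note for anyone reading along: vmx_pt_mode_is_host_guest() is, if
I'm looking at the right tree, a trivial static inline in
arch/x86/kvm/vmx/capabilities.h, so the indirect call through kvm_x86_ops
becomes a plain comparison:

	static inline bool vmx_pt_mode_is_host_guest(void)
	{
		return pt_mode == PT_MODE_HOST_GUEST;
	}

(pt_mode is the kvm_intel module parameter selecting the Intel PT mode.)
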
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 98d54cfa0cbe..e6284b6aac56 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -6283,11 +6283,6 @@ static bool vmx_has_emulated_msr(int index)
>  	}
>  }
>  
> -static bool vmx_pt_supported(void)
> -{
> -	return vmx_pt_mode_is_host_guest();
> -}
> -
>  static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
>  {
>  	u32 exit_intr_info;
> @@ -7876,7 +7871,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
>  
>  	.check_intercept = vmx_check_intercept,
>  	.handle_exit_irqoff = vmx_handle_exit_irqoff,
> -	.pt_supported = vmx_pt_supported,
>  
>  	.request_immediate_exit = vmx_request_immediate_exit,
>  
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 9d38dcdbb613..144143a57d0b 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -2805,10 +2805,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  		    !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
>  			return 1;
>  		/*
> -		 * We do support PT if kvm_x86_ops->pt_supported(), but we do
> -		 * not support IA32_XSS[bit 8]. Guests will have to use
> -		 * RDMSR/WRMSR rather than XSAVES/XRSTORS to save/restore PT
> -		 * MSRs.
> +		 * KVM supports exposing PT to the guest, but does not support
> +		 * IA32_XSS[bit 8]. Guests have to use RDMSR/WRMSR rather than
> +		 * XSAVES/XRSTORS to save/restore PT MSRs.

So the responsibility shifts from a vague 'we' to KVM. There should be
a juridical term for that :-)

>  		 */
>  		if (data != 0)
>  			return 1;
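
A bit of context for the comment above: IA32_XSS[8] is the Processor
Trace state component, so with non-zero IA32_XSS rejected here, a guest
that is given PT has to context-switch the trace MSRs by hand. A
hypothetical guest-side sketch (not code from this patch):

	/*
	 * XSAVES can't manage PT state while IA32_XSS[8] reads as 0, so
	 * save/restore the trace MSRs explicitly, one at a time.
	 */
	u64 rtit_ctl;

	rdmsrl(MSR_IA32_RTIT_CTL, rtit_ctl);	/* save */
	wrmsrl(MSR_IA32_RTIT_CTL, rtit_ctl);	/* restore the same way */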

Reviewed-by: Vitaly Kuznetsov <vkuznets@...hat.com>

-- 
Vitaly
