Message-ID: <a77477a2-cc64-ec8d-1258-f3222b6a358a@redhat.com>
Date: Tue, 26 Jan 2021 10:25:18 +0100
From: Paolo Bonzini <pbonzini@...hat.com>
To: Like Xu <like.xu@...ux.intel.com>,
Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>
Cc: Ingo Molnar <mingo@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>,
Borislav Petkov <bp@...en8.de>,
"H . Peter Anvin" <hpa@...or.com>, ak@...ux.intel.com,
wei.w.wang@...el.com, kan.liang@...el.com, x86@...nel.org,
kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [RESEND v13 01/10] KVM: x86: Move common set/get handler of
MSR_IA32_DEBUGCTLMSR to VMX
On 08/01/21 02:36, Like Xu wrote:
> SVM already has dedicated handlers for MSR_IA32_DEBUGCTLMSR in
> svm_get/set_msr(), so the common x86 handling can safely be moved to VMX.
>
> Add vmx_supported_debugctl() to consolidate the logic that raises #GP.
>
> Signed-off-by: Like Xu <like.xu@...ux.intel.com>
> Reviewed-by: Andi Kleen <ak@...ux.intel.com>
> ---
> arch/x86/kvm/vmx/capabilities.h | 5 +++++
> arch/x86/kvm/vmx/vmx.c | 19 ++++++++++++++++---
> arch/x86/kvm/x86.c | 13 -------------
> 3 files changed, 21 insertions(+), 16 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
> index 3a1861403d73..a58cf3655351 100644
> --- a/arch/x86/kvm/vmx/capabilities.h
> +++ b/arch/x86/kvm/vmx/capabilities.h
> @@ -378,4 +378,9 @@ static inline u64 vmx_get_perf_capabilities(void)
> return PMU_CAP_FW_WRITES;
> }
>
> +static inline u64 vmx_supported_debugctl(void)
> +{
> + return DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF;
> +}
> +
> #endif /* __KVM_X86_VMX_CAPS_H */
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 2af05d3b0590..23b46327527e 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -1924,6 +1924,9 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
> !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
> return 1;
> goto find_uret_msr;
> + case MSR_IA32_DEBUGCTLMSR:
> + msr_info->data = 0;
> + break;
> default:
> find_uret_msr:
> msr = vmx_find_uret_msr(vmx, msr_info->index);
> @@ -2002,9 +2005,19 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
> VM_EXIT_SAVE_DEBUG_CONTROLS)
> get_vmcs12(vcpu)->guest_ia32_debugctl = data;
>
> - ret = kvm_set_msr_common(vcpu, msr_info);
> - break;
> -
> + if (!data) {
> + /* We support the non-activated case already */
> + return 0;
> + } else if (data & ~vmx_supported_debugctl()) {
> + /*
> + * Values other than LBR and BTF are vendor-specific,
> + * thus reserved and should throw a #GP.
> + */
> + return 1;
> + }
> + vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
> + __func__, data);
> + return 0;
> case MSR_IA32_BNDCFGS:
> if (!kvm_mpx_supported() ||
> (!msr_info->host_initiated &&
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 0287840b93e0..c765fd72a66c 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -3063,18 +3063,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
> return 1;
> }
> break;
> - case MSR_IA32_DEBUGCTLMSR:
> - if (!data) {
> - /* We support the non-activated case already */
> - break;
> - } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
> - /* Values other than LBR and BTF are vendor-specific,
> - thus reserved and should throw a #GP */
> - return 1;
> - } else if (report_ignored_msrs)
> - vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
> - __func__, data);
> - break;
> case 0x200 ... 0x2ff:
> return kvm_mtrr_set_msr(vcpu, msr, data);
> case MSR_IA32_APICBASE:
> @@ -3347,7 +3335,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
> switch (msr_info->index) {
> case MSR_IA32_PLATFORM_ID:
> case MSR_IA32_EBL_CR_POWERON:
> - case MSR_IA32_DEBUGCTLMSR:
> case MSR_IA32_LASTBRANCHFROMIP:
> case MSR_IA32_LASTBRANCHTOIP:
> case MSR_IA32_LASTINTFROMIP:
>
Queued, thanks.
Paolo
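
For readers skimming the archive, here is a minimal guest-side sketch of the
semantics this patch gives a VMX guest: writes of LBR/BTF are accepted as a
nop, reads return 0, and any other set bit raises #GP. This sketch is not part
of the original thread; the wrmsr/rdmsr helpers and debugctl_demo() are
illustrative assumptions and presume a guest kernel context where the
instructions are legal.

#include <stdint.h>

#define MSR_IA32_DEBUGCTLMSR	0x000001d9
#define DEBUGCTLMSR_LBR		(1ULL << 0)
#define DEBUGCTLMSR_BTF		(1ULL << 1)

static inline void wrmsr(uint32_t msr, uint64_t val)
{
	asm volatile("wrmsr" :: "c"(msr), "a"((uint32_t)val),
		     "d"((uint32_t)(val >> 32)));
}

static inline uint64_t rdmsr(uint32_t msr)
{
	uint32_t lo, hi;

	asm volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
	return ((uint64_t)hi << 32) | lo;
}

static void debugctl_demo(void)
{
	/* LBR/BTF bits are accepted by vmx_set_msr() but treated as a nop. */
	wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF);

	/* vmx_get_msr() always reports 0 for this MSR. */
	uint64_t val = rdmsr(MSR_IA32_DEBUGCTLMSR);	/* val == 0 */
	(void)val;

	/*
	 * Any bit outside LBR|BTF fails the vmx_supported_debugctl() mask;
	 * vmx_set_msr() returns 1 and the guest takes a #GP, e.g.:
	 * wrmsr(MSR_IA32_DEBUGCTLMSR, 1ULL << 6);
	 */
}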