Message-ID: <20240517173926.965351-41-seanjc@google.com>
Date: Fri, 17 May 2024 10:39:17 -0700
From: Sean Christopherson <seanjc@...gle.com>
To: Paolo Bonzini <pbonzini@...hat.com>, Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
Hou Wenlong <houwenlong.hwl@...group.com>, Kechen Lu <kechenl@...dia.com>,
Oliver Upton <oliver.upton@...ux.dev>, Maxim Levitsky <mlevitsk@...hat.com>,
Binbin Wu <binbin.wu@...ux.intel.com>, Yang Weijiang <weijiang.yang@...el.com>,
Robert Hoo <robert.hoo.linux@...il.com>
Subject: [PATCH v2 40/49] KVM: x86: Initialize guest cpu_caps based on KVM support

Constrain all guest cpu_caps based on KVM support instead of constraining
only the few features that KVM _currently_ needs to verify are actually
supported by KVM.  The intent of cpu_caps is to track what the guest is
actually capable of using, not the raw, unfiltered CPUID values that the
guest sees.

I.e. KVM should always consult its own support when making decisions
based on guest CPUID, and the only reason KVM has historically made the
checks opt-in was due to the lack of centralized tracking.
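
For illustration, a minimal userspace sketch of the resulting policy,
using hypothetical cap words in place of kvm_cpu_caps[], reverse_cpuid[]
and struct kvm_cpuid_entry2: a vCPU ends up with a feature only if KVM
supports it AND guest CPUID enables it.

#include <stdint.h>
#include <stdio.h>

#define NR_CAP_WORDS 2

int main(void)
{
	/* Features KVM can virtualize (hypothetical values). */
	const uint32_t kvm_caps[NR_CAP_WORDS]    = { 0x000000ff, 0x0000f0f0 };
	/* Raw feature words from guest CPUID (hypothetical values). */
	const uint32_t guest_cpuid[NR_CAP_WORDS] = { 0x0000ffff, 0x000000f0 };
	uint32_t cpu_caps[NR_CAP_WORDS];
	int i;

	for (i = 0; i < NR_CAP_WORDS; i++) {
		/*
		 * The guest can use a feature only if KVM supports it
		 * AND the feature is enabled in guest CPUID.
		 */
		cpu_caps[i] = kvm_caps[i] & guest_cpuid[i];
		printf("word %d: guest cpu_caps = 0x%08x\n", i, cpu_caps[i]);
	}
	return 0;
}
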
Suggested-by: Maxim Levitsky <mlevitsk@...hat.com>
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
arch/x86/kvm/cpuid.c | 14 +++++++++++++-
arch/x86/kvm/cpuid.h | 7 -------
arch/x86/kvm/svm/svm.c | 11 -----------
arch/x86/kvm/vmx/vmx.c | 9 ++-------
4 files changed, 15 insertions(+), 26 deletions(-)
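
The removals from svm.c and vmx.c below fall out of the new
initialization: with cpu_caps pre-masked against KVM support at
CPUID-set time, a guest feature check reduces to a bit test, with no
per-feature constrain call.  A simplified, hypothetical sketch of that
query pattern, where guest_cap_has() stands in for guest_cpu_cap_has()
and the flat word/bit layout stands in for reverse_cpuid[]:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CAP_WORDS 2

/* Hypothetical pre-masked caps, i.e. (KVM support & guest CPUID). */
static const uint32_t cpu_caps[NR_CAP_WORDS] = { 0x000000f0, 0x00000001 };

/* Simplified stand-in for guest_cpu_cap_has(vcpu, X86_FEATURE_*). */
static bool guest_cap_has(unsigned int feature)
{
	return cpu_caps[feature / 32] & (1u << (feature % 32));
}

int main(void)
{
	/* Hypothetical feature index: bit 4 of word 0. */
	unsigned int feature = 4;

	if (guest_cap_has(feature))
		puts("feature is usable by the guest");
	else
		puts("feature is hidden from the guest");
	return 0;
}
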
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index d1849fe874ab..8ada1cac8fcb 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -403,6 +403,8 @@ static u32 cpuid_get_reg_unsafe(struct kvm_cpuid_entry2 *entry, u32 reg)
}
}
+static int cpuid_func_emulated(struct kvm_cpuid_entry2 *entry, u32 func);
+
void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
@@ -421,6 +423,7 @@ void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
*/
for (i = 0; i < NR_KVM_CPU_CAPS; i++) {
const struct cpuid_reg cpuid = reverse_cpuid[i];
+ struct kvm_cpuid_entry2 emulated;
if (!cpuid.function)
continue;
@@ -429,7 +432,16 @@ void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
if (!entry)
continue;
- vcpu->arch.cpu_caps[i] = cpuid_get_reg_unsafe(entry, cpuid.reg);
+ cpuid_func_emulated(&emulated, cpuid.function);
+
+ /*
+ * A vCPU has a feature if it's supported by KVM and is enabled
+ * in guest CPUID. Note, this includes features that are
+ * supported by KVM but aren't advertised to userspace!
+ */
+ vcpu->arch.cpu_caps[i] = kvm_cpu_caps[i] | kvm_vmm_cpu_caps[i] |
+ cpuid_get_reg_unsafe(&emulated, cpuid.reg);
+ vcpu->arch.cpu_caps[i] &= cpuid_get_reg_unsafe(entry, cpuid.reg);
}
kvm_update_cpuid_runtime(vcpu);
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index c2c2b8aa347b..60da304db4e4 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -284,13 +284,6 @@ static __always_inline void guest_cpu_cap_change(struct kvm_vcpu *vcpu,
guest_cpu_cap_clear(vcpu, x86_feature);
}
-static __always_inline void guest_cpu_cap_constrain(struct kvm_vcpu *vcpu,
- unsigned int x86_feature)
-{
- if (!kvm_cpu_cap_has(x86_feature))
- guest_cpu_cap_clear(vcpu, x86_feature);
-}
-
static __always_inline bool guest_cpu_cap_has(struct kvm_vcpu *vcpu,
unsigned int x86_feature)
{
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 1bc431a7e862..946a75771946 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4344,10 +4344,6 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
boot_cpu_has(X86_FEATURE_XSAVES) &&
guest_cpuid_has(vcpu, X86_FEATURE_XSAVE));
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_NRIPS);
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_TSCRATEMSR);
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_LBRV);
-
/*
* Intercept VMLOAD if the vCPU mode is Intel in order to emulate that
* VMLOAD drops bits 63:32 of SYSENTER (ignoring the fact that exposing
@@ -4355,13 +4351,6 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
*/
if (guest_cpuid_is_intel(vcpu))
guest_cpu_cap_clear(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
- else
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
-
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_PAUSEFILTER);
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_PFTHRESHOLD);
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_VGIF);
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_VNMI);
svm_recalc_instruction_intercepts(vcpu, svm);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index d873386e1473..653c4b68ec7f 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7836,15 +7836,10 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
* to the guest. XSAVES depends on CR4.OSXSAVE, and CR4.OSXSAVE can be
* set if and only if XSAVE is supported.
*/
- if (boot_cpu_has(X86_FEATURE_XSAVE) &&
- guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_XSAVES);
- else
+ if (!boot_cpu_has(X86_FEATURE_XSAVE) ||
+ !guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
guest_cpu_cap_clear(vcpu, X86_FEATURE_XSAVES);
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_VMX);
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_LAM);
-
vmx_setup_uret_msrs(vmx);
if (cpu_has_secondary_exec_ctrls())
--
2.45.0.215.g3402c0e53f-goog