Message-ID: <20241128013424.4096668-45-seanjc@google.com>
Date: Wed, 27 Nov 2024 17:34:11 -0800
From: Sean Christopherson <seanjc@...gle.com>
To: Paolo Bonzini <pbonzini@...hat.com>, Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>, Jarkko Sakkinen <jarkko@...nel.org>
Cc: kvm@...r.kernel.org, linux-sgx@...r.kernel.org,
linux-kernel@...r.kernel.org, Maxim Levitsky <mlevitsk@...hat.com>,
Hou Wenlong <houwenlong.hwl@...group.com>, Xiaoyao Li <xiaoyao.li@...el.com>,
Kechen Lu <kechenl@...dia.com>, Oliver Upton <oliver.upton@...ux.dev>,
Binbin Wu <binbin.wu@...ux.intel.com>, Yang Weijiang <weijiang.yang@...el.com>,
Robert Hoo <robert.hoo.linux@...il.com>
Subject: [PATCH v3 44/57] KVM: x86: Initialize guest cpu_caps based on KVM support
Constrain all guest cpu_caps based on KVM support instead of constraining
only the few features that KVM _currently_ needs to verify are actually
supported by KVM. The intent of cpu_caps is to track what the guest is
actually capable of using, not the raw, unfiltered CPUID values that the
guest sees.
I.e. KVM should always consult its own support when making decisions based
on guest CPUID; the only reason KVM has historically made the checks opt-in
was the lack of centralized tracking.
Suggested-by: Maxim Levitsky <mlevitsk@...hat.com>
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
arch/x86/kvm/cpuid.c | 15 ++++++++++++++-
arch/x86/kvm/cpuid.h | 7 -------
arch/x86/kvm/svm/svm.c | 11 -----------
arch/x86/kvm/vmx/vmx.c | 9 ++-------
4 files changed, 16 insertions(+), 26 deletions(-)
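
Note for reviewers (not intended for the commit message): a tiny userspace
sketch of the derivation the new loop in kvm_vcpu_after_set_cpuid() performs.
This is not kernel code; the bitmask values and variable names are made up
purely to illustrate the "(KVM support | emulated bits) & guest CPUID" logic.

	/*
	 * Standalone illustration, built with a plain C compiler.
	 * Models one iteration of the cpu_caps loop for a single
	 * hypothetical 32-bit feature leaf.
	 */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Features KVM supports and advertises (hypothetical bits). */
		uint32_t kvm_cpu_caps    = 0x0000f00f;
		/* Features KVM emulates but doesn't advertise to userspace. */
		uint32_t emulated_caps   = 0x00000030;
		/* Raw feature bits userspace set in the guest's CPUID entry. */
		uint32_t guest_cpuid_reg = 0x0000ff3c;

		/*
		 * A vCPU has a feature iff it is supported (or emulated) by
		 * KVM _and_ enabled in guest CPUID, mirroring:
		 *   cpu_caps[i]  = kvm_cpu_caps[i] | cpuid_get_reg_unsafe(&emulated, reg);
		 *   cpu_caps[i] &= cpuid_get_reg_unsafe(entry, reg);
		 */
		uint32_t cpu_caps = (kvm_cpu_caps | emulated_caps) & guest_cpuid_reg;

		printf("effective cpu_caps = 0x%08x\n", cpu_caps);
		return 0;
	}

With the effective mask computed up front, the per-feature
guest_cpu_cap_constrain() calls in svm.c and vmx.c become redundant, which is
why the diff below deletes them.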
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 0c63492f119d..8015d6b52a69 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -355,6 +355,9 @@ static u32 cpuid_get_reg_unsafe(struct kvm_cpuid_entry2 *entry, u32 reg)
}
}
+static int cpuid_func_emulated(struct kvm_cpuid_entry2 *entry, u32 func,
+ bool include_partially_emulated);
+
void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
@@ -373,6 +376,7 @@ void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
*/
for (i = 0; i < NR_KVM_CPU_CAPS; i++) {
const struct cpuid_reg cpuid = reverse_cpuid[i];
+ struct kvm_cpuid_entry2 emulated;
if (!cpuid.function)
continue;
@@ -381,7 +385,16 @@ void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
if (!entry)
continue;
- vcpu->arch.cpu_caps[i] = cpuid_get_reg_unsafe(entry, cpuid.reg);
+ cpuid_func_emulated(&emulated, cpuid.function, true);
+
+ /*
+ * A vCPU has a feature if it's supported by KVM and is enabled
+ * in guest CPUID. Note, this includes features that are
+ * supported by KVM but aren't advertised to userspace!
+ */
+ vcpu->arch.cpu_caps[i] = kvm_cpu_caps[i] |
+ cpuid_get_reg_unsafe(&emulated, cpuid.reg);
+ vcpu->arch.cpu_caps[i] &= cpuid_get_reg_unsafe(entry, cpuid.reg);
}
kvm_update_cpuid_runtime(vcpu);
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 8c9d6be8cb58..27da0964355c 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -263,13 +263,6 @@ static __always_inline void guest_cpu_cap_change(struct kvm_vcpu *vcpu,
guest_cpu_cap_clear(vcpu, x86_feature);
}
-static __always_inline void guest_cpu_cap_constrain(struct kvm_vcpu *vcpu,
- unsigned int x86_feature)
-{
- if (!kvm_cpu_cap_has(x86_feature))
- guest_cpu_cap_clear(vcpu, x86_feature);
-}
-
static __always_inline bool guest_cpu_cap_has(struct kvm_vcpu *vcpu,
unsigned int x86_feature)
{
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 3b94cb6c2b7a..0045fe474023 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4406,10 +4406,6 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
boot_cpu_has(X86_FEATURE_XSAVES) &&
guest_cpuid_has(vcpu, X86_FEATURE_XSAVE));
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_NRIPS);
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_TSCRATEMSR);
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_LBRV);
-
/*
* Intercept VMLOAD if the vCPU model is Intel in order to emulate that
* VMLOAD drops bits 63:32 of SYSENTER (ignoring the fact that exposing
@@ -4417,13 +4413,6 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
*/
if (guest_cpuid_is_intel_compatible(vcpu))
guest_cpu_cap_clear(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
- else
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
-
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_PAUSEFILTER);
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_PFTHRESHOLD);
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_VGIF);
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_VNMI);
svm_recalc_instruction_intercepts(vcpu, svm);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 8b95ba323a17..a7c2c36f2a4f 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7828,15 +7828,10 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
* to the guest. XSAVES depends on CR4.OSXSAVE, and CR4.OSXSAVE can be
* set if and only if XSAVE is supported.
*/
- if (boot_cpu_has(X86_FEATURE_XSAVE) &&
- guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_XSAVES);
- else
+ if (!boot_cpu_has(X86_FEATURE_XSAVE) ||
+ !guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
guest_cpu_cap_clear(vcpu, X86_FEATURE_XSAVES);
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_VMX);
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_LAM);
-
vmx_setup_uret_msrs(vmx);
if (cpu_has_secondary_exec_ctrls())
--
2.47.0.338.g60cca15819-goog