Message-Id: <20240410143446.797262-8-chao.gao@intel.com>
Date: Wed, 10 Apr 2024 22:34:35 +0800
From: Chao Gao <chao.gao@...el.com>
To: kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: daniel.sneddon@...ux.intel.com,
pawan.kumar.gupta@...ux.intel.com,
Zhang Chen <chen.zhang@...el.com>,
Chao Gao <chao.gao@...el.com>,
Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>
Subject: [RFC PATCH v3 07/10] KVM: x86: Advertise ARCH_CAP_VIRTUAL_ENUM support

From: Zhang Chen <chen.zhang@...el.com>

Bit 63 of the IA32_ARCH_CAPABILITIES MSR indicates the availability of the
VIRTUAL_ENUMERATION_MSR (index 0x50000000), which enumerates virtual
features, e.g., mitigation enumeration, which the guest in turn uses to
report the software mitigations it is using.

Advertise ARCH_CAP_VIRTUAL_ENUM support for VMX and emulate reads and
writes of the VIRTUAL_ENUMERATION_MSR. For now, the VIRTUAL_ENUMERATION_MSR
always reads as 0.
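
As a reviewer aid (not part of this patch), below is a minimal sketch of how
a guest kernel could probe the new enumeration. It assumes the
ARCH_CAP_VIRTUAL_ENUM and MSR_VIRTUAL_ENUMERATION definitions introduced
earlier in this series (bit 63 of IA32_ARCH_CAPABILITIES and MSR index
0x50000000, per the description above); the function name is hypothetical.

static u64 probe_virtual_enumeration(void)
{
	u64 arch_cap, virt_enum;

	/* IA32_ARCH_CAPABILITIES itself is enumerated via CPUID.7.0:EDX[29]. */
	if (!boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
		return 0;

	rdmsrl(MSR_IA32_ARCH_CAPABILITIES, arch_cap);
	if (!(arch_cap & ARCH_CAP_VIRTUAL_ENUM))
		return 0;

	/* With this patch the MSR always reads as 0; later patches define bits. */
	rdmsrl(MSR_VIRTUAL_ENUMERATION, virt_enum);
	return virt_enum;
}
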
Signed-off-by: Zhang Chen <chen.zhang@...el.com>
Co-developed-by: Chao Gao <chao.gao@...el.com>
Signed-off-by: Chao Gao <chao.gao@...el.com>
---
arch/x86/kvm/svm/svm.c | 1 +
arch/x86/kvm/vmx/vmx.c | 19 +++++++++++++++++++
arch/x86/kvm/vmx/vmx.h | 2 ++
arch/x86/kvm/x86.c | 16 +++++++++++++++-
4 files changed, 37 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index d1a9f9951635..e3406971a8b7 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4288,6 +4288,7 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
{
switch (index) {
case MSR_IA32_MCG_EXT_CTL:
+ case MSR_VIRTUAL_ENUMERATION:
case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
return false;
case MSR_IA32_SMBASE:
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index cdfcc1290d82..dcb06406fd09 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1955,6 +1955,8 @@ static inline bool is_vmx_feature_control_msr_valid(struct vcpu_vmx *vmx,
return !(msr->data & ~valid_bits);
}
+#define VIRTUAL_ENUMERATION_VALID_BITS 0ULL
+
static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
{
switch (msr->index) {
@@ -1962,6 +1964,9 @@ static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
if (!nested)
return 1;
return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
+ case MSR_VIRTUAL_ENUMERATION:
+ msr->data = VIRTUAL_ENUMERATION_VALID_BITS;
+ return 0;
default:
return KVM_MSR_RET_INVALID;
}
@@ -2113,6 +2118,12 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_DEBUGCTLMSR:
msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL);
break;
+ case MSR_VIRTUAL_ENUMERATION:
+ if (!msr_info->host_initiated &&
+ !(vcpu->arch.arch_capabilities & ARCH_CAP_VIRTUAL_ENUM))
+ return 1;
+ msr_info->data = vmx->msr_virtual_enumeration;
+ break;
default:
find_uret_msr:
msr = vmx_find_uret_msr(vmx, msr_info->index);
@@ -2457,6 +2468,14 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
ret = kvm_set_msr_common(vcpu, msr_info);
break;
+ case MSR_VIRTUAL_ENUMERATION:
+ if (!msr_info->host_initiated)
+ return 1;
+ if (data & ~VIRTUAL_ENUMERATION_VALID_BITS)
+ return 1;
+
+ vmx->msr_virtual_enumeration = data;
+ break;
default:
find_uret_msr:
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index a4dfe538e5a8..0519cf6187ac 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -294,6 +294,8 @@ struct vcpu_vmx {
u64 force_spec_ctrl_mask;
u64 force_spec_ctrl_value;
+ u64 msr_virtual_enumeration;
+
u32 msr_ia32_umwait_control;
/*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9a59b5a93d0e..4721b6fe7641 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1564,6 +1564,7 @@ static const u32 emulated_msrs_all[] = {
MSR_K7_HWCR,
MSR_KVM_POLL_CONTROL,
+ MSR_VIRTUAL_ENUMERATION,
};
static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
@@ -1579,6 +1580,7 @@ static const u32 msr_based_features_all_except_vmx[] = {
MSR_IA32_UCODE_REV,
MSR_IA32_ARCH_CAPABILITIES,
MSR_IA32_PERF_CAPABILITIES,
+ MSR_VIRTUAL_ENUMERATION,
};
static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) +
@@ -1621,7 +1623,8 @@ static bool kvm_is_immutable_feature_msr(u32 msr)
ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \
- ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO)
+ ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO | \
+ ARCH_CAP_VIRTUAL_ENUM)
static u64 kvm_get_arch_capabilities(void)
{
@@ -1635,6 +1638,17 @@ static u64 kvm_get_arch_capabilities(void)
*/
data |= ARCH_CAP_PSCHANGE_MC_NO;
+ /*
+ * Virtual enumeration is a paravirt feature. Its only use for now is
+ * to bridge gaps caused by microarchitectural changes between
+ * different Intel processors, and it is tied to the "virtualize
+ * IA32_SPEC_CTRL" VMX feature. Whether AMD SVM could benefit from the
+ * same usage, and how it would be implemented there, is still unclear.
+ * Limit virtual enumeration to VMX.
+ */
+ if (static_call(kvm_x86_has_emulated_msr)(NULL, MSR_VIRTUAL_ENUMERATION))
+ data |= ARCH_CAP_VIRTUAL_ENUM;
+
/*
* If we're doing cache flushes (either "always" or "cond")
* we will do one whenever the guest does a vmlaunch/vmresume.
--
2.39.3
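
Since the patch exposes MSR_VIRTUAL_ENUMERATION through msr_based_features,
host userspace can read the supported bits through KVM's existing feature-MSR
interface on the /dev/kvm fd (KVM_CAP_GET_MSR_FEATURES / KVM_GET_MSRS). The
following is a rough, untested sketch only; the 0x50000000 index comes from
the commit message and error handling is minimal.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MSR_VIRTUAL_ENUMERATION	0x50000000

int main(void)
{
	struct kvm_msrs *msrs;
	int kvm_fd = open("/dev/kvm", O_RDWR);

	if (kvm_fd < 0 ||
	    ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_GET_MSR_FEATURES) <= 0) {
		fprintf(stderr, "feature MSRs not supported\n");
		return 1;
	}

	/* One entry; KVM_GET_MSRS on the system fd reads feature-MSR values. */
	msrs = calloc(1, sizeof(*msrs) + sizeof(struct kvm_msr_entry));
	msrs->nmsrs = 1;
	msrs->entries[0].index = MSR_VIRTUAL_ENUMERATION;

	if (ioctl(kvm_fd, KVM_GET_MSRS, msrs) != 1) {
		fprintf(stderr, "MSR_VIRTUAL_ENUMERATION not reported by KVM\n");
		return 1;
	}

	printf("MSR_VIRTUAL_ENUMERATION supported bits: 0x%llx\n",
	       (unsigned long long)msrs->entries[0].data);
	return 0;
}
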