Message-ID: <20221012203910.204793-2-john.allen@amd.com>
Date: Wed, 12 Oct 2022 20:39:04 +0000
From: John Allen <john.allen@....com>
To: <kvm@...r.kernel.org>
CC: <linux-kernel@...r.kernel.org>, <pbonzini@...hat.com>,
<weijiang.yang@...el.com>, <rick.p.edgecombe@...el.com>,
<seanjc@...gle.com>, <x86@...nel.org>, <thomas.lendacky@....com>,
John Allen <john.allen@....com>
Subject: [RFC PATCH 1/7] KVM: x86: Move shared CET routine to common x86 kvm code

cet_is_msr_accessible can also be used for shadow stack support in SVM.
Move this to common x86 kvm code.

Signed-off-by: John Allen <john.allen@....com>
---
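As a rough illustration of the intended reuse (not part of this patch; the
SVM side is presumably wired up later in this series and may look
different), an SVM MSR intercept could gate the CET MSRs on the common
helper the same way the VMX code below does. The svm_get_cet_msr_value()
helper here is hypothetical:

	case MSR_IA32_S_CET:
	case MSR_IA32_PL3_SSP:
	case MSR_KVM_GUEST_SSP:
		if (!kvm_cet_is_msr_accessible(vcpu, msr_info))
			return 1;
		/*
		 * Hypothetical helper: on SVM these values would come
		 * from the VMCB save area rather than VMCS fields.
		 */
		msr_info->data = svm_get_cet_msr_value(vcpu, msr_info->index);
		break;
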
arch/x86/kvm/vmx/vmx.c | 32 +++-----------------------------
arch/x86/kvm/x86.c | 26 ++++++++++++++++++++++++++
arch/x86/kvm/x86.h | 2 ++
3 files changed, 31 insertions(+), 29 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4558b13d0610..8b79a727b29c 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1845,32 +1845,6 @@ static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
}
}
-static bool cet_is_msr_accessible(struct kvm_vcpu *vcpu,
- struct msr_data *msr)
-{
- if (!kvm_cet_user_supported() &&
- !cet_kernel_ibt_supported())
- return false;
-
- if (msr->host_initiated)
- return true;
-
- if (!guest_cpuid_has(vcpu, X86_FEATURE_SHSTK) &&
- !guest_cpuid_has(vcpu, X86_FEATURE_IBT))
- return false;
-
- if (msr->index == MSR_IA32_S_CET &&
- guest_cpuid_has(vcpu, X86_FEATURE_IBT))
- return true;
-
- if ((msr->index == MSR_IA32_PL3_SSP ||
- msr->index == MSR_KVM_GUEST_SSP) &&
- !guest_cpuid_has(vcpu, X86_FEATURE_SHSTK))
- return false;
-
- return true;
-}
-
/*
* Reads an msr value (of 'msr_info->index') into 'msr_info->data'.
* Returns 0 on success, non-0 otherwise.
@@ -2014,7 +1988,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_PL3_SSP:
case MSR_KVM_GUEST_SSP:
case MSR_IA32_S_CET:
- if (!cet_is_msr_accessible(vcpu, msr_info))
+ if (!kvm_cet_is_msr_accessible(vcpu, msr_info))
return 1;
if (msr_info->index == MSR_KVM_GUEST_SSP)
msr_info->data = vmcs_readl(GUEST_SSP);
@@ -2363,7 +2337,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_IA32_U_CET:
case MSR_IA32_S_CET:
- if (!cet_is_msr_accessible(vcpu, msr_info))
+ if (!kvm_cet_is_msr_accessible(vcpu, msr_info))
return 1;
if ((data & GENMASK(9, 6)) ||
is_noncanonical_address(data, vcpu))
@@ -2375,7 +2349,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_IA32_PL3_SSP:
case MSR_KVM_GUEST_SSP:
- if (!cet_is_msr_accessible(vcpu, msr_info))
+ if (!kvm_cet_is_msr_accessible(vcpu, msr_info))
return 1;
if ((data & GENMASK(2, 0)) ||
is_noncanonical_address(data, vcpu))
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5786225c0dfa..486e91f4a538 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -13475,6 +13475,32 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
}
EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
+bool kvm_cet_is_msr_accessible(struct kvm_vcpu *vcpu, struct msr_data *msr)
+{
+ if (!kvm_cet_user_supported() &&
+ !cet_kernel_ibt_supported())
+ return false;
+
+ if (msr->host_initiated)
+ return true;
+
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_SHSTK) &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_IBT))
+ return false;
+
+ if (msr->index == MSR_IA32_S_CET &&
+ guest_cpuid_has(vcpu, X86_FEATURE_IBT))
+ return true;
+
+ if ((msr->index == MSR_IA32_PL3_SSP ||
+ msr->index == MSR_KVM_GUEST_SSP) &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_SHSTK))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(kvm_cet_is_msr_accessible);
+
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index a55f262d1e61..fb871be7131e 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -502,6 +502,8 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
unsigned int port, void *data, unsigned int count,
int in);
+bool kvm_cet_is_msr_accessible(struct kvm_vcpu *vcpu, struct msr_data *msr);
+
/*
* We've already loaded guest MSRs in __msr_io() when check the MSR index.
* In case vcpu has been preempted, we need to disable preemption, check
--
2.34.3