Message-ID: <20250129095902.16391-3-adrian.hunter@intel.com>
Date: Wed, 29 Jan 2025 11:58:51 +0200
From: Adrian Hunter <adrian.hunter@...el.com>
To: pbonzini@...hat.com,
seanjc@...gle.com
Cc: kvm@...r.kernel.org,
rick.p.edgecombe@...el.com,
kai.huang@...el.com,
adrian.hunter@...el.com,
reinette.chatre@...el.com,
xiaoyao.li@...el.com,
tony.lindgren@...ux.intel.com,
binbin.wu@...ux.intel.com,
dmatlack@...gle.com,
isaku.yamahata@...el.com,
nik.borisov@...e.com,
linux-kernel@...r.kernel.org,
yan.y.zhao@...el.com,
chao.gao@...el.com,
weijiang.yang@...el.com
Subject: [PATCH V2 02/12] KVM: x86: Allow the use of kvm_load_host_xsave_state() with guest_state_protected
From: Sean Christopherson <seanjc@...gle.com>
Allow the use of kvm_load_host_xsave_state() with
vcpu->arch.guest_state_protected == true. This will allow TDX to reuse
kvm_load_host_xsave_state() instead of creating its own version.
For consistency, also amend kvm_load_guest_xsave_state(): callers now
check guest_state_protected themselves, so replace its early return
with a WARN_ON_ONCE().
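
As an illustration, the reuse on the TDX side would look roughly like
the sketch below (tdx_vcpu_run_sketch() and tdx_vcpu_enter_exit() are
illustrative names, not code added by this patch):

	/*
	 * Sketch only: a protected-guest vCPU run path reusing the
	 * common helper.  Guest XSAVE state is managed by the TDX
	 * module, so nothing is loaded before entry, but the host's
	 * state must still be restored after exit.
	 */
	static fastpath_t tdx_vcpu_run_sketch(struct kvm_vcpu *vcpu)
	{
		tdx_vcpu_enter_exit(vcpu);	/* assumed enter/exit helper */

		/*
		 * Safe even though vcpu->arch.guest_state_protected
		 * is true: the helper no longer returns early, and it
		 * skips rdpkru() because the guest's PKRU is not
		 * visible to KVM.
		 */
		kvm_load_host_xsave_state(vcpu);

		return EXIT_FASTPATH_NONE;
	}
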
Ensure that guest state that kvm_load_host_xsave_state() depends upon,
such as MSR_IA32_XSS, cannot be changed by user space when
guest_state_protected is true.
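
As an example of the user-space-visible effect, a host-initiated
KVM_SET_MSRS of MSR_IA32_XSS now reports 0 MSRs processed instead of 1
for such a vCPU.  A minimal sketch (MSR_IA32_XSS is defined locally
because the MSR index is not exported by the uapi headers; error
handling elided):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	#define MSR_IA32_XSS 0xda0

	/*
	 * Sketch: attempt to write MSR_IA32_XSS via KVM_SET_MSRS.
	 * The ioctl returns the number of MSRs successfully
	 * processed, so with guest_state_protected this returns 0.
	 */
	static int try_set_xss(int vcpu_fd, __u64 val)
	{
		struct { struct kvm_msrs hdr; struct kvm_msr_entry e; } m;

		memset(&m, 0, sizeof(m));
		m.hdr.nmsrs = 1;
		m.e.index = MSR_IA32_XSS;
		m.e.data = val;

		return ioctl(vcpu_fd, KVM_SET_MSRS, &m.hdr);
	}
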
[Adrian: wrote commit message]
Link: https://lore.kernel.org/r/Z2GiQS_RmYeHU09L@google.com
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
Signed-off-by: Adrian Hunter <adrian.hunter@...el.com>
---
TD vcpu enter/exit v2:
- New patch
---
 arch/x86/kvm/svm/svm.c |  7 +++++--
 arch/x86/kvm/x86.c     | 18 +++++++++++-------
 2 files changed, 16 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 7640a84e554a..b4bcfe15ad5e 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4253,7 +4253,9 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
 		svm_set_dr6(svm, DR6_ACTIVE_LOW);
 
 	clgi();
-	kvm_load_guest_xsave_state(vcpu);
+
+	if (!vcpu->arch.guest_state_protected)
+		kvm_load_guest_xsave_state(vcpu);
 
 	kvm_wait_lapic_expire(vcpu);
 
@@ -4282,7 +4284,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
 		kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
 
-	kvm_load_host_xsave_state(vcpu);
+	if (!vcpu->arch.guest_state_protected)
+		kvm_load_host_xsave_state(vcpu);
 	stgi();
 
 	/* Any pending NMI will happen here */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bbb6b7f40b3a..5cf9f023fd4b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1169,11 +1169,9 @@ EXPORT_SYMBOL_GPL(kvm_lmsw);
 
 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.guest_state_protected)
-		return;
+	WARN_ON_ONCE(vcpu->arch.guest_state_protected);
 
 	if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
-
 		if (vcpu->arch.xcr0 != kvm_host.xcr0)
 			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
 
@@ -1192,13 +1190,11 @@ EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);
 
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.guest_state_protected)
-		return;
-
 	if (cpu_feature_enabled(X86_FEATURE_PKU) &&
 	    ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
 	     kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) {
-		vcpu->arch.pkru = rdpkru();
+		if (!vcpu->arch.guest_state_protected)
+			vcpu->arch.pkru = rdpkru();
 		if (vcpu->arch.pkru != vcpu->arch.host_pkru)
 			wrpkru(vcpu->arch.host_pkru);
 	}
@@ -3916,6 +3912,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!msr_info->host_initiated &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
 			return 1;
+
+		if (vcpu->arch.guest_state_protected)
+			return 1;
+
 		/*
 		 * KVM supports exposing PT to the guest, but does not support
 		 * IA32_XSS[bit 8].  Guests have to use RDMSR/WRMSR rather than
@@ -4375,6 +4375,10 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!msr_info->host_initiated &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
 			return 1;
+
+		if (vcpu->arch.guest_state_protected)
+			return 1;
+
 		msr_info->data = vcpu->arch.ia32_xss;
 		break;
 	case MSR_K7_CLK_CTL:
--
2.43.0