Message-ID: <20260130020735.2517101-2-yosry.ahmed@linux.dev>
Date: Fri, 30 Jan 2026 02:07:33 +0000
From: Yosry Ahmed <yosry.ahmed@...ux.dev>
To: Sean Christopherson <seanjc@...gle.com>
Cc: Paolo Bonzini <pbonzini@...hat.com>,
kvm@...r.kernel.org,
linux-kernel@...r.kernel.org,
Yosry Ahmed <yosry.ahmed@...ux.dev>
Subject: [PATCH 1/3] KVM: SVM: Refactor EFER.SVME switching logic out of svm_set_efer()

Move the logic for toggling EFER.SVME in the guest out of svm_set_efer()
and into a new helper, svm_set_efer_svme(). This makes it possible to
check the skip conditions separately (and to add more later), and it
reduces the indentation level.

No functional change intended.

Signed-off-by: Yosry Ahmed <yosry.ahmed@...ux.dev>
---
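Note, not part of the patch: as an illustration of the "add more skip
conditions" point above, a hypothetical follow-up condition would land in
the new helper as one more early return, with no re-indentation of the
enable/disable paths. The name some_future_skip_check() below is made up
for illustration only:

	static int svm_set_efer_svme(struct kvm_vcpu *vcpu, u64 old_efer,
				     u64 new_efer)
	{
		/* Nothing to do if EFER.SVME is not changing. */
		if ((old_efer & EFER_SVME) == (new_efer & EFER_SVME))
			return 0;

		/* Hypothetical follow-up skip condition, name made up. */
		if (some_future_skip_check(vcpu))
			return 0;

		/* ... enable/disable handling exactly as in this patch ... */
		return 0;
	}
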
arch/x86/kvm/svm/svm.c | 72 ++++++++++++++++++++++++------------------
1 file changed, 42 insertions(+), 30 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 5f0136dbdde6b..4575a6a7d6c4e 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -200,11 +200,49 @@ static int get_npt_level(void)
 #endif
 }
 
+static int svm_set_efer_svme(struct kvm_vcpu *vcpu, u64 old_efer, u64 new_efer)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	int r;
+
+	if ((old_efer & EFER_SVME) == (new_efer & EFER_SVME))
+		return 0;
+
+	if (new_efer & EFER_SVME) {
+		r = svm_allocate_nested(svm);
+		if (r)
+			return r;
+
+		/*
+		 * Never intercept #GP for SEV guests, KVM can't decrypt guest
+		 * memory to workaround the erratum.
+		 */
+		if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm))
+			set_exception_intercept(svm, GP_VECTOR);
+	} else {
+		svm_leave_nested(vcpu);
+		/* #GP intercept is still needed for vmware backdoor */
+		if (!enable_vmware_backdoor)
+			clr_exception_intercept(svm, GP_VECTOR);
+
+		/*
+		 * Free the nested guest state, unless we are in SMM. In this
+		 * case we will return to the nested guest as soon as we leave
+		 * SMM.
+		 */
+		if (!is_smm(vcpu))
+			svm_free_nested(svm);
+	}
+
+	return 0;
+}
+
 int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u64 old_efer = vcpu->arch.efer;
+	int r;
 	vcpu->arch.efer = efer;
 
 	if (!npt_enabled) {
 		/* Shadow paging assumes NX to be available. */
@@ -214,36 +252,10 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 			efer &= ~EFER_LME;
 	}
 
-	if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
-		if (!(efer & EFER_SVME)) {
-			svm_leave_nested(vcpu);
-			/* #GP intercept is still needed for vmware backdoor */
-			if (!enable_vmware_backdoor)
-				clr_exception_intercept(svm, GP_VECTOR);
-
-			/*
-			 * Free the nested guest state, unless we are in SMM.
-			 * In this case we will return to the nested guest
-			 * as soon as we leave SMM.
-			 */
-			if (!is_smm(vcpu))
-				svm_free_nested(svm);
-
-		} else {
-			int ret = svm_allocate_nested(svm);
-
-			if (ret) {
-				vcpu->arch.efer = old_efer;
-				return ret;
-			}
-
-			/*
-			 * Never intercept #GP for SEV guests, KVM can't
-			 * decrypt guest memory to workaround the erratum.
-			 */
-			if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm))
-				set_exception_intercept(svm, GP_VECTOR);
-		}
+	r = svm_set_efer_svme(vcpu, old_efer, efer);
+	if (r) {
+		vcpu->arch.efer = old_efer;
+		return r;
 	}
 
 	svm->vmcb->save.efer = efer | EFER_SVME;
--
2.53.0.rc1.225.gd81095ad13-goog