Message-Id: <20200709145358.1560330-5-vkuznets@redhat.com>
Date: Thu, 9 Jul 2020 16:53:53 +0200
From: Vitaly Kuznetsov <vkuznets@...hat.com>
To: kvm@...r.kernel.org, Paolo Bonzini <pbonzini@...hat.com>
Cc: Sean Christopherson <sean.j.christopherson@...el.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Junaid Shahid <junaids@...gle.com>,
linux-kernel@...r.kernel.org
Subject: [PATCH v3 4/9] KVM: nSVM: prepare to handle errors from enter_svm_guest_mode()

Some operations in enter_svm_guest_mode() may fail; e.g. the return
value of kvm_set_cr3() is currently suppressed. Prepare the code to
propagate errors.

No functional change intended.

Signed-off-by: Vitaly Kuznetsov <vkuznets@...hat.com>
---
 arch/x86/kvm/svm/nested.c | 26 ++++++++++++++++----------
 arch/x86/kvm/svm/svm.c    |  6 ++++--
 arch/x86/kvm/svm/svm.h    |  4 ++--
 3 files changed, 22 insertions(+), 14 deletions(-)
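
For illustration, a minimal sketch of where this is heading once a
failing guest CR3 load is actually reported. The direct kvm_set_cr3()
call below is hypothetical (today the CR3 write happens deeper in the
save state setup and its result is dropped); the elided setup calls are
the ones visible in the hunk context:

int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
			 struct vmcb *nested_vmcb)
{
	int ret;

	svm->nested.vmcb = vmcb_gpa;

	/* ... save/control area setup as in the existing code ... */
	nested_prepare_vmcb_control(svm);

	/*
	 * Hypothetical: check the guest CR3 load instead of discarding
	 * its return value.
	 */
	ret = kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
	if (ret)
		return ret;

	svm_set_gif(svm, true);

	return 0;
}

With the int return in place, both callers changed below,
nested_svm_vmrun() and svm_pre_leave_smm(), can react to the failure
instead of silently entering guest mode.
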
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 1cc8592b1820..5e6c988a4e6b 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -379,7 +379,7 @@ static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
 	mark_all_dirty(svm->vmcb);
 }
 
-void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
+int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 			  struct vmcb *nested_vmcb)
 {
 	svm->nested.vmcb = vmcb_gpa;
@@ -388,6 +388,8 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 	nested_prepare_vmcb_control(svm);
 
 	svm_set_gif(svm, true);
+
+	return 0;
 }
 
 int nested_svm_vmrun(struct vcpu_svm *svm)
@@ -465,18 +467,22 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
 	copy_vmcb_control_area(&hsave->control, &vmcb->control);
 
 	svm->nested.nested_run_pending = 1;
-	enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb);
 
-	if (!nested_svm_vmrun_msrpm(svm)) {
-		svm->nested.nested_run_pending = 0;
+	if (enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb))
+		goto out_exit_err;
 
-		svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
-		svm->vmcb->control.exit_code_hi = 0;
-		svm->vmcb->control.exit_info_1  = 0;
-		svm->vmcb->control.exit_info_2  = 0;
+	if (nested_svm_vmrun_msrpm(svm))
+		goto out;
 
-		nested_svm_vmexit(svm);
-	}
+out_exit_err:
+	svm->nested.nested_run_pending = 0;
+
+	svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
+	svm->vmcb->control.exit_code_hi = 0;
+	svm->vmcb->control.exit_info_1  = 0;
+	svm->vmcb->control.exit_info_2  = 0;
+
+	nested_svm_vmexit(svm);
 
 out:
 	kvm_vcpu_unmap(&svm->vcpu, &map, true);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index c0da4dd78ac5..b8ec56fe5478 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3843,6 +3843,7 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 	struct kvm_host_map map;
 	u64 guest;
 	u64 vmcb;
+	int ret = 0;
 
 	guest = GET_SMSTATE(u64, smstate, 0x7ed8);
 	vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
@@ -3851,10 +3852,11 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 		if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
 			return 1;
 		nested_vmcb = map.hva;
-		enter_svm_guest_mode(svm, vmcb, nested_vmcb);
+		ret = enter_svm_guest_mode(svm, vmcb, nested_vmcb);
 		kvm_vcpu_unmap(&svm->vcpu, &map, true);
 	}
-	return 0;
+
+	return ret;
 }
 
 static void enable_smi_window(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 6ac4c00a5d82..f98649af11d1 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -387,8 +387,8 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
 	return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_NMI));
 }
 
-void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
-			  struct vmcb *nested_vmcb);
+int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
+			 struct vmcb *nested_vmcb);
 void svm_leave_nested(struct vcpu_svm *svm);
 int nested_svm_vmrun(struct vcpu_svm *svm);
 void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
--
2.25.4