Message-Id: <20210205005750.3841462-5-seanjc@google.com>
Date: Thu, 4 Feb 2021 16:57:45 -0800
From: Sean Christopherson <seanjc@...gle.com>
To: Paolo Bonzini <pbonzini@...hat.com>,
Jiri Kosina <trivial@...nel.org>
Cc: Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH 4/9] KVM: nSVM: Add VMLOAD/VMSAVE helper to deduplicate code

Add another helper layer for VMLOAD+VMSAVE; the code is identical except
for the one line that determines which VMCB is the source and which is
the destination.

No functional change intended.
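
For illustration, a minimal, self-contained userspace sketch of the
deduplication pattern (the toy_* names and data below are made up for the
example; it only mirrors the shape of the patch, assuming
nested_svm_vmloadsave() copies from its first argument to its second, as the
existing call sites suggest):

	#include <stdio.h>
	#include <string.h>

	/* Toy stand-in for a VMCB save area, purely for illustration. */
	struct toy_vmcb {
		char save[32];
	};

	/* Mirrors the shape of nested_svm_vmloadsave(): copy 'from' into 'to'. */
	static void toy_vmloadsave(struct toy_vmcb *from, struct toy_vmcb *to)
	{
		memcpy(to->save, from->save, sizeof(to->save));
	}

	/*
	 * Single helper for both instructions; the bool picks which VMCB is
	 * the source, i.e. the one line that differs between the two paths.
	 */
	static void toy_vmload_vmsave(struct toy_vmcb *vmcb12,
				      struct toy_vmcb *vmcb01, int vmload)
	{
		if (vmload)
			toy_vmloadsave(vmcb12, vmcb01); /* VMLOAD: guest memory -> current VMCB */
		else
			toy_vmloadsave(vmcb01, vmcb12); /* VMSAVE: current VMCB -> guest memory */
	}

	int main(void)
	{
		struct toy_vmcb vmcb12 = { .save = "state at [rax]" };
		struct toy_vmcb vmcb01 = { .save = "current vCPU state" };

		toy_vmload_vmsave(&vmcb12, &vmcb01, 1); /* emulate VMLOAD */
		printf("after VMLOAD: %s\n", vmcb01.save);

		toy_vmload_vmsave(&vmcb12, &vmcb01, 0); /* emulate VMSAVE */
		printf("after VMSAVE: %s\n", vmcb12.save);
		return 0;
	}
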
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
arch/x86/kvm/svm/svm.c | 79 ++++++++++++++++++------------------------
1 file changed, 33 insertions(+), 46 deletions(-)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 0139cb259093..d8c3bb33e59c 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2131,58 +2131,45 @@ static int vmmcall_interception(struct kvm_vcpu *vcpu)
return kvm_emulate_hypercall(vcpu);
}
+static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ struct vmcb *vmcb12;
+ struct kvm_host_map map;
+ int ret;
+
+ if (nested_svm_check_permissions(vcpu))
+ return 1;
+
+ ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
+ if (ret) {
+ if (ret == -EINVAL)
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
+ vmcb12 = map.hva;
+
+ ret = kvm_skip_emulated_instruction(vcpu);
+
+ if (vmload)
+ nested_svm_vmloadsave(vmcb12, svm->vmcb);
+ else
+ nested_svm_vmloadsave(svm->vmcb, vmcb12);
+
+ kvm_vcpu_unmap(vcpu, &map, true);
+
+ return ret;
+}
+
static int vmload_interception(struct kvm_vcpu *vcpu)
{
- struct vcpu_svm *svm = to_svm(vcpu);
- struct vmcb *vmcb12;
- struct kvm_host_map map;
- int ret;
-
- if (nested_svm_check_permissions(vcpu))
- return 1;
-
- ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
- if (ret) {
- if (ret == -EINVAL)
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
-
- vmcb12 = map.hva;
-
- ret = kvm_skip_emulated_instruction(vcpu);
-
- nested_svm_vmloadsave(vmcb12, svm->vmcb);
- kvm_vcpu_unmap(vcpu, &map, true);
-
- return ret;
+ return vmload_vmsave_interception(vcpu, true);
}
static int vmsave_interception(struct kvm_vcpu *vcpu)
{
- struct vcpu_svm *svm = to_svm(vcpu);
- struct vmcb *vmcb12;
- struct kvm_host_map map;
- int ret;
-
- if (nested_svm_check_permissions(vcpu))
- return 1;
-
- ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
- if (ret) {
- if (ret == -EINVAL)
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
-
- vmcb12 = map.hva;
-
- ret = kvm_skip_emulated_instruction(vcpu);
-
- nested_svm_vmloadsave(svm->vmcb, vmcb12);
- kvm_vcpu_unmap(vcpu, &map, true);
-
- return ret;
+ return vmload_vmsave_interception(vcpu, false);
}
static int vmrun_interception(struct kvm_vcpu *vcpu)
--
2.30.0.365.g02bc693789-goog