Message-Id: <86da659dba9490f7b5b9b0cf1facb6e059d79720.1656366338.git.isaku.yamahata@intel.com>
Date: Mon, 27 Jun 2022 14:54:28 -0700
From: isaku.yamahata@...el.com
To: kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Cc: isaku.yamahata@...el.com, isaku.yamahata@...il.com,
Paolo Bonzini <pbonzini@...hat.com>
Subject: [PATCH v7 096/102] KVM: TDX: Handle TDX PV map_gpa hypercall
From: Isaku Yamahata <isaku.yamahata@...el.com>
Wire up the TDX PV map_gpa hypercall to the kvm/mmu backend.  The guest
uses this hypercall (MapGPA in the GHCI specification) to request that a
GPA range be converted between shared and private memory; validate the
range and forward the request to kvm_mmu_map_gpa().
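For context, a rough guest-side sketch of issuing this hypercall.  It
assumes the in-guest TDX hypercall helper _tdx_hypercall() and a
TDVMCALL_MAP_GPA leaf constant as provided by the guest-side TDX support
code; it is illustrative only and not part of this patch:

	#include <linux/errno.h>	/* -EIO */
	#include <linux/types.h>	/* u64 */

	/*
	 * Illustrative only: ask the VMM to convert [gpa, gpa + size)
	 * between shared and private.  The direction is encoded in the GPA
	 * itself via the shared bit; r12 carries the GPA and r13 the size
	 * in bytes.  Assumes _tdx_hypercall(fn, r12, r13, r14, r15) and
	 * TDVMCALL_MAP_GPA from the guest TDX code.
	 */
	static int guest_map_gpa(u64 gpa, u64 size)
	{
		return _tdx_hypercall(TDVMCALL_MAP_GPA, gpa, size, 0, 0) ? -EIO : 0;
	}

On the host side, the loop below caps each kvm_mmu_map_gpa() call at
16MB (TDX_MAP_GPA_SIZE_MAX) so a large conversion request cannot hold
the CPU without giving the scheduler a chance to run.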
Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
---
arch/x86/kvm/vmx/tdx.c | 60 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 60 insertions(+)
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 00baecbb62ff..d4ac573d9db3 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1221,6 +1221,64 @@ static int tdx_report_fatal_error(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static int tdx_map_gpa(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+	gpa_t gpa = tdvmcall_a0_read(vcpu);
+	gpa_t size = tdvmcall_a1_read(vcpu);
+	gpa_t end = gpa + size;
+	bool allow_private = kvm_is_private_gpa(kvm, gpa);
+
+	tdvmcall_set_return_code(vcpu, TDG_VP_VMCALL_INVALID_OPERAND);
+	if (!IS_ALIGNED(gpa, 4096) || !IS_ALIGNED(size, 4096) ||
+	    end < gpa ||
+	    end > kvm_gfn_shared_mask(kvm) << (PAGE_SHIFT + 1) ||
+	    kvm_is_private_gpa(kvm, gpa) != kvm_is_private_gpa(kvm, end))
+		return 1;
+
+	tdvmcall_set_return_code(vcpu, TDG_VP_VMCALL_SUCCESS);
+
+#define TDX_MAP_GPA_SIZE_MAX	(16 * 1024 * 1024)
+	while (gpa < end) {
+		gfn_t s = gpa_to_gfn(gpa);
+		gfn_t e = gpa_to_gfn(
+			min(roundup(gpa + 1, TDX_MAP_GPA_SIZE_MAX), end));
+		int ret = kvm_mmu_map_gpa(vcpu, &s, e, allow_private);
+
+		if (ret == -EAGAIN)
+			e = s;
+		else if (ret) {
+			tdvmcall_set_return_code(vcpu,
+						 TDG_VP_VMCALL_INVALID_OPERAND);
+			break;
+		}
+
+		gpa = gfn_to_gpa(e);
+
+		/*
+		 * TODO:
+		 * Interrupt this hypercall invocation to return the
+		 * remaining region to the guest and let the guest resume
+		 * the hypercall.
+		 *
+		 * The TDX Guest-Hypervisor Communication Interface (GHCI)
+		 * specification and the guest implementation need to be
+		 * updated.
+		 *
+		 * if (gpa < end && need_resched()) {
+		 *	size = end - gpa;
+		 *	tdvmcall_a0_write(vcpu, gpa);
+		 *	tdvmcall_a1_write(vcpu, size);
+		 *	tdvmcall_set_return_code(vcpu, TDG_VP_VMCALL_INTERRUPTED_RESUME);
+		 *	break;
+		 * }
+		 */
+		if (gpa < end && need_resched())
+			cond_resched();
+	}
+
+	return 1;
+}
+
 static int handle_tdvmcall(struct kvm_vcpu *vcpu)
 {
 	if (tdvmcall_exit_type(vcpu))
@@ -1241,6 +1299,8 @@ static int handle_tdvmcall(struct kvm_vcpu *vcpu)
 		return tdx_emulate_wrmsr(vcpu);
 	case TDG_VP_VMCALL_REPORT_FATAL_ERROR:
 		return tdx_report_fatal_error(vcpu);
+	case TDG_VP_VMCALL_MAP_GPA:
+		return tdx_map_gpa(vcpu);
 	default:
 		break;
 	}
--
2.25.1