Message-Id: <e02b1dcd23ad511c37edc50d1c3b6bfca219a36b.1685333728.git.isaku.yamahata@intel.com>
Date: Sun, 28 May 2023 21:20:12 -0700
From: isaku.yamahata@...el.com
To: kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Cc: isaku.yamahata@...el.com, isaku.yamahata@...il.com,
Paolo Bonzini <pbonzini@...hat.com>, erdemaktas@...gle.com,
Sean Christopherson <seanjc@...gle.com>,
Sagi Shahar <sagis@...gle.com>,
David Matlack <dmatlack@...gle.com>,
Kai Huang <kai.huang@...el.com>,
Zhi Wang <zhi.wang.linux@...il.com>, chen.bo@...el.com
Subject: [PATCH v14 090/113] KVM: TDX: Add KVM Exit for TDX TDG.VP.VMCALL
From: Isaku Yamahata <isaku.yamahata@...el.com>
Some TDG.VP.VMCALL leaves require the device model, e.g. qemu, to handle
them on behalf of the KVM kernel module: TDG_VP_VMCALL_REPORT_FATAL_ERROR,
TDG_VP_VMCALL_MAP_GPA, TDG_VP_VMCALL_SETUP_EVENT_NOTIFY_INTERRUPT, and
TDG_VP_VMCALL_GET_QUOTE require user space VMM handling.
Introduce a new KVM exit, KVM_EXIT_TDX, and functions to set it up.
TDG_VP_VMCALL_INVALID_OPERAND is set as the default return value to avoid
returning an uninitialized value; the device model should update R10 if
necessary.
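As an illustration (not part of this patch), here is a minimal sketch of
how a user space VMM could consume this exit.  The ReportFatalError leaf
value and TDG_VP_VMCALL_SUCCESS below are taken from the GHCI spec, not
from this series, and the fprintf() is a stand-in for real error
reporting:

  /*
   * Hypothetical user space handler for KVM_EXIT_TDX, assuming
   * <linux/kvm.h> from a kernel with this patch applied.
   */
  #include <stdio.h>
  #include <linux/kvm.h>

  #define TDG_VP_VMCALL_REPORT_FATAL_ERROR 0x10003
  #define TDG_VP_VMCALL_SUCCESS            0x0

  static void handle_tdx_exit(struct kvm_run *run)
  {
          struct kvm_tdx_vmcall *vmcall = &run->tdx.u.vmcall;

          if (run->exit_reason != KVM_EXIT_TDX ||
              run->tdx.type != KVM_EXIT_TDX_VMCALL)
                  return;

          switch (vmcall->subfunction) {
          case TDG_VP_VMCALL_REPORT_FATAL_ERROR:
                  /* Per the GHCI ABI, the error code arrives in r12. */
                  fprintf(stderr, "TD fatal error: 0x%llx\n",
                          (unsigned long long)vmcall->in_r12);
                  vmcall->status_code = TDG_VP_VMCALL_SUCCESS;
                  break;
          default:
                  /*
                   * Leave the kernel's default status,
                   * TDG_VP_VMCALL_INVALID_OPERAND, for unknown leaves.
                   */
                  break;
          }
          /*
           * The next KVM_RUN resumes the vCPU; tdx_complete_vp_vmcall()
           * copies status_code and the out_* fields back into the guest
           * registers selected by reg_mask.
           */
  }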
Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
---
arch/x86/kvm/vmx/tdx.c | 98 +++++++++++++++++++++++++++++++++++++++-
include/uapi/linux/kvm.h | 57 +++++++++++++++++++++++
2 files changed, 153 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 8d3a5f9d208a..d53e263794b4 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -123,6 +123,18 @@ BUILD_TDVMCALL_ACCESSORS(a1, r13);
BUILD_TDVMCALL_ACCESSORS(a2, r14);
BUILD_TDVMCALL_ACCESSORS(a3, r15);
+#define TDX_VMCALL_REG_MASK_RBX BIT_ULL(2)
+#define TDX_VMCALL_REG_MASK_RDX BIT_ULL(3)
+#define TDX_VMCALL_REG_MASK_RBP BIT_ULL(5)
+#define TDX_VMCALL_REG_MASK_RSI BIT_ULL(6)
+#define TDX_VMCALL_REG_MASK_RDI BIT_ULL(7)
+#define TDX_VMCALL_REG_MASK_R8 BIT_ULL(8)
+#define TDX_VMCALL_REG_MASK_R9 BIT_ULL(9)
+#define TDX_VMCALL_REG_MASK_R12 BIT_ULL(12)
+#define TDX_VMCALL_REG_MASK_R13 BIT_ULL(13)
+#define TDX_VMCALL_REG_MASK_R14 BIT_ULL(14)
+#define TDX_VMCALL_REG_MASK_R15 BIT_ULL(15)
+
static __always_inline unsigned long tdvmcall_exit_type(struct kvm_vcpu *vcpu)
{
        return kvm_r10_read(vcpu);
@@ -896,6 +908,80 @@ static int tdx_emulate_vmcall(struct kvm_vcpu *vcpu)
        return 1;
}
+static int tdx_complete_vp_vmcall(struct kvm_vcpu *vcpu)
+{
+        struct kvm_tdx_vmcall *tdx_vmcall = &vcpu->run->tdx.u.vmcall;
+        __u64 reg_mask;
+
+        tdvmcall_set_return_code(vcpu, tdx_vmcall->status_code);
+        tdvmcall_set_return_val(vcpu, tdx_vmcall->out_r11);
+
+        reg_mask = kvm_rcx_read(vcpu);
+        if (reg_mask & TDX_VMCALL_REG_MASK_R12)
+                kvm_r12_write(vcpu, tdx_vmcall->out_r12);
+        if (reg_mask & TDX_VMCALL_REG_MASK_R13)
+                kvm_r13_write(vcpu, tdx_vmcall->out_r13);
+        if (reg_mask & TDX_VMCALL_REG_MASK_R14)
+                kvm_r14_write(vcpu, tdx_vmcall->out_r14);
+        if (reg_mask & TDX_VMCALL_REG_MASK_R15)
+                kvm_r15_write(vcpu, tdx_vmcall->out_r15);
+        if (reg_mask & TDX_VMCALL_REG_MASK_RBX)
+                kvm_rbx_write(vcpu, tdx_vmcall->out_rbx);
+        if (reg_mask & TDX_VMCALL_REG_MASK_RDI)
+                kvm_rdi_write(vcpu, tdx_vmcall->out_rdi);
+        if (reg_mask & TDX_VMCALL_REG_MASK_RSI)
+                kvm_rsi_write(vcpu, tdx_vmcall->out_rsi);
+        if (reg_mask & TDX_VMCALL_REG_MASK_R8)
+                kvm_r8_write(vcpu, tdx_vmcall->out_r8);
+        if (reg_mask & TDX_VMCALL_REG_MASK_R9)
+                kvm_r9_write(vcpu, tdx_vmcall->out_r9);
+        if (reg_mask & TDX_VMCALL_REG_MASK_RDX)
+                kvm_rdx_write(vcpu, tdx_vmcall->out_rdx);
+
+        return 1;
+}
+
+static int tdx_vp_vmcall_to_user(struct kvm_vcpu *vcpu)
+{
+        struct kvm_tdx_vmcall *tdx_vmcall = &vcpu->run->tdx.u.vmcall;
+        __u64 reg_mask;
+
+        vcpu->arch.complete_userspace_io = tdx_complete_vp_vmcall;
+        memset(tdx_vmcall, 0, sizeof(*tdx_vmcall));
+
+        vcpu->run->exit_reason = KVM_EXIT_TDX;
+        vcpu->run->tdx.type = KVM_EXIT_TDX_VMCALL;
+        tdx_vmcall->type = tdvmcall_exit_type(vcpu);
+        tdx_vmcall->subfunction = tdvmcall_leaf(vcpu);
+        tdx_vmcall->status_code = TDG_VP_VMCALL_INVALID_OPERAND;
+
+        reg_mask = kvm_rcx_read(vcpu);
+        tdx_vmcall->reg_mask = reg_mask;
+        if (reg_mask & TDX_VMCALL_REG_MASK_R12)
+                tdx_vmcall->in_r12 = kvm_r12_read(vcpu);
+        if (reg_mask & TDX_VMCALL_REG_MASK_R13)
+                tdx_vmcall->in_r13 = kvm_r13_read(vcpu);
+        if (reg_mask & TDX_VMCALL_REG_MASK_R14)
+                tdx_vmcall->in_r14 = kvm_r14_read(vcpu);
+        if (reg_mask & TDX_VMCALL_REG_MASK_R15)
+                tdx_vmcall->in_r15 = kvm_r15_read(vcpu);
+        if (reg_mask & TDX_VMCALL_REG_MASK_RBX)
+                tdx_vmcall->in_rbx = kvm_rbx_read(vcpu);
+        if (reg_mask & TDX_VMCALL_REG_MASK_RDI)
+                tdx_vmcall->in_rdi = kvm_rdi_read(vcpu);
+        if (reg_mask & TDX_VMCALL_REG_MASK_RSI)
+                tdx_vmcall->in_rsi = kvm_rsi_read(vcpu);
+        if (reg_mask & TDX_VMCALL_REG_MASK_R8)
+                tdx_vmcall->in_r8 = kvm_r8_read(vcpu);
+        if (reg_mask & TDX_VMCALL_REG_MASK_R9)
+                tdx_vmcall->in_r9 = kvm_r9_read(vcpu);
+        if (reg_mask & TDX_VMCALL_REG_MASK_RDX)
+                tdx_vmcall->in_rdx = kvm_rdx_read(vcpu);
+
+        /* notify userspace to handle the request */
+        return 0;
+}
+
static int handle_tdvmcall(struct kvm_vcpu *vcpu)
{
        if (tdvmcall_exit_type(vcpu))
@@ -906,8 +992,16 @@ static int handle_tdvmcall(struct kvm_vcpu *vcpu)
                break;
        }
-        tdvmcall_set_return_code(vcpu, TDG_VP_VMCALL_INVALID_OPERAND);
-        return 1;
+        /*
+         * Unknown VMCALL. Toss the request to the user space VMM, e.g. qemu,
+         * as it may know how to handle it.
+         *
+         * These VMCALLs require the user space VMM:
+         * TDG_VP_VMCALL_REPORT_FATAL_ERROR, TDG_VP_VMCALL_MAP_GPA,
+         * TDG_VP_VMCALL_SETUP_EVENT_NOTIFY_INTERRUPT, and
+         * TDG_VP_VMCALL_GET_QUOTE.
+         */
+        return tdx_vp_vmcall_to_user(vcpu);
 }
void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int pgd_level)
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 18705e661c9e..7edcbef7626b 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -237,6 +237,60 @@ struct kvm_xen_exit {
        } u;
};
+struct kvm_tdx_exit {
+#define KVM_EXIT_TDX_VMCALL 1
+        __u32 type;
+        __u32 pad;
+
+        union {
+                struct kvm_tdx_vmcall {
+                        /*
+                         * Guest-Host-Communication Interface for TDX spec
+                         * defines the ABI for TDG.VP.VMCALL.
+                         */
+
+                        /* Input parameters: guest -> VMM */
+                        __u64 type;             /* r10 */
+                        __u64 subfunction;      /* r11 */
+                        __u64 reg_mask;         /* rcx */
+                        /*
+                         * Subfunction specific.
+                         * Registers are used in this order to pass input
+                         * arguments. r12=arg0, r13=arg1, etc.
+                         */
+                        __u64 in_r12;
+                        __u64 in_r13;
+                        __u64 in_r14;
+                        __u64 in_r15;
+                        __u64 in_rbx;
+                        __u64 in_rdi;
+                        __u64 in_rsi;
+                        __u64 in_r8;
+                        __u64 in_r9;
+                        __u64 in_rdx;
+
+                        /* Output parameters: VMM -> guest */
+                        __u64 status_code;      /* r10 */
+                        /*
+                         * Subfunction specific.
+                         * Registers are used in this order to output return
+                         * values. r11=ret0, r12=ret1, etc.
+                         */
+                        __u64 out_r11;
+                        __u64 out_r12;
+                        __u64 out_r13;
+                        __u64 out_r14;
+                        __u64 out_r15;
+                        __u64 out_rbx;
+                        __u64 out_rdi;
+                        __u64 out_rsi;
+                        __u64 out_r8;
+                        __u64 out_r9;
+                        __u64 out_rdx;
+                } vmcall;
+        } u;
+};
+
#define KVM_S390_GET_SKEYS_NONE 1
#define KVM_S390_SKEYS_MAX 1048576
@@ -279,6 +333,7 @@ struct kvm_xen_exit {
#define KVM_EXIT_RISCV_CSR 36
#define KVM_EXIT_NOTIFY 37
#define KVM_EXIT_MEMORY_FAULT 38
+#define KVM_EXIT_TDX 39
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -532,6 +587,8 @@ struct kvm_run {
                        __u64 gpa;
                        __u64 size;
                } memory;
+                /* KVM_EXIT_TDX_VMCALL */
+                struct kvm_tdx_exit tdx;
                /* Fix the size of the union. */
                char padding[256];
        };
--
2.25.1