Message-Id: <20240812224820.34826-5-rick.p.edgecombe@intel.com>
Date: Mon, 12 Aug 2024 15:47:59 -0700
From: Rick Edgecombe <rick.p.edgecombe@...el.com>
To: seanjc@...gle.com,
	pbonzini@...hat.com,
	kvm@...r.kernel.org
Cc: kai.huang@...el.com,
	isaku.yamahata@...il.com,
	tony.lindgren@...ux.intel.com,
	xiaoyao.li@...el.com,
	linux-kernel@...r.kernel.org,
	rick.p.edgecombe@...el.com,
	Isaku Yamahata <isaku.yamahata@...el.com>,
	Sean Christopherson <sean.j.christopherson@...el.com>,
	Binbin Wu <binbin.wu@...ux.intel.com>,
	Yuan Yao <yuan.yao@...el.com>
Subject: [PATCH 04/25] KVM: TDX: Add C wrapper functions for SEAMCALLs to the TDX module

From: Isaku Yamahata <isaku.yamahata@...el.com>

A VMM interacts with the TDX module using a new instruction (SEAMCALL).
For instance, a TDX VMM does not have full access to the VM control
structure corresponding to the VMX VMCS.  Instead, the VMM induces the
TDX module to act on its behalf via SEAMCALLs.

Define C wrapper functions for SEAMCALLs for readability.

Some SEAMCALL APIs donate host pages to the TDX module or to a guest TD,
and the donated pages are encrypted.  Those SEAMCALLs require the VMM to
flush the cache lines beforehand to avoid cache-line aliasing.
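
As a rough illustration of how KVM code might use one of these wrappers
(the function name and the allocation/error handling below are
hypothetical and not part of this patch; the wrapper itself takes care
of the cache flush):

  /* Hypothetical caller: donate a newly allocated page as a TDCS page. */
  static int tdx_add_control_page(struct kvm_tdx *kvm_tdx)
  {
          struct page *page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
          u64 err;

          if (!page)
                  return -ENOMEM;

          /* tdh_mng_addcx() flushes the page's cache lines, then issues TDH.MNG.ADDCX. */
          err = tdh_mng_addcx(kvm_tdx, page_to_phys(page));
          if (err) {
                  __free_page(page);
                  return -EIO;
          }

          return 0;
  }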

Signed-off-by: Sean Christopherson <sean.j.christopherson@...el.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@...el.com>
Reviewed-by: Binbin Wu <binbin.wu@...ux.intel.com>
Reviewed-by: Yuan Yao <yuan.yao@...el.com>
---
uAPI breakout v1:
- Make the argument to the C wrapper functions struct kvm_tdx * or
  struct vcpu_tdx *. (Sean)
- Drop unused helpers (Kai)
- Fix bisectability issues in headers (Kai)
- Updates from seamcall overhaul (Kai)

v19:
- Update the commit message to match the patch (Yuan)
- Use seamcall() and seamcall_ret() (Paolo)

v18:
- Removed stub functions for __seamcall{,_ret}()
- Added Reviewed-by from Binbin
- Make tdx_seamcall() use struct tdx_module_args instead of taking
  each input separately.

v15 -> v16:
- use struct tdx_module_args instead of struct tdx_module_output
- Add tdh_mem_sept_rd() for SEPT_VE_DISABLE=1.
---
 arch/x86/kvm/vmx/tdx.h     |  14 +-
 arch/x86/kvm/vmx/tdx_ops.h | 387 +++++++++++++++++++++++++++++++++++++
 2 files changed, 399 insertions(+), 2 deletions(-)
 create mode 100644 arch/x86/kvm/vmx/tdx_ops.h

diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
index faed454385ca..78f84c53a948 100644
--- a/arch/x86/kvm/vmx/tdx.h
+++ b/arch/x86/kvm/vmx/tdx.h
@@ -12,12 +12,14 @@ extern bool enable_tdx;
 
 struct kvm_tdx {
 	struct kvm kvm;
-	/* TDX specific members follow. */
+
+	unsigned long tdr_pa;
 };
 
 struct vcpu_tdx {
 	struct kvm_vcpu	vcpu;
-	/* TDX specific members follow. */
+
+	unsigned long tdvpr_pa;
 };
 
 static inline bool is_td(struct kvm *kvm)
@@ -40,6 +42,14 @@ static __always_inline struct vcpu_tdx *to_tdx(struct kvm_vcpu *vcpu)
 	return container_of(vcpu, struct vcpu_tdx, vcpu);
 }
 
+/*
+ * SEAMCALL wrappers
+ *
+ * Put them here as most of these wrappers need the declarations of
+ * 'struct kvm_tdx' and 'struct vcpu_tdx'.
+ */
+#include "tdx_ops.h"
+
 #else
 static inline void tdx_bringup(void) {}
 static inline void tdx_cleanup(void) {}
diff --git a/arch/x86/kvm/vmx/tdx_ops.h b/arch/x86/kvm/vmx/tdx_ops.h
new file mode 100644
index 000000000000..a9b9ad15f6a8
--- /dev/null
+++ b/arch/x86/kvm/vmx/tdx_ops.h
@@ -0,0 +1,387 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Constants/data definitions for TDX SEAMCALLs
+ *
+ * This file is included by "tdx.h" after declarations of 'struct
+ * kvm_tdx' and 'struct vcpu_tdx'.  C files should never include
+ * this header directly.
+ */
+
+#ifndef __KVM_X86_TDX_OPS_H
+#define __KVM_X86_TDX_OPS_H
+
+#include <asm/cacheflush.h>
+#include <asm/page.h>
+#include <asm/tdx.h>
+
+#include "x86.h"
+
+static inline u64 tdh_mng_addcx(struct kvm_tdx *kvm_tdx, hpa_t addr)
+{
+	struct tdx_module_args in = {
+		.rcx = addr,
+		.rdx = kvm_tdx->tdr_pa,
+	};
+
+	clflush_cache_range(__va(addr), PAGE_SIZE);
+	return seamcall(TDH_MNG_ADDCX, &in);
+}
+
+static inline u64 tdh_mem_page_add(struct kvm_tdx *kvm_tdx, gpa_t gpa,
+				   hpa_t hpa, hpa_t source,
+				   u64 *rcx, u64 *rdx)
+{
+	struct tdx_module_args in = {
+		.rcx = gpa,
+		.rdx = kvm_tdx->tdr_pa,
+		.r8 = hpa,
+		.r9 = source,
+	};
+	u64 ret;
+
+	clflush_cache_range(__va(hpa), PAGE_SIZE);
+	ret = seamcall_ret(TDH_MEM_PAGE_ADD, &in);
+
+	*rcx = in.rcx;
+	*rdx = in.rdx;
+
+	return ret;
+}
+
+static inline u64 tdh_mem_sept_add(struct kvm_tdx *kvm_tdx, gpa_t gpa,
+				   int level, hpa_t page,
+				   u64 *rcx, u64 *rdx)
+{
+	struct tdx_module_args in = {
+		.rcx = gpa | level,
+		.rdx = kvm_tdx->tdr_pa,
+		.r8 = page,
+	};
+	u64 ret;
+
+	clflush_cache_range(__va(page), PAGE_SIZE);
+
+	ret = seamcall_ret(TDH_MEM_SEPT_ADD, &in);
+
+	*rcx = in.rcx;
+	*rdx = in.rdx;
+
+	return ret;
+}
+
+static inline u64 tdh_mem_sept_remove(struct kvm_tdx *kvm_tdx, gpa_t gpa,
+				      int level, u64 *rcx, u64 *rdx)
+{
+	struct tdx_module_args in = {
+		.rcx = gpa | level,
+		.rdx = kvm_tdx->tdr_pa,
+	};
+	u64 ret;
+
+	ret = seamcall_ret(TDH_MEM_SEPT_REMOVE, &in);
+
+	*rcx = in.rcx;
+	*rdx = in.rdx;
+
+	return ret;
+}
+
+static inline u64 tdh_vp_addcx(struct vcpu_tdx *tdx, hpa_t addr)
+{
+	struct tdx_module_args in = {
+		.rcx = addr,
+		.rdx = tdx->tdvpr_pa,
+	};
+
+	clflush_cache_range(__va(addr), PAGE_SIZE);
+	return seamcall(TDH_VP_ADDCX, &in);
+}
+
+static inline u64 tdh_mem_page_aug(struct kvm_tdx *kvm_tdx, gpa_t gpa, hpa_t hpa,
+				   u64 *rcx, u64 *rdx)
+{
+	struct tdx_module_args in = {
+		.rcx = gpa,
+		.rdx = kvm_tdx->tdr_pa,
+		.r8 = hpa,
+	};
+	u64 ret;
+
+	clflush_cache_range(__va(hpa), PAGE_SIZE);
+	ret = seamcall_ret(TDH_MEM_PAGE_AUG, &in);
+
+	*rcx = in.rcx;
+	*rdx = in.rdx;
+
+	return ret;
+}
+
+static inline u64 tdh_mem_range_block(struct kvm_tdx *kvm_tdx, gpa_t gpa,
+				      int level, u64 *rcx, u64 *rdx)
+{
+	struct tdx_module_args in = {
+		.rcx = gpa | level,
+		.rdx = kvm_tdx->tdr_pa,
+	};
+	u64 ret;
+
+	ret = seamcall_ret(TDH_MEM_RANGE_BLOCK, &in);
+
+	*rcx = in.rcx;
+	*rdx = in.rdx;
+
+	return ret;
+}
+
+static inline u64 tdh_mng_key_config(struct kvm_tdx *kvm_tdx)
+{
+	struct tdx_module_args in = {
+		.rcx = kvm_tdx->tdr_pa,
+	};
+
+	return seamcall(TDH_MNG_KEY_CONFIG, &in);
+}
+
+static inline u64 tdh_mng_create(struct kvm_tdx *kvm_tdx, int hkid)
+{
+	struct tdx_module_args in = {
+		.rcx = kvm_tdx->tdr_pa,
+		.rdx = hkid,
+	};
+
+	clflush_cache_range(__va(kvm_tdx->tdr_pa), PAGE_SIZE);
+	return seamcall(TDH_MNG_CREATE, &in);
+}
+
+static inline u64 tdh_vp_create(struct vcpu_tdx *tdx)
+{
+	struct tdx_module_args in = {
+		.rcx = tdx->tdvpr_pa,
+		.rdx = to_kvm_tdx(tdx->vcpu.kvm)->tdr_pa,
+	};
+
+	clflush_cache_range(__va(tdx->tdvpr_pa), PAGE_SIZE);
+	return seamcall(TDH_VP_CREATE, &in);
+}
+
+static inline u64 tdh_mng_rd(struct kvm_tdx *kvm_tdx, u64 field, u64 *data)
+{
+	struct tdx_module_args in = {
+		.rcx = kvm_tdx->tdr_pa,
+		.rdx = field,
+	};
+	u64 ret;
+
+	ret = seamcall_ret(TDH_MNG_RD, &in);
+
+	*data = in.r8;
+
+	return ret;
+}
+
+static inline u64 tdh_mr_extend(struct kvm_tdx *kvm_tdx, gpa_t gpa,
+				u64 *rcx, u64 *rdx)
+{
+	struct tdx_module_args in = {
+		.rcx = gpa,
+		.rdx = kvm_tdx->tdr_pa,
+	};
+	u64 ret;
+
+	ret = seamcall_ret(TDH_MR_EXTEND, &in);
+
+	*rcx = in.rcx;
+	*rdx = in.rdx;
+
+	return ret;
+}
+
+static inline u64 tdh_mr_finalize(struct kvm_tdx *kvm_tdx)
+{
+	struct tdx_module_args in = {
+		.rcx = kvm_tdx->tdr_pa,
+	};
+
+	return seamcall(TDH_MR_FINALIZE, &in);
+}
+
+static inline u64 tdh_vp_flush(struct vcpu_tdx *tdx)
+{
+	struct tdx_module_args in = {
+		.rcx = tdx->tdvpr_pa,
+	};
+
+	return seamcall(TDH_VP_FLUSH, &in);
+}
+
+static inline u64 tdh_mng_vpflushdone(struct kvm_tdx *kvm_tdx)
+{
+	struct tdx_module_args in = {
+		.rcx = kvm_tdx->tdr_pa,
+	};
+
+	return seamcall(TDH_MNG_VPFLUSHDONE, &in);
+}
+
+static inline u64 tdh_mng_key_freeid(struct kvm_tdx *kvm_tdx)
+{
+	struct tdx_module_args in = {
+		.rcx = kvm_tdx->tdr_pa,
+	};
+
+	return seamcall(TDH_MNG_KEY_FREEID, &in);
+}
+
+static inline u64 tdh_mng_init(struct kvm_tdx *kvm_tdx, hpa_t td_params,
+			       u64 *rcx)
+{
+	struct tdx_module_args in = {
+		.rcx = kvm_tdx->tdr_pa,
+		.rdx = td_params,
+	};
+	u64 ret;
+
+	ret = seamcall_ret(TDH_MNG_INIT, &in);
+
+	*rcx = in.rcx;
+
+	return ret;
+}
+
+static inline u64 tdh_vp_init(struct vcpu_tdx *tdx, u64 rcx)
+{
+	struct tdx_module_args in = {
+		.rcx = tdx->tdvpr_pa,
+		.rdx = rcx,
+	};
+
+	return seamcall(TDH_VP_INIT, &in);
+}
+
+static inline u64 tdh_vp_init_apicid(struct vcpu_tdx *tdx, u64 rcx, u32 x2apicid)
+{
+	struct tdx_module_args in = {
+		.rcx = tdx->tdvpr_pa,
+		.rdx = rcx,
+		.r8 = x2apicid,
+	};
+
+	/* apicid requires version == 1. */
+	return seamcall(TDH_VP_INIT | (1ULL << TDX_VERSION_SHIFT), &in);
+}
+
+static inline u64 tdh_vp_rd(struct vcpu_tdx *tdx, u64 field, u64 *data)
+{
+	struct tdx_module_args in = {
+		.rcx = tdx->tdvpr_pa,
+		.rdx = field,
+	};
+	u64 ret;
+
+	ret = seamcall_ret(TDH_VP_RD, &in);
+
+	*data = in.r8;
+
+	return ret;
+}
+
+static inline u64 tdh_mng_key_reclaimid(struct kvm_tdx *kvm_tdx)
+{
+	struct tdx_module_args in = {
+		.rcx = kvm_tdx->tdr_pa,
+	};
+
+	return seamcall(TDH_MNG_KEY_RECLAIMID, &in);
+}
+
+static inline u64 tdh_phymem_page_reclaim(hpa_t page, u64 *rcx, u64 *rdx,
+					  u64 *r8)
+{
+	struct tdx_module_args in = {
+		.rcx = page,
+	};
+	u64 ret;
+
+	ret = seamcall_ret(TDH_PHYMEM_PAGE_RECLAIM, &in);
+
+	*rcx = in.rcx;
+	*rdx = in.rdx;
+	*r8 = in.r8;
+
+	return ret;
+}
+
+static inline u64 tdh_mem_page_remove(struct kvm_tdx *kvm_tdx, gpa_t gpa,
+				      int level, u64 *rcx, u64 *rdx)
+{
+	struct tdx_module_args in = {
+		.rcx = gpa | level,
+		.rdx = kvm_tdx->tdr_pa,
+	};
+	u64 ret;
+
+	ret = seamcall_ret(TDH_MEM_PAGE_REMOVE, &in);
+
+	*rcx = in.rcx;
+	*rdx = in.rdx;
+
+	return ret;
+}
+
+static inline u64 tdh_mem_track(struct kvm_tdx *kvm_tdx)
+{
+	struct tdx_module_args in = {
+		.rcx = kvm_tdx->tdr_pa,
+	};
+
+	return seamcall(TDH_MEM_TRACK, &in);
+}
+
+static inline u64 tdh_mem_range_unblock(struct kvm_tdx *kvm_tdx, gpa_t gpa,
+					int level, u64 *rcx, u64 *rdx)
+{
+	struct tdx_module_args in = {
+		.rcx = gpa | level,
+		.rdx = kvm_tdx->tdr_pa,
+	};
+	u64 ret;
+
+	ret = seamcall_ret(TDH_MEM_RANGE_UNBLOCK, &in);
+
+	*rcx = in.rcx;
+	*rdx = in.rdx;
+
+	return ret;
+}
+
+static inline u64 tdh_phymem_cache_wb(bool resume)
+{
+	struct tdx_module_args in = {
+		.rcx = resume ? 1 : 0,
+	};
+
+	return seamcall(TDH_PHYMEM_CACHE_WB, &in);
+}
+
+static inline u64 tdh_phymem_page_wbinvd(hpa_t page)
+{
+	struct tdx_module_args in = {
+		.rcx = page,
+	};
+
+	return seamcall(TDH_PHYMEM_PAGE_WBINVD, &in);
+}
+
+static inline u64 tdh_vp_wr(struct vcpu_tdx *tdx, u64 field, u64 val, u64 mask)
+{
+	struct tdx_module_args in = {
+		.rcx = tdx->tdvpr_pa,
+		.rdx = field,
+		.r8 = val,
+		.r9 = mask,
+	};
+
+	return seamcall(TDH_VP_WR, &in);
+}
+
+#endif /* __KVM_X86_TDX_OPS_H */
-- 
2.34.1

