Message-Id: <20250107142719.179636-13-yilun.xu@linux.intel.com>
Date: Tue, 7 Jan 2025 22:27:19 +0800
From: Xu Yilun <yilun.xu@...ux.intel.com>
To: kvm@...r.kernel.org,
dri-devel@...ts.freedesktop.org,
linux-media@...r.kernel.org,
linaro-mm-sig@...ts.linaro.org,
sumit.semwal@...aro.org,
christian.koenig@....com,
pbonzini@...hat.com,
seanjc@...gle.com,
alex.williamson@...hat.com,
jgg@...dia.com,
vivek.kasireddy@...el.com,
dan.j.williams@...el.com,
aik@....com
Cc: yilun.xu@...el.com,
yilun.xu@...ux.intel.com,
linux-coco@...ts.linux.dev,
linux-kernel@...r.kernel.org,
lukas@...ner.de,
yan.y.zhao@...el.com,
daniel.vetter@...ll.ch,
leon@...nel.org,
baolu.lu@...ux.intel.com,
zhenzhong.duan@...el.com,
tao1.su@...el.com
Subject: [RFC PATCH 12/12] KVM: TDX: Implement TDX specific private MMIO map/unmap for SEPT
Implement TDX-specific private MMIO map/unmap in the existing TDP MMU
hooks.  When the PFN passed to the set/drop/zap private-SPTE hooks is
MMIO (kvm_is_mmio_pfn()), route the operation to the new
TDH.MMIO.MAP/BLOCK/UNMAP SEAMCALLs instead of the TDH.MEM.* memory
SEAMCALLs.
Signed-off-by: Yan Zhao <yan.y.zhao@...el.com>
Signed-off-by: Xu Yilun <yilun.xu@...ux.intel.com>
---
TODO: This patch is still based on the earlier kvm-coco-queue version
(v6.13-rc2).  It will be rebased to follow the latest SEAMCALL wrapper
changes. [1]
[1] https://lore.kernel.org/all/20250101074959.412696-1-pbonzini@redhat.com/
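A note on the wrapper calling convention: like the existing TDH.MEM.*
wrappers, tdh_mmio_map()/block()/unmap() pack the GPA and S-EPT level
into RCX, pass the TDR PA in RDX, and hand the extended error info (the
S-EPT entry and its level state) back through the RCX/RDX
out-parameters.  A minimal, purely illustrative caller sketch (not part
of this series; it just mirrors the tdx_mmio_map() path below):

	u64 entry, level_state, err;

	err = tdh_mmio_map(kvm_tdx->tdr_pa, gfn_to_gpa(gfn),
			   pg_level_to_tdx_sept_level(PG_LEVEL_4K),
			   pfn_to_hpa(pfn), &entry, &level_state);
	if (unlikely(err & TDX_OPERAND_BUSY))
		return -EBUSY;		/* let the caller retry */
	if (err)
		pr_tdx_error_2(TDH_MMIO_MAP, err, entry, level_state);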
---
arch/x86/include/asm/tdx.h | 3 ++
arch/x86/kvm/vmx/tdx.c | 57 +++++++++++++++++++++++++++++++++++--
arch/x86/virt/vmx/tdx/tdx.c | 52 +++++++++++++++++++++++++++++++++
arch/x86/virt/vmx/tdx/tdx.h | 3 ++
4 files changed, 113 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
index 01409a59224d..7d158bbf79f4 100644
--- a/arch/x86/include/asm/tdx.h
+++ b/arch/x86/include/asm/tdx.h
@@ -151,6 +151,9 @@ u64 tdh_mem_page_remove(u64 tdr, u64 gpa, u64 level, u64 *rcx, u64 *rdx);
u64 tdh_phymem_cache_wb(bool resume);
u64 tdh_phymem_page_wbinvd_tdr(u64 tdr);
u64 tdh_phymem_page_wbinvd_hkid(u64 hpa, u64 hkid);
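+/* SEAMCALL wrappers for private MMIO S-EPT map/block/unmap */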
+u64 tdh_mmio_map(u64 tdr, u64 gpa, u64 level, u64 hpa, u64 *rcx, u64 *rdx);
+u64 tdh_mmio_block(u64 tdr, u64 gpa, u64 level, u64 *rcx, u64 *rdx);
+u64 tdh_mmio_unmap(u64 tdr, u64 gpa, u64 level, u64 *rcx, u64 *rdx);
#else
static inline void tdx_init(void) { }
static inline int tdx_cpu_enable(void) { return -ENODEV; }
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 69ef9c967fbf..9b43a2ee2203 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1576,6 +1576,29 @@ static int tdx_mem_page_aug(struct kvm *kvm, gfn_t gfn,
return 0;
}
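+/*
+ * Map a private MMIO page into the TD's S-EPT with TDH.MMIO.MAP.
+ * A busy S-EPT entry is reported to the caller as -EBUSY for retry.
+ */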
+static int tdx_mmio_map(struct kvm *kvm, gfn_t gfn,
+ enum pg_level level, kvm_pfn_t pfn)
+{
+ int tdx_level = pg_level_to_tdx_sept_level(level);
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+ hpa_t hpa = pfn_to_hpa(pfn);
+ gpa_t gpa = gfn_to_gpa(gfn);
+ u64 entry, level_state;
+ u64 err;
+
+ err = tdh_mmio_map(kvm_tdx->tdr_pa, gpa, tdx_level, hpa,
+ &entry, &level_state);
+ if (unlikely(err & TDX_OPERAND_BUSY))
+ return -EBUSY;
+
+ if (KVM_BUG_ON(err, kvm)) {
+ pr_tdx_error_2(TDH_MMIO_MAP, err, entry, level_state);
+ return -EIO;
+ }
+
+ return 0;
+}
+
/*
* KVM_TDX_INIT_MEM_REGION calls kvm_gmem_populate() to get guest pages and
* tdx_gmem_post_populate() to premap page table pages into private EPT.
@@ -1610,6 +1633,9 @@ int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
return -EINVAL;
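+ /* Private MMIO is mapped via TDH.MMIO.MAP, not TDH.MEM.PAGE.AUG. */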
+ if (kvm_is_mmio_pfn(pfn))
+ return tdx_mmio_map(kvm, gfn, level, pfn);
+
/*
* Because guest_memfd doesn't support page migration with
* a_ops->migrate_folio (yet), no callback is triggered for KVM on page
@@ -1647,6 +1673,20 @@ static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
if (KVM_BUG_ON(!is_hkid_assigned(kvm_tdx), kvm))
return -EINVAL;
+ if (kvm_is_mmio_pfn(pfn)) {
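+ /* Retry while another S-EPT operation holds the entry busy. */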
+ do {
+ err = tdh_mmio_unmap(kvm_tdx->tdr_pa, gpa, tdx_level,
+ &entry, &level_state);
+ } while (unlikely(err == TDX_ERROR_SEPT_BUSY));
+
+ if (KVM_BUG_ON(err, kvm)) {
+ pr_tdx_error_2(TDH_MMIO_UNMAP, err, entry, level_state);
+ return -EIO;
+ }
+
+ return 0;
+ }
+
do {
/*
* When zapping private page, write lock is held. So no race
@@ -1715,7 +1755,7 @@ int tdx_sept_link_private_spt(struct kvm *kvm, gfn_t gfn,
}
static int tdx_sept_zap_private_spte(struct kvm *kvm, gfn_t gfn,
- enum pg_level level)
+ enum pg_level level, kvm_pfn_t pfn)
{
int tdx_level = pg_level_to_tdx_sept_level(level);
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
@@ -1725,6 +1765,19 @@ static int tdx_sept_zap_private_spte(struct kvm *kvm, gfn_t gfn,
/* For now large page isn't supported yet. */
WARN_ON_ONCE(level != PG_LEVEL_4K);
+ if (kvm_is_mmio_pfn(pfn)) {
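+ /*
+ * Block the private MMIO mapping via TDH.MMIO.BLOCK; -EAGAIN
+ * tells the caller to retry while the S-EPT entry is busy.
+ */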
+ err = tdh_mmio_block(kvm_tdx->tdr_pa, gpa, tdx_level,
+ &entry, &level_state);
+ if (unlikely(err == TDX_ERROR_SEPT_BUSY))
+ return -EAGAIN;
+ if (KVM_BUG_ON(err, kvm)) {
+ pr_tdx_error_2(TDH_MMIO_BLOCK, err, entry, level_state);
+ return -EIO;
+ }
+
+ return 0;
+ }
+
err = tdh_mem_range_block(kvm_tdx->tdr_pa, gpa, tdx_level, &entry, &level_state);
if (unlikely(err == TDX_ERROR_SEPT_BUSY))
return -EAGAIN;
@@ -1816,7 +1869,7 @@ int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
if (KVM_BUG_ON(!is_hkid_assigned(to_kvm_tdx(kvm)), kvm))
return -EINVAL;
- ret = tdx_sept_zap_private_spte(kvm, gfn, level);
+ ret = tdx_sept_zap_private_spte(kvm, gfn, level, pfn);
if (ret)
return ret;
diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
index 57195cf0d832..3b2109877a39 100644
--- a/arch/x86/virt/vmx/tdx/tdx.c
+++ b/arch/x86/virt/vmx/tdx/tdx.c
@@ -1951,3 +1951,55 @@ u64 tdh_phymem_page_wbinvd_hkid(u64 hpa, u64 hkid)
return seamcall(TDH_PHYMEM_PAGE_WBINVD, &args);
}
EXPORT_SYMBOL_GPL(tdh_phymem_page_wbinvd_hkid);
+
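+/*
+ * TDH.MMIO.MAP: map the private MMIO page at @hpa into the S-EPT of
+ * the TD identified by @tdr, at @gpa/@level.  On error, extended info
+ * (the S-EPT entry and its level state) is returned via @rcx/@rdx.
+ */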
+u64 tdh_mmio_map(u64 tdr, u64 gpa, u64 level, u64 hpa, u64 *rcx, u64 *rdx)
+{
+ struct tdx_module_args args = {
+ .rcx = gpa | level,
+ .rdx = tdr,
+ .r8 = hpa,
+ };
+ u64 ret;
+
+ ret = tdx_seamcall_sept(TDH_MMIO_MAP, &args);
+
+ *rcx = args.rcx;
+ *rdx = args.rdx;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mmio_map);
+
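+/*
+ * TDH.MMIO.BLOCK: block the private MMIO mapping at @gpa/@level in
+ * preparation for unmap.
+ */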
+u64 tdh_mmio_block(u64 tdr, u64 gpa, u64 level, u64 *rcx, u64 *rdx)
+{
+ struct tdx_module_args args = {
+ .rcx = gpa | level,
+ .rdx = tdr,
+ };
+ u64 ret;
+
+ ret = tdx_seamcall_sept(TDH_MMIO_BLOCK, &args);
+
+ *rcx = args.rcx;
+ *rdx = args.rdx;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mmio_block);
+
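+/*
+ * TDH.MMIO.UNMAP: remove the (previously blocked) private MMIO mapping
+ * at @gpa/@level.
+ */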
+u64 tdh_mmio_unmap(u64 tdr, u64 gpa, u64 level, u64 *rcx, u64 *rdx)
+{
+ struct tdx_module_args args = {
+ .rcx = gpa | level,
+ .rdx = tdr,
+ };
+ u64 ret;
+
+ ret = tdx_seamcall_sept(TDH_MMIO_UNMAP, &args);
+
+ *rcx = args.rcx;
+ *rdx = args.rdx;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mmio_unmap);
diff --git a/arch/x86/virt/vmx/tdx/tdx.h b/arch/x86/virt/vmx/tdx/tdx.h
index 58d5754dcb4d..a83a90a043a5 100644
--- a/arch/x86/virt/vmx/tdx/tdx.h
+++ b/arch/x86/virt/vmx/tdx/tdx.h
@@ -49,6 +49,9 @@
#define TDH_VP_WR 43
#define TDH_PHYMEM_PAGE_WBINVD 41
#define TDH_SYS_CONFIG 45
+#define TDH_MMIO_MAP 158
+#define TDH_MMIO_BLOCK 159
+#define TDH_MMIO_UNMAP 160
/*
* SEAMCALL leaf:
--
2.25.1