Message-ID: <20250502130828.4071412-4-kirill.shutemov@linux.intel.com>
Date: Fri, 2 May 2025 16:08:19 +0300
From: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
To: pbonzini@...hat.com,
seanjc@...gle.com
Cc: rick.p.edgecombe@...el.com,
isaku.yamahata@...el.com,
kai.huang@...el.com,
yan.y.zhao@...el.com,
tglx@...utronix.de,
mingo@...hat.com,
bp@...en8.de,
dave.hansen@...ux.intel.com,
kvm@...r.kernel.org,
x86@...nel.org,
linux-coco@...ts.linux.dev,
linux-kernel@...r.kernel.org,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
Subject: [RFC, PATCH 03/12] x86/virt/tdx: Add wrappers for TDH.PHYMEM.PAMT.ADD/REMOVE
On a system with Dynamic PAMT enabled, the kernel must allocate memory
for PAMT_4K as needed and reclaim it when it is no longer in use.
The TDX module requires space to store 16 bytes of metadata for each 4k
page, i.e. 8k for every 2M range of physical memory. The TDX module
takes this 8k of memory as a pair of 4k pages that do not need to be
contiguous. The number of pages needed to cover a 2M range can grow if
the size of a PAMT entry increases. tdx_nr_pamt_pages() reports the
needed number of pages.
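
With the current 16-byte PAMT_4K entry size this works out to two pages
per 2M range (using the x86-64 constants; a larger entry size in a
future TDX module would yield more pages):

	pamt_4k_entry_size * PTRS_PER_PTE / PAGE_SIZE
		= 16 * 512 / 4096
		= 2 pages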
TDH.PHYMEM.PAMT.ADD populates PAMT_4K for a given HPA. The kernel must
provide the addresses of two pages to back the 2M range starting at HPA.

TDH.PHYMEM.PAMT.REMOVE withdraws the PAMT_4K memory for a given HPA,
returning the addresses of the pages that backed the range before the
call.
Add wrappers for these SEAMCALLs.
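
For illustration, a minimal sketch of how a caller might use the add
wrapper. pamt_add_example() and its (simplified) error handling are
hypothetical and not part of this patch; the real callers come later in
the series:

	/*
	 * Hypothetical caller, for illustration only: back PAMT_4K for
	 * the 2M range containing @hpa. Assumes tdx_enable() has
	 * already succeeded, so tdx_get_sysinfo() is non-NULL.
	 */
	static int pamt_add_example(unsigned long hpa)
	{
		LIST_HEAD(pamt_pages);
		struct page *page, *next;
		int i;

		/* Allocate the (possibly non-contiguous) backing pages */
		for (i = 0; i < tdx_nr_pamt_pages(tdx_get_sysinfo()); i++) {
			page = alloc_page(GFP_KERNEL);
			if (!page)
				goto err;
			list_add(&page->lru, &pamt_pages);
		}

		/* The wrapper expects a 2M-aligned HPA */
		if (tdh_phymem_pamt_add(hpa & PMD_MASK, &pamt_pages))
			goto err;

		return 0;
	err:
		list_for_each_entry_safe(page, next, &pamt_pages, lru) {
			list_del(&page->lru);
			__free_page(page);
		}
		return -ENOMEM;
	}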
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
---
arch/x86/include/asm/tdx.h | 9 ++++++++
arch/x86/virt/vmx/tdx/tdx.c | 45 +++++++++++++++++++++++++++++++++++++
arch/x86/virt/vmx/tdx/tdx.h | 2 ++
3 files changed, 56 insertions(+)
diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
index 9701876d4e16..a134cf3ecd17 100644
--- a/arch/x86/include/asm/tdx.h
+++ b/arch/x86/include/asm/tdx.h
@@ -130,6 +130,11 @@ static inline bool tdx_supports_dynamic_pamt(const struct tdx_sys_info *sysinfo)
return false; /* To be enabled when kernel is ready */
}
+static inline int tdx_nr_pamt_pages(const struct tdx_sys_info *sysinfo)
+{
+ return sysinfo->tdmr.pamt_4k_entry_size * PTRS_PER_PTE / PAGE_SIZE;
+}
+
int tdx_guest_keyid_alloc(void);
u32 tdx_get_nr_guest_keyids(void);
void tdx_guest_keyid_free(unsigned int keyid);
@@ -197,6 +202,9 @@ u64 tdh_mem_page_remove(struct tdx_td *td, u64 gpa, u64 level, u64 *ext_err1, u6
u64 tdh_phymem_cache_wb(bool resume);
u64 tdh_phymem_page_wbinvd_tdr(struct tdx_td *td);
u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct page *page);
+u64 tdh_phymem_pamt_add(unsigned long hpa, struct list_head *pamt_pages);
+u64 tdh_phymem_pamt_remove(unsigned long hpa, struct list_head *pamt_pages);
+
#else
static inline void tdx_init(void) { }
static inline int tdx_cpu_enable(void) { return -ENODEV; }
@@ -204,6 +212,7 @@ static inline int tdx_enable(void) { return -ENODEV; }
static inline u32 tdx_get_nr_guest_keyids(void) { return 0; }
static inline const char *tdx_dump_mce_info(struct mce *m) { return NULL; }
static inline const struct tdx_sys_info *tdx_get_sysinfo(void) { return NULL; }
+static inline int tdx_nr_pamt_pages(const struct tdx_sys_info *sysinfo) { return 0; }
#endif /* CONFIG_INTEL_TDX_HOST */
#endif /* !__ASSEMBLER__ */
diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
index 00e07a0c908a..29defdb7f6bc 100644
--- a/arch/x86/virt/vmx/tdx/tdx.c
+++ b/arch/x86/virt/vmx/tdx/tdx.c
@@ -1999,3 +1999,48 @@ u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct page *page)
return seamcall(TDH_PHYMEM_PAGE_WBINVD, &args);
}
EXPORT_SYMBOL_GPL(tdh_phymem_page_wbinvd_hkid);
+
+u64 tdh_phymem_pamt_add(unsigned long hpa, struct list_head *pamt_pages)
+{
+ struct tdx_module_args args = {
+ .rcx = hpa,
+ };
+ struct page *page;
+ u64 *p;
+
+ WARN_ON_ONCE(!IS_ALIGNED(hpa & PAGE_MASK, PMD_SIZE));
+
+ p = &args.rdx;
+ list_for_each_entry(page, pamt_pages, lru) {
+ *p = page_to_phys(page);
+ p++;
+ }
+
+ return seamcall(TDH_PHYMEM_PAMT_ADD, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_phymem_pamt_add);
+
+u64 tdh_phymem_pamt_remove(unsigned long hpa, struct list_head *pamt_pages)
+{
+ struct tdx_module_args args = {
+ .rcx = hpa,
+ };
+ struct page *page;
+ u64 *p, ret;
+
+ WARN_ON_ONCE(!IS_ALIGNED(hpa & PAGE_MASK, PMD_SIZE));
+
+ ret = seamcall_ret(TDH_PHYMEM_PAMT_REMOVE, &args);
+ if (ret)
+ return ret;
+
+ p = &args.rdx;
+ for (int i = 0; i < tdx_nr_pamt_pages(&tdx_sysinfo); i++) {
+ page = phys_to_page(*p);
+ list_add(&page->lru, pamt_pages);
+ p++;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_phymem_pamt_remove);
diff --git a/arch/x86/virt/vmx/tdx/tdx.h b/arch/x86/virt/vmx/tdx/tdx.h
index 82bb82be8567..46c4214b79fb 100644
--- a/arch/x86/virt/vmx/tdx/tdx.h
+++ b/arch/x86/virt/vmx/tdx/tdx.h
@@ -46,6 +46,8 @@
#define TDH_PHYMEM_PAGE_WBINVD 41
#define TDH_VP_WR 43
#define TDH_SYS_CONFIG 45
+#define TDH_PHYMEM_PAMT_ADD 58
+#define TDH_PHYMEM_PAMT_REMOVE 59
/*
* SEAMCALL leaf:
--
2.47.2