Message-ID: <20260129011517.3545883-22-seanjc@google.com>
Date: Wed, 28 Jan 2026 17:14:53 -0800
From: Sean Christopherson <seanjc@...gle.com>
To: Thomas Gleixner <tglx@...nel.org>, Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org,
Kiryl Shutsemau <kas@...nel.org>, Sean Christopherson <seanjc@...gle.com>, Paolo Bonzini <pbonzini@...hat.com>
Cc: linux-kernel@...r.kernel.org, linux-coco@...ts.linux.dev,
kvm@...r.kernel.org, Kai Huang <kai.huang@...el.com>,
Rick Edgecombe <rick.p.edgecombe@...el.com>, Yan Zhao <yan.y.zhao@...el.com>,
Vishal Annapurve <vannapurve@...gle.com>, Ackerley Tng <ackerleytng@...gle.com>,
Sagi Shahar <sagis@...gle.com>, Binbin Wu <binbin.wu@...ux.intel.com>,
Xiaoyao Li <xiaoyao.li@...el.com>, Isaku Yamahata <isaku.yamahata@...el.com>
Subject: [RFC PATCH v5 21/45] x86/tdx: Add APIs to support get/put of DPAMT
entries from KVM, under spinlock
From: Rick Edgecombe <rick.p.edgecombe@...el.com>
Implement a PAMT "caching" scheme, similar to KVM's pre-allocated caches of
MMU assets, along with APIs that allow KVM to pre-allocate PAMT pages before
acquiring its mmu_lock spinlock, and to defer updating the Dynamic PAMT
until S-EPT entries are actually created.
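For reference, a rough sketch of the intended call pattern from the KVM
side (names such as nr_pages_needed and sept_page are placeholders for
illustration, not part of this patch):

	struct tdx_pamt_cache cache;
	int r;

	tdx_init_pamt_cache(&cache);

	/*
	 * Outside mmu_lock: may sleep and allocate.  Each "unit" in the
	 * cache covers tdx_dpamt_entry_pages() pages.
	 */
	r = tdx_topup_pamt_cache(&cache, nr_pages_needed);
	if (r)
		goto out;

	/*
	 * Under mmu_lock, when installing the S-EPT entry: never
	 * allocates, only consumes pages from the pre-filled cache.
	 */
	r = tdx_pamt_get(sept_page, &cache);

	...

	/* When the S-EPT page is torn down. */
	tdx_pamt_put(sept_page);

out:
	/* Return any unused pre-allocated pages. */
	tdx_free_pamt_cache(&cache);

Callers that don't have a cache, e.g. __tdx_alloc_control_page(), pass
NULL and fall back to allocating PAMT pages directly, as before.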
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@...el.com>
Co-developed-by: Sean Christopherson <seanjc@...gle.com>
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
arch/x86/include/asm/tdx.h | 17 ++++++++++
arch/x86/virt/vmx/tdx/tdx.c | 65 +++++++++++++++++++++++++++++++++----
2 files changed, 76 insertions(+), 6 deletions(-)
diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
index fa29be18498c..c39e2920d0c3 100644
--- a/arch/x86/include/asm/tdx.h
+++ b/arch/x86/include/asm/tdx.h
@@ -136,6 +136,23 @@ static inline bool tdx_supports_dynamic_pamt(const struct tdx_sys_info *sysinfo)
return false; /* To be enabled when kernel is ready */
}
+/* Simple structure for pre-allocating Dynamic PAMT pages outside of locks. */
+struct tdx_pamt_cache {
+ struct list_head page_list;
+ int cnt;
+};
+
+static inline void tdx_init_pamt_cache(struct tdx_pamt_cache *cache)
+{
+ INIT_LIST_HEAD(&cache->page_list);
+ cache->cnt = 0;
+}
+
+void tdx_free_pamt_cache(struct tdx_pamt_cache *cache);
+int tdx_topup_pamt_cache(struct tdx_pamt_cache *cache, unsigned long npages);
+int tdx_pamt_get(struct page *page, struct tdx_pamt_cache *cache);
+void tdx_pamt_put(struct page *page);
+
void tdx_quirk_reset_page(struct page *page);
int tdx_guest_keyid_alloc(void);
diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
index d333d2790913..53b29c827520 100644
--- a/arch/x86/virt/vmx/tdx/tdx.c
+++ b/arch/x86/virt/vmx/tdx/tdx.c
@@ -2064,13 +2064,34 @@ u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct page *page)
}
EXPORT_SYMBOL_FOR_KVM(tdh_phymem_page_wbinvd_hkid);
-static int alloc_pamt_array(u64 *pa_array)
+static struct page *tdx_alloc_page_pamt_cache(struct tdx_pamt_cache *cache)
+{
+ struct page *page;
+
+ page = list_first_entry_or_null(&cache->page_list, struct page, lru);
+ if (page) {
+ list_del(&page->lru);
+ cache->cnt--;
+ }
+
+ return page;
+}
+
+static struct page *alloc_dpamt_page(struct tdx_pamt_cache *cache)
+{
+ if (cache)
+ return tdx_alloc_page_pamt_cache(cache);
+
+ return alloc_page(GFP_KERNEL_ACCOUNT);
+}
+
+static int alloc_pamt_array(u64 *pa_array, struct tdx_pamt_cache *cache)
{
struct page *page;
int i;
for (i = 0; i < tdx_dpamt_entry_pages(); i++) {
- page = alloc_page(GFP_KERNEL_ACCOUNT);
+ page = alloc_dpamt_page(cache);
if (!page)
goto err;
pa_array[i] = page_to_phys(page);
@@ -2151,7 +2172,7 @@ static u64 tdh_phymem_pamt_remove(struct page *page, u64 *pamt_pa_array)
static DEFINE_SPINLOCK(pamt_lock);
/* Bump PAMT refcount for the given page and allocate PAMT memory if needed */
-static int tdx_pamt_get(struct page *page)
+int tdx_pamt_get(struct page *page, struct tdx_pamt_cache *cache)
{
u64 pamt_pa_array[MAX_NR_DPAMT_ARGS];
atomic_t *pamt_refcount;
@@ -2170,7 +2191,7 @@ static int tdx_pamt_get(struct page *page)
if (atomic_inc_not_zero(pamt_refcount))
return 0;
- ret = alloc_pamt_array(pamt_pa_array);
+ ret = alloc_pamt_array(pamt_pa_array, cache);
if (ret)
goto out_free;
@@ -2222,12 +2243,13 @@ static int tdx_pamt_get(struct page *page)
free_pamt_array(pamt_pa_array);
return ret;
}
+EXPORT_SYMBOL_FOR_KVM(tdx_pamt_get);
/*
* Drop PAMT refcount for the given page and free PAMT memory if it is no
* longer needed.
*/
-static void tdx_pamt_put(struct page *page)
+void tdx_pamt_put(struct page *page)
{
u64 pamt_pa_array[MAX_NR_DPAMT_ARGS];
atomic_t *pamt_refcount;
@@ -2281,6 +2303,37 @@ static void tdx_pamt_put(struct page *page)
*/
free_pamt_array(pamt_pa_array);
}
+EXPORT_SYMBOL_FOR_KVM(tdx_pamt_put);
+
+void tdx_free_pamt_cache(struct tdx_pamt_cache *cache)
+{
+ struct page *page;
+
+ while ((page = tdx_alloc_page_pamt_cache(cache)))
+ __free_page(page);
+}
+EXPORT_SYMBOL_FOR_KVM(tdx_free_pamt_cache);
+
+int tdx_topup_pamt_cache(struct tdx_pamt_cache *cache, unsigned long npages)
+{
+ if (WARN_ON_ONCE(!tdx_supports_dynamic_pamt(&tdx_sysinfo)))
+ return 0;
+
+ npages *= tdx_dpamt_entry_pages();
+
+ while (cache->cnt < npages) {
+ struct page *page = alloc_page(GFP_KERNEL_ACCOUNT);
+
+ if (!page)
+ return -ENOMEM;
+
+ list_add(&page->lru, &cache->page_list);
+ cache->cnt++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_FOR_KVM(tdx_topup_pamt_cache);
/*
* Return a page that can be gifted to the TDX-Module for use as a "control"
@@ -2296,7 +2349,7 @@ struct page *__tdx_alloc_control_page(gfp_t gfp)
if (!page)
return NULL;
- if (tdx_pamt_get(page)) {
+ if (tdx_pamt_get(page, NULL)) {
__free_page(page);
return NULL;
}
--
2.53.0.rc1.217.geba53bf80e-goog