Message-ID: <20260129011517.3545883-30-seanjc@google.com>
Date: Wed, 28 Jan 2026 17:15:01 -0800
From: Sean Christopherson <seanjc@...gle.com>
To: Thomas Gleixner <tglx@...nel.org>, Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org,
Kiryl Shutsemau <kas@...nel.org>, Sean Christopherson <seanjc@...gle.com>, Paolo Bonzini <pbonzini@...hat.com>
Cc: linux-kernel@...r.kernel.org, linux-coco@...ts.linux.dev,
kvm@...r.kernel.org, Kai Huang <kai.huang@...el.com>,
Rick Edgecombe <rick.p.edgecombe@...el.com>, Yan Zhao <yan.y.zhao@...el.com>,
Vishal Annapurve <vannapurve@...gle.com>, Ackerley Tng <ackerleytng@...gle.com>,
Sagi Shahar <sagis@...gle.com>, Binbin Wu <binbin.wu@...ux.intel.com>,
Xiaoyao Li <xiaoyao.li@...el.com>, Isaku Yamahata <isaku.yamahata@...el.com>
Subject: [RFC PATCH v5 29/45] x86/virt/tdx: Get/Put DPAMT page pair if and
only if mapping size is 4KB
From: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
Elide the guts of getting/putting a Dynamic PAMT entry when the associated
mapping is greater than 4KiB, in which case static PAMT pages are used and
there's no need to (un)install extra PAMT pages.
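As a quick illustration of the resulting behavior (a sketch only; the
surrounding caller is hypothetical, while tdx_pamt_get()/tdx_pamt_put()
and the PG_LEVEL_* values come from the patch below):

	/* 4KiB mapping: takes the real DPAMT path. */
	ret = tdx_pamt_get(pfn, PG_LEVEL_4K, cache);	/* may install a page pair */
	...
	tdx_pamt_put(pfn, PG_LEVEL_4K);			/* may free the pair */

	/* 2MiB mapping: covered by the static PAMT, both calls short-circuit. */
	ret = tdx_pamt_get(pfn, PG_LEVEL_2M, cache);	/* returns 0 immediately */
	...
	tdx_pamt_put(pfn, PG_LEVEL_2M);			/* no-op */

Keeping the level check in the static inline wrappers, instead of
open-coding it at every call site, gives all callers the fast path for
free and avoids a function call for non-4KiB mappings.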
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
[Yan: Move level checking to callers of tdx_pamt_{get/put}()]
Signed-off-by: Yan Zhao <yan.y.zhao@...el.com>
[sean: move level checking back to tdx_pamt_{get/put}()]
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
arch/x86/include/asm/tdx.h | 16 ++++++++++++++--
arch/x86/kvm/vmx/tdx.c | 6 +++---
arch/x86/virt/vmx/tdx/tdx.c | 12 ++++++------
3 files changed, 23 insertions(+), 11 deletions(-)
diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
index e61b0b3cc403..50feea01b066 100644
--- a/arch/x86/include/asm/tdx.h
+++ b/arch/x86/include/asm/tdx.h
@@ -154,8 +154,20 @@ static inline void tdx_init_pamt_cache(struct tdx_pamt_cache *cache)
void tdx_free_pamt_cache(struct tdx_pamt_cache *cache);
int tdx_topup_pamt_cache(struct tdx_pamt_cache *cache, unsigned long npages);
-int tdx_pamt_get(u64 pfn, struct tdx_pamt_cache *cache);
-void tdx_pamt_put(u64 pfn);
+int __tdx_pamt_get(u64 pfn, struct tdx_pamt_cache *cache);
+void __tdx_pamt_put(u64 pfn);
+
+static inline int tdx_pamt_get(u64 pfn, enum pg_level level,
+ struct tdx_pamt_cache *cache)
+{
+ return level == PG_LEVEL_4K ? __tdx_pamt_get(pfn, cache) : 0;
+}
+
+static inline void tdx_pamt_put(u64 pfn, enum pg_level level)
+{
+ if (level == PG_LEVEL_4K)
+ __tdx_pamt_put(pfn);
+}
void __tdx_quirk_reset_page(u64 pfn, enum pg_level level);
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index aca556923822..bd5d902da303 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1729,7 +1729,7 @@ static int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
WARN_ON_ONCE((mirror_spte & VMX_EPT_RWX_MASK) != VMX_EPT_RWX_MASK);
- ret = tdx_pamt_get(pfn, &tdx->pamt_cache);
+ ret = tdx_pamt_get(pfn, level, &tdx->pamt_cache);
if (ret)
return ret;
@@ -1751,7 +1751,7 @@ static int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
ret = tdx_mem_page_add(kvm, gfn, level, pfn);
if (ret)
- tdx_pamt_put(pfn);
+ tdx_pamt_put(pfn, level);
return ret;
}
@@ -1872,7 +1872,7 @@ static void tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
return;
__tdx_quirk_reset_page(pfn, level);
- tdx_pamt_put(pfn);
+ tdx_pamt_put(pfn, level);
}
void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
index 411e5feef39f..cff325fdec79 100644
--- a/arch/x86/virt/vmx/tdx/tdx.c
+++ b/arch/x86/virt/vmx/tdx/tdx.c
@@ -2195,7 +2195,7 @@ static u64 tdh_phymem_pamt_remove(u64 pfn, u64 *pamt_pa_array)
static DEFINE_SPINLOCK(pamt_lock);
/* Bump PAMT refcount for the given page and allocate PAMT memory if needed */
-int tdx_pamt_get(u64 pfn, struct tdx_pamt_cache *cache)
+int __tdx_pamt_get(u64 pfn, struct tdx_pamt_cache *cache)
{
u64 pamt_pa_array[MAX_NR_DPAMT_ARGS];
atomic_t *pamt_refcount;
@@ -2266,13 +2266,13 @@ int tdx_pamt_get(u64 pfn, struct tdx_pamt_cache *cache)
free_pamt_array(pamt_pa_array);
return ret;
}
-EXPORT_SYMBOL_FOR_KVM(tdx_pamt_get);
+EXPORT_SYMBOL_FOR_KVM(__tdx_pamt_get);
/*
* Drop PAMT refcount for the given page and free PAMT memory if it is no
* longer needed.
*/
-void tdx_pamt_put(u64 pfn)
+void __tdx_pamt_put(u64 pfn)
{
u64 pamt_pa_array[MAX_NR_DPAMT_ARGS];
atomic_t *pamt_refcount;
@@ -2326,7 +2326,7 @@ void tdx_pamt_put(u64 pfn)
*/
free_pamt_array(pamt_pa_array);
}
-EXPORT_SYMBOL_FOR_KVM(tdx_pamt_put);
+EXPORT_SYMBOL_FOR_KVM(__tdx_pamt_put);
void tdx_free_pamt_cache(struct tdx_pamt_cache *cache)
{
@@ -2372,7 +2372,7 @@ struct page *__tdx_alloc_control_page(gfp_t gfp)
if (!page)
return NULL;
- if (tdx_pamt_get(page_to_pfn(page), NULL)) {
+ if (__tdx_pamt_get(page_to_pfn(page), NULL)) {
__free_page(page);
return NULL;
}
@@ -2390,7 +2390,7 @@ void __tdx_free_control_page(struct page *page)
if (!page)
return;
- tdx_pamt_put(page_to_pfn(page));
+ __tdx_pamt_put(page_to_pfn(page));
__free_page(page);
}
EXPORT_SYMBOL_FOR_KVM(__tdx_free_control_page);
--
2.53.0.rc1.217.geba53bf80e-goog