Message-ID: <20250609191340.2051741-8-kirill.shutemov@linux.intel.com>
Date: Mon, 9 Jun 2025 22:13:35 +0300
From: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
To: pbonzini@...hat.com,
seanjc@...gle.com,
dave.hansen@...ux.intel.com
Cc: rick.p.edgecombe@...el.com,
isaku.yamahata@...el.com,
kai.huang@...el.com,
yan.y.zhao@...el.com,
chao.gao@...el.com,
tglx@...utronix.de,
mingo@...hat.com,
bp@...en8.de,
kvm@...r.kernel.org,
x86@...nel.org,
linux-coco@...ts.linux.dev,
linux-kernel@...r.kernel.org,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
Subject: [PATCHv2 07/12] KVM: TDX: Preallocate PAMT pages to be used in page fault path
Preallocate pages to be used in the link_external_spt() and
set_external_spte() paths.

In the worst case, handling a page fault might require
tdx_nr_pamt_pages() pages for each page table level.
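
As a concrete illustration of the arithmetic (not part of the patch), a
minimal stand-alone sketch follows; the 16-byte PAMT 4K entry size is a
hypothetical value, the real one is reported by the TDX module via
tdx_sysinfo:

	/* Sketch only: sizes are illustrative, not taken from hardware. */
	#define SKETCH_PAMT_4K_ENTRY_SIZE	16	/* hypothetical */
	#define SKETCH_PTRS_PER_PTE		512
	#define SKETCH_PAGE_SIZE		4096
	#define SKETCH_PT64_ROOT_MAX_LEVEL	5

	/* Pages backing dynamic PAMT for one 2M region: 16 * 512 / 4096 = 2 */
	static int sketch_nr_pamt_pages(void)
	{
		return SKETCH_PAMT_4K_ENTRY_SIZE * SKETCH_PTRS_PER_PTE /
		       SKETCH_PAGE_SIZE;
	}

	/* A fault may install entries at every level: 2 * 5 = 10 pages */
	static int sketch_pamt_cache_min(void)
	{
		return sketch_nr_pamt_pages() * SKETCH_PT64_ROOT_MAX_LEVEL;
	}

mmu_topup_memory_caches() below tops up pamt_page_cache to exactly this
bound so the fault path never needs to allocate PAMT pages itself.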
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
---
arch/x86/include/asm/kvm_host.h | 2 ++
arch/x86/include/asm/tdx.h | 2 ++
arch/x86/kvm/mmu/mmu.c | 7 +++++++
arch/x86/virt/vmx/tdx/tdx.c | 3 ++-
4 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 330cdcbed1a6..02dbbf848182 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -849,6 +849,8 @@ struct kvm_vcpu_arch {
*/
struct kvm_mmu_memory_cache mmu_external_spt_cache;
+ struct kvm_mmu_memory_cache pamt_page_cache;
+
/*
* QEMU userspace and the guest each have their own FPU state.
* In vcpu_run, we switch between the user and guest FPU contexts.
diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
index d9a77147412f..47092eb13eb3 100644
--- a/arch/x86/include/asm/tdx.h
+++ b/arch/x86/include/asm/tdx.h
@@ -115,6 +115,7 @@ int tdx_guest_keyid_alloc(void);
u32 tdx_get_nr_guest_keyids(void);
void tdx_guest_keyid_free(unsigned int keyid);
+int tdx_nr_pamt_pages(void);
struct page *tdx_alloc_page(void);
void tdx_free_page(struct page *page);
@@ -188,6 +189,7 @@ static inline int tdx_enable(void) { return -ENODEV; }
static inline u32 tdx_get_nr_guest_keyids(void) { return 0; }
static inline const char *tdx_dump_mce_info(struct mce *m) { return NULL; }
static inline const struct tdx_sys_info *tdx_get_sysinfo(void) { return NULL; }
+static inline int tdx_nr_pamt_pages(void) { return 0; }
#endif /* CONFIG_INTEL_TDX_HOST */
#endif /* !__ASSEMBLER__ */
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index cbc84c6abc2e..d99bb27b5b01 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -616,6 +616,12 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
if (r)
return r;
}
+
+ r = kvm_mmu_topup_memory_cache(&vcpu->arch.pamt_page_cache,
+ tdx_nr_pamt_pages() * PT64_ROOT_MAX_LEVEL);
+ if (r)
+ return r;
+
return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
PT64_ROOT_MAX_LEVEL);
}
@@ -626,6 +632,7 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadowed_info_cache);
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_external_spt_cache);
+ kvm_mmu_free_memory_cache(&vcpu->arch.pamt_page_cache);
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}
diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
index c514c60e8c8d..4f9eaba4af4a 100644
--- a/arch/x86/virt/vmx/tdx/tdx.c
+++ b/arch/x86/virt/vmx/tdx/tdx.c
@@ -2001,13 +2001,14 @@ u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct page *page)
}
EXPORT_SYMBOL_GPL(tdh_phymem_page_wbinvd_hkid);
-static int tdx_nr_pamt_pages(void)
+int tdx_nr_pamt_pages(void)
{
if (!tdx_supports_dynamic_pamt(&tdx_sysinfo))
return 0;
return tdx_sysinfo.tdmr.pamt_4k_entry_size * PTRS_PER_PTE / PAGE_SIZE;
}
+EXPORT_SYMBOL_GPL(tdx_nr_pamt_pages);
static u64 tdh_phymem_pamt_add(unsigned long hpa,
struct list_head *pamt_pages)
--
2.47.2