[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250502130828.4071412-11-kirill.shutemov@linux.intel.com>
Date: Fri, 2 May 2025 16:08:26 +0300
From: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
To: pbonzini@...hat.com,
seanjc@...gle.com
Cc: rick.p.edgecombe@...el.com,
isaku.yamahata@...el.com,
kai.huang@...el.com,
yan.y.zhao@...el.com,
tglx@...utronix.de,
mingo@...hat.com,
bp@...en8.de,
dave.hansen@...ux.intel.com,
kvm@...r.kernel.org,
x86@...nel.org,
linux-coco@...ts.linux.dev,
linux-kernel@...r.kernel.org,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
Subject: [RFC, PATCH 10/12] KVM: TDX: Hook up phys_prepare() and phys_cleanup() kvm_x86_ops
Allocate PAMT memory from a per-VCPU pool in kvm_x86_ops::phys_prepare()
and release memory in kvm_x86_ops::phys_cleanup().
The TDP code invokes these callbacks to handle PAMT memory management.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
---
arch/x86/kvm/vmx/main.c | 2 ++
arch/x86/kvm/vmx/tdx.c | 30 ++++++++++++++++++++++++++++++
arch/x86/kvm/vmx/x86_ops.h | 9 +++++++++
virt/kvm/kvm_main.c | 1 +
4 files changed, 42 insertions(+)
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 94d5d907d37b..665a3dbd4ba5 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -63,6 +63,8 @@ static __init int vt_hardware_setup(void)
vt_x86_ops.free_external_spt = tdx_sept_free_private_spt;
vt_x86_ops.remove_external_spte = tdx_sept_remove_private_spte;
vt_x86_ops.protected_apic_has_interrupt = tdx_protected_apic_has_interrupt;
+ vt_x86_ops.phys_prepare = tdx_phys_prepare;
+ vt_x86_ops.phys_cleanup = tdx_phys_cleanup;
}
return 0;
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 18c4ae00cd8d..0f06ae7ff6b9 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1958,6 +1958,36 @@ int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
return tdx_sept_drop_private_spte(kvm, gfn, level, page);
}
+/*
+ * Ensure Dynamic PAMT backing exists for the physical page at @pfn before
+ * it is handed to the TDX module.  Takes a reference on the PAMT region's
+ * refcount; the matching release is tdx_phys_cleanup().
+ *
+ * Returns 0 on success or the error returned by tdx_pamt_add().
+ *
+ * NOTE(review): assumes vcpu->arch.pamt_page_cache was topped up earlier
+ * in a sleepable context with at least tdx_nr_pamt_pages() entries —
+ * kvm_mmu_memory_cache_alloc() BUG()s on an empty cache (see the
+ * kvm_main.c hunk below); confirm against the cache-topup patch in this
+ * series.
+ */
+int tdx_phys_prepare(struct kvm_vcpu *vcpu, kvm_pfn_t pfn)
+{
+ unsigned long hpa = pfn << PAGE_SHIFT;
+ atomic_t *pamt_refcount;
+ LIST_HEAD(pamt_pages);
+
+ /* Without Dynamic PAMT the module covers all memory statically. */
+ if (!tdx_supports_dynamic_pamt(tdx_sysinfo))
+ return 0;
+
+ /*
+ * Fast path: PAMT memory for this region is already in place; just
+ * pin it.  inc_not_zero avoids racing with a concurrent last-put
+ * that is about to free the PAMT pages.
+ */
+ pamt_refcount = tdx_get_pamt_refcount(hpa);
+ if (atomic_inc_not_zero(pamt_refcount))
+ return 0;
+
+ /* Pull the required PAMT pages out of the per-vCPU cache. */
+ for (int i = 0; i < tdx_nr_pamt_pages(tdx_sysinfo); i++) {
+ struct page *page;
+ void *p;
+
+ p = kvm_mmu_memory_cache_alloc(&vcpu->arch.pamt_page_cache);
+ page = virt_to_page(p);
+ list_add(&page->lru, &pamt_pages);
+ }
+
+ /*
+ * NOTE(review): presumably tdx_pamt_add() takes ownership of
+ * pamt_pages on both success and failure — verify it frees the
+ * list on error, otherwise these pages leak.
+ */
+ return tdx_pamt_add(pamt_refcount, hpa, &pamt_pages);
+}
+
+/*
+ * Drop the PAMT reference taken by tdx_phys_prepare() for @pfn.
+ * Presumably the final put releases the backing PAMT pages — see
+ * tdx_pamt_put() for the teardown side.
+ */
+void tdx_phys_cleanup(kvm_pfn_t pfn)
+{
+ tdx_pamt_put(pfn_to_page(pfn));
+}
+
void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
int trig_mode, int vector)
{
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index 6bf8be570b2e..111f16c3039f 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -158,6 +158,8 @@ int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
enum pg_level level, kvm_pfn_t pfn);
int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
enum pg_level level, kvm_pfn_t pfn);
+int tdx_phys_prepare(struct kvm_vcpu *vcpu, kvm_pfn_t pfn);
+void tdx_phys_cleanup(kvm_pfn_t pfn);
void tdx_flush_tlb_current(struct kvm_vcpu *vcpu);
void tdx_flush_tlb_all(struct kvm_vcpu *vcpu);
@@ -224,6 +226,13 @@ static inline int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
return -EOPNOTSUPP;
}
+/*
+ * Stubs for builds without TDX support: the callbacks are never wired up
+ * (see vt_hardware_setup()), so phys_prepare reports -EOPNOTSUPP and
+ * phys_cleanup is a no-op.
+ */
+static inline int tdx_phys_prepare(struct kvm_vcpu *vcpu, kvm_pfn_t pfn)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void tdx_phys_cleanup(kvm_pfn_t pfn) {}
+
static inline void tdx_flush_tlb_current(struct kvm_vcpu *vcpu) {}
static inline void tdx_flush_tlb_all(struct kvm_vcpu *vcpu) {}
static inline void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level) {}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 69782df3617f..c3ba3ca37940 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -436,6 +436,7 @@ void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
BUG_ON(!p);
return p;
}
+EXPORT_SYMBOL_GPL(kvm_mmu_memory_cache_alloc);
#endif
static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
--
2.47.2
Powered by blists - more mailing lists