Message-ID: <20260129011517.3545883-39-seanjc@google.com>
Date: Wed, 28 Jan 2026 17:15:10 -0800
From: Sean Christopherson <seanjc@...gle.com>
To: Thomas Gleixner <tglx@...nel.org>, Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org,
Kiryl Shutsemau <kas@...nel.org>, Sean Christopherson <seanjc@...gle.com>, Paolo Bonzini <pbonzini@...hat.com>
Cc: linux-kernel@...r.kernel.org, linux-coco@...ts.linux.dev,
kvm@...r.kernel.org, Kai Huang <kai.huang@...el.com>,
Rick Edgecombe <rick.p.edgecombe@...el.com>, Yan Zhao <yan.y.zhao@...el.com>,
Vishal Annapurve <vannapurve@...gle.com>, Ackerley Tng <ackerleytng@...gle.com>,
Sagi Shahar <sagis@...gle.com>, Binbin Wu <binbin.wu@...ux.intel.com>,
Xiaoyao Li <xiaoyao.li@...el.com>, Isaku Yamahata <isaku.yamahata@...el.com>
Subject: [RFC PATCH v5 38/45] KVM: x86/mmu: Add Dynamic PAMT support in TDP
MMU for vCPU-induced page split

Extend the TDP MMU to support vCPU-induced hugepage splits in mirror roots
when Dynamic PAMT is enabled.  I.e. top up the PAMT cache when allocating
a new child page table, so that if the split is successful, there will be
a PAMT page waiting to be associated with the new, less/non-huge mapping.

Note, the cache top-up is for the guest memory, not the S-EPT page, as the
S-EPT page's PAMT pages are accounted up front by .alloc_external_sp().

Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
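A minimal, illustrative sketch of the tdx_topup_pamt_cache() helper that the
tdx.c hunk below calls into, assuming the per-vCPU pamt_cache is a plain
struct kvm_mmu_memory_cache.  The helper and field names come from the diff;
the body here is only a guess at their shape, not the series' actual
implementation:

#include <linux/kvm_host.h>

/*
 * Sketch only: pre-fill the cache with at least @min pages so a PAMT page
 * is already on hand when the demoted (4K) guest mappings need their PAMT
 * backing installed.  Assumes pamt_cache is a struct kvm_mmu_memory_cache.
 */
static int tdx_topup_pamt_cache(struct kvm_mmu_memory_cache *cache, int min)
{
	return kvm_mmu_topup_memory_cache(cache, min);
}

With that shape, the WARN_ON_ONCE(!vcpu) added below simply guards the
to_tdx(vcpu) dereference when the hook is invoked without a running vCPU.
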
 arch/x86/kvm/mmu/tdp_mmu.c | 25 ++++++++++++++++---------
 arch/x86/kvm/vmx/tdx.c     |  3 +++
 2 files changed, 19 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 4f5b80f0ca03..e32034bfca5a 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1456,21 +1456,28 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct tdp_iter *iter)
 		return NULL;
 
 	sp->spt = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
-	if (!sp->spt) {
-		kmem_cache_free(mmu_page_header_cache, sp);
-		return NULL;
-	}
+	if (!sp->spt)
+		goto err_spt;
 
 	if (is_mirror_sptep(iter->sptep)) {
 		sp->external_spt = (void *)kvm_x86_call(alloc_external_sp)(GFP_KERNEL_ACCOUNT);
-		if (!sp->external_spt) {
-			free_page((unsigned long)sp->spt);
-			kmem_cache_free(mmu_page_header_cache, sp);
-			return NULL;
-		}
+		if (!sp->external_spt)
+			goto err_external_spt;
+
+		if (kvm_x86_call(topup_external_cache)(kvm_get_running_vcpu(), 1))
+			goto err_external_split;
 	}
 
 	return sp;
+
+err_external_split:
+	kvm_x86_call(free_external_sp)((unsigned long)sp->external_spt);
+err_external_spt:
+	free_page((unsigned long)sp->spt);
+err_spt:
+	kmem_cache_free(mmu_page_header_cache, sp);
+	return NULL;
+
 }
 
 /* Note, the caller is responsible for initializing @sp. */
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 59b7ba36d3d9..e90610540a0b 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1625,6 +1625,9 @@ static int tdx_topup_external_pamt_cache(struct kvm_vcpu *vcpu, int min)
 	if (!tdx_supports_dynamic_pamt(tdx_sysinfo))
 		return 0;
 
+	if (WARN_ON_ONCE(!vcpu))
+		return -EIO;
+
 	return tdx_topup_pamt_cache(&to_tdx(vcpu)->pamt_cache, min);
 }
 
--
2.53.0.rc1.217.geba53bf80e-goog