Message-ID: <20260129011517.3545883-19-seanjc@google.com>
Date: Wed, 28 Jan 2026 17:14:50 -0800
From: Sean Christopherson <seanjc@...gle.com>
To: Thomas Gleixner <tglx@...nel.org>, Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org,
Kiryl Shutsemau <kas@...nel.org>, Sean Christopherson <seanjc@...gle.com>, Paolo Bonzini <pbonzini@...hat.com>
Cc: linux-kernel@...r.kernel.org, linux-coco@...ts.linux.dev,
kvm@...r.kernel.org, Kai Huang <kai.huang@...el.com>,
Rick Edgecombe <rick.p.edgecombe@...el.com>, Yan Zhao <yan.y.zhao@...el.com>,
Vishal Annapurve <vannapurve@...gle.com>, Ackerley Tng <ackerleytng@...gle.com>,
Sagi Shahar <sagis@...gle.com>, Binbin Wu <binbin.wu@...ux.intel.com>,
Xiaoyao Li <xiaoyao.li@...el.com>, Isaku Yamahata <isaku.yamahata@...el.com>
Subject: [RFC PATCH v5 18/45] KVM: TDX: Allocate PAMT memory for TD and vCPU
control structures
From: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>

TDX TD control structures are provided to the TDX module at 4KB page size
and require PAMT backing. This means that with Dynamic PAMT they also need
4KB PAMT backings installed. Use the recently introduced TDX APIs for
allocating/freeing control pages, which handle the DPAMT maintenance, to
allocate/free the TD and vCPU control pages for TDX guests.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
[update log]
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@...el.com>
[sean: handle alloc+free+reclaim in one patch]
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
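Note (not part of the patch): the conversion below assumes the control-page
helpers added earlier in this series both hide the Dynamic PAMT bookkeeping
and tolerate a NULL page on free. A rough sketch of the expected shape, with
tdx_pamt_get()/tdx_pamt_put() used purely as placeholder names for the DPAMT
install/remove step:

/*
 * Illustrative sketch only -- the real helpers live in the earlier
 * arch/x86 patches of this series; tdx_pamt_get()/tdx_pamt_put() are
 * placeholder names for the Dynamic PAMT install/remove step.
 */
static struct page *__tdx_alloc_control_page(gfp_t gfp)
{
	struct page *page = alloc_page(gfp);

	if (!page)
		return NULL;

	/* Install the 4KB PAMT backing when Dynamic PAMT is enabled. */
	if (tdx_pamt_get(page)) {
		__free_page(page);
		return NULL;
	}

	return page;
}

static void __tdx_free_control_page(struct page *page)
{
	/* Tolerate NULL so callers don't need explicit checks. */
	if (!page)
		return;

	/* Drop the 4KB PAMT backing before freeing the page. */
	tdx_pamt_put(page);
	__free_page(page);
}

Because the free helper is assumed to accept NULL, the call sites in the diff
can drop their explicit "if (page)" checks, which is what most of the cleanup
below amounts to.
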
arch/x86/kvm/vmx/tdx.c | 35 ++++++++++++++---------------------
1 file changed, 14 insertions(+), 21 deletions(-)
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 4ef414ee27b4..323aae4300a1 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -360,7 +360,7 @@ static void tdx_reclaim_control_page(struct page *ctrl_page)
if (tdx_reclaim_page(ctrl_page))
return;
- __free_page(ctrl_page);
+ __tdx_free_control_page(ctrl_page);
}
struct tdx_flush_vp_arg {
@@ -597,7 +597,7 @@ static void tdx_reclaim_td_control_pages(struct kvm *kvm)
tdx_quirk_reset_page(kvm_tdx->td.tdr_page);
- __free_page(kvm_tdx->td.tdr_page);
+ __tdx_free_control_page(kvm_tdx->td.tdr_page);
kvm_tdx->td.tdr_page = NULL;
}
@@ -2412,7 +2412,7 @@ static int __tdx_td_init(struct kvm *kvm, struct td_params *td_params,
atomic_inc(&nr_configured_hkid);
- tdr_page = alloc_page(GFP_KERNEL_ACCOUNT);
+ tdr_page = __tdx_alloc_control_page(GFP_KERNEL_ACCOUNT);
if (!tdr_page)
goto free_hkid;
@@ -2425,7 +2425,7 @@ static int __tdx_td_init(struct kvm *kvm, struct td_params *td_params,
goto free_tdr;
for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++) {
- tdcs_pages[i] = alloc_page(GFP_KERNEL_ACCOUNT);
+ tdcs_pages[i] = __tdx_alloc_control_page(GFP_KERNEL_ACCOUNT);
if (!tdcs_pages[i])
goto free_tdcs;
}
@@ -2543,10 +2543,8 @@ static int __tdx_td_init(struct kvm *kvm, struct td_params *td_params,
teardown:
/* Only free pages not yet added, so start at 'i' */
for (; i < kvm_tdx->td.tdcs_nr_pages; i++) {
- if (tdcs_pages[i]) {
- __free_page(tdcs_pages[i]);
- tdcs_pages[i] = NULL;
- }
+ __tdx_free_control_page(tdcs_pages[i]);
+ tdcs_pages[i] = NULL;
}
if (!kvm_tdx->td.tdcs_pages)
kfree(tdcs_pages);
@@ -2561,16 +2559,13 @@ static int __tdx_td_init(struct kvm *kvm, struct td_params *td_params,
free_cpumask_var(packages);
free_tdcs:
- for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++) {
- if (tdcs_pages[i])
- __free_page(tdcs_pages[i]);
- }
+ for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++)
+ __tdx_free_control_page(tdcs_pages[i]);
kfree(tdcs_pages);
kvm_tdx->td.tdcs_pages = NULL;
free_tdr:
- if (tdr_page)
- __free_page(tdr_page);
+ __tdx_free_control_page(tdr_page);
kvm_tdx->td.tdr_page = NULL;
free_hkid:
@@ -2900,7 +2895,7 @@ static int tdx_td_vcpu_init(struct kvm_vcpu *vcpu, u64 vcpu_rcx)
int ret, i;
u64 err;
- page = alloc_page(GFP_KERNEL_ACCOUNT);
+ page = __tdx_alloc_control_page(GFP_KERNEL_ACCOUNT);
if (!page)
return -ENOMEM;
tdx->vp.tdvpr_page = page;
@@ -2920,7 +2915,7 @@ static int tdx_td_vcpu_init(struct kvm_vcpu *vcpu, u64 vcpu_rcx)
}
for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++) {
- page = alloc_page(GFP_KERNEL_ACCOUNT);
+ page = __tdx_alloc_control_page(GFP_KERNEL_ACCOUNT);
if (!page) {
ret = -ENOMEM;
goto free_tdcx;
@@ -2942,7 +2937,7 @@ static int tdx_td_vcpu_init(struct kvm_vcpu *vcpu, u64 vcpu_rcx)
* method, but the rest are freed here.
*/
for (; i < kvm_tdx->td.tdcx_nr_pages; i++) {
- __free_page(tdx->vp.tdcx_pages[i]);
+ __tdx_free_control_page(tdx->vp.tdcx_pages[i]);
tdx->vp.tdcx_pages[i] = NULL;
}
return -EIO;
@@ -2970,16 +2965,14 @@ static int tdx_td_vcpu_init(struct kvm_vcpu *vcpu, u64 vcpu_rcx)
free_tdcx:
for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++) {
- if (tdx->vp.tdcx_pages[i])
- __free_page(tdx->vp.tdcx_pages[i]);
+ __tdx_free_control_page(tdx->vp.tdcx_pages[i]);
tdx->vp.tdcx_pages[i] = NULL;
}
kfree(tdx->vp.tdcx_pages);
tdx->vp.tdcx_pages = NULL;
free_tdvpr:
- if (tdx->vp.tdvpr_page)
- __free_page(tdx->vp.tdvpr_page);
+ __tdx_free_control_page(tdx->vp.tdvpr_page);
tdx->vp.tdvpr_page = NULL;
tdx->vp.tdvpr_pa = 0;
--
2.53.0.rc1.217.geba53bf80e-goog