Message-ID: <20250424030532.32756-1-yan.y.zhao@intel.com>
Date: Thu, 24 Apr 2025 11:05:32 +0800
From: Yan Zhao <yan.y.zhao@...el.com>
To: pbonzini@...hat.com,
seanjc@...gle.com
Cc: linux-kernel@...r.kernel.org,
kvm@...r.kernel.org,
x86@...nel.org,
rick.p.edgecombe@...el.com,
dave.hansen@...el.com,
kirill.shutemov@...el.com,
tabba@...gle.com,
ackerleytng@...gle.com,
quic_eberman@...cinc.com,
michael.roth@....com,
david@...hat.com,
vannapurve@...gle.com,
vbabka@...e.cz,
jroedel@...e.de,
thomas.lendacky@....com,
pgonda@...gle.com,
zhiquan1.li@...el.com,
fan.du@...el.com,
jun.miao@...el.com,
ira.weiny@...el.com,
isaku.yamahata@...el.com,
xiaoyao.li@...el.com,
binbin.wu@...ux.intel.com,
chao.p.peng@...el.com,
Yan Zhao <yan.y.zhao@...el.com>
Subject: [RFC PATCH 06/21] KVM: TDX: Assert the reclaimed pages were mapped as expected

From: Xiaoyao Li <xiaoyao.li@...el.com>

Provide level information to tdx_reclaim_page() so that it can verify that
the reclaimed pages were mapped at the expected level in the S-EPT.
TDH.PHYMEM.PAGE.RECLAIM reports the size at which the page was actually
mapped, so warn once if that size does not match the caller's expectation.
Pass the level through to tdx_clear_page() as well instead of hardcoding
PG_LEVEL_4K.
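
For reference, the WARN_ON_ONCE() added below assumes the usual
offset-by-one conversion between Linux page-table levels and the TDX
S-EPT level encoding (PG_LEVEL_4K == 1 maps to S-EPT level 0,
PG_LEVEL_2M == 2 to level 1, and so on). A minimal sketch of that
helper, assuming the definition in arch/x86/kvm/vmx/tdx.h:

	/*
	 * Sketch of the assumed conversion: Linux's PG_LEVEL_4K/2M/1G
	 * enum values are 1/2/3, while the TDX module encodes the
	 * corresponding S-EPT mapping sizes as levels 0/1/2.
	 */
	static inline u8 pg_level_to_tdx_sept_level(enum pg_level level)
	{
		return level - 1;
	}
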
[Yan: split patch, wrote patch log]
Signed-off-by: Xiaoyao Li <xiaoyao.li@...el.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
Signed-off-by: Yan Zhao <yan.y.zhao@...el.com>
---
arch/x86/kvm/vmx/tdx.c | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 1186085795ac..69f3140928b5 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -325,7 +325,7 @@ static void tdx_no_vcpus_enter_stop(struct kvm *kvm)
}
/* TDH.PHYMEM.PAGE.RECLAIM is allowed only when destroying the TD. */
-static int __tdx_reclaim_page(struct page *page)
+static int __tdx_reclaim_page(struct page *page, int level)
{
u64 err, tdx_pt, tdx_owner, tdx_size;
@@ -340,16 +340,18 @@ static int __tdx_reclaim_page(struct page *page)
pr_tdx_error_3(TDH_PHYMEM_PAGE_RECLAIM, err, tdx_pt, tdx_owner, tdx_size);
return -EIO;
}
+
+ WARN_ON_ONCE(tdx_size != pg_level_to_tdx_sept_level(level));
return 0;
}
-static int tdx_reclaim_page(struct page *page)
+static int tdx_reclaim_page(struct page *page, int level)
{
int r;
- r = __tdx_reclaim_page(page);
+ r = __tdx_reclaim_page(page, level);
if (!r)
- tdx_clear_page(page, PG_LEVEL_4K);
+ tdx_clear_page(page, level);
return r;
}
@@ -364,7 +366,7 @@ static void tdx_reclaim_control_page(struct page *ctrl_page)
* Leak the page if the kernel failed to reclaim the page.
* The kernel cannot use it safely anymore.
*/
- if (tdx_reclaim_page(ctrl_page))
+ if (tdx_reclaim_page(ctrl_page, PG_LEVEL_4K))
return;
__free_page(ctrl_page);
@@ -583,7 +585,7 @@ static void tdx_reclaim_td_control_pages(struct kvm *kvm)
if (!kvm_tdx->td.tdr_page)
return;
- if (__tdx_reclaim_page(kvm_tdx->td.tdr_page))
+ if (__tdx_reclaim_page(kvm_tdx->td.tdr_page, PG_LEVEL_4K))
return;
/*
@@ -1791,7 +1793,7 @@ int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
* The HKID assigned to this TD was already freed and cache was
* already flushed. We don't have to flush again.
*/
- return tdx_reclaim_page(virt_to_page(private_spt));
+ return tdx_reclaim_page(virt_to_page(private_spt), PG_LEVEL_4K);
}
int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
--
2.43.2