Message-ID: <20250807094214.4495-1-yan.y.zhao@intel.com>
Date: Thu, 7 Aug 2025 17:42:14 +0800
From: Yan Zhao <yan.y.zhao@...el.com>
To: pbonzini@...hat.com,
seanjc@...gle.com
Cc: linux-kernel@...r.kernel.org,
kvm@...r.kernel.org,
x86@...nel.org,
rick.p.edgecombe@...el.com,
dave.hansen@...el.com,
kas@...nel.org,
tabba@...gle.com,
ackerleytng@...gle.com,
quic_eberman@...cinc.com,
michael.roth@....com,
david@...hat.com,
vannapurve@...gle.com,
vbabka@...e.cz,
thomas.lendacky@....com,
pgonda@...gle.com,
zhiquan1.li@...el.com,
fan.du@...el.com,
jun.miao@...el.com,
ira.weiny@...el.com,
isaku.yamahata@...el.com,
xiaoyao.li@...el.com,
binbin.wu@...ux.intel.com,
chao.p.peng@...el.com,
yan.y.zhao@...el.com
Subject: [RFC PATCH v2 04/23] KVM: TDX: Introduce tdx_clear_folio() to clear huge pages
After removing or reclaiming a guest private page or a control page from a
TD, zero the physical memory using movdir64b() so that the kernel can
safely reuse the pages.

Introduce the function tdx_clear_folio() to zero out physical memory using
movdir64b(), starting from the page at "start_idx" within a "folio" and
spanning "npages" contiguous PFNs.

Convert tdx_clear_page() into a helper that zeroes a single 4KB page via
tdx_clear_folio().
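
To make the intended usage concrete, below is a minimal sketch of the call
pattern (illustration only, not part of the diff; the real callsite is in
tdx_sept_drop_private_spte() below, and PG_LEVEL_2M is just an example
level):

	struct folio *folio = page_folio(page);

	/* Example 1: clear all 512 4KB pages backing a 2MB private page. */
	tdx_clear_folio(folio, folio_page_idx(folio, page),
			KVM_PAGES_PER_HPAGE(PG_LEVEL_2M));

	/* Example 2: a single 4KB page (e.g. a control page) keeps using
	 * the wrapper, which forwards to tdx_clear_folio() with npages = 1.
	 */
	tdx_clear_page(page);
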
Signed-off-by: Xiaoyao Li <xiaoyao.li@...el.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
Signed-off-by: Yan Zhao <yan.y.zhao@...el.com>
---
RFC v2:
- Add tdx_clear_folio().
- Drop inner loop _tdx_clear_page() and move __mb() outside of the loop.
(Rick)
- Use C99-style definition of variables inside a for loop.
- Note: [1] also changes tdx_clear_page(); RFC v2 is currently not based on [1].
[1] https://lore.kernel.org/all/20250724130354.79392-2-adrian.hunter@intel.com
RFC v1:
- split out, let tdx_clear_page() accept level.
---
arch/x86/kvm/vmx/tdx.c | 22 ++++++++++++++++------
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 8eaf8431c5f1..4fabefb27135 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -277,18 +277,21 @@ static inline void tdx_disassociate_vp(struct kvm_vcpu *vcpu)
vcpu->cpu = -1;
}
-static void tdx_clear_page(struct page *page)
+static void tdx_clear_folio(struct folio *folio, unsigned long start_idx,
+ unsigned long npages)
{
const void *zero_page = (const void *) page_to_virt(ZERO_PAGE(0));
- void *dest = page_to_virt(page);
- unsigned long i;
/*
* The page could have been poisoned. MOVDIR64B also clears
* the poison bit so the kernel can safely use the page again.
*/
- for (i = 0; i < PAGE_SIZE; i += 64)
- movdir64b(dest + i, zero_page);
+ for (unsigned long j = 0; j < npages; j++) {
+ void *dest = page_to_virt(folio_page(folio, start_idx + j));
+
+ for (unsigned long i = 0; i < PAGE_SIZE; i += 64)
+ movdir64b(dest + i, zero_page);
+ }
/*
* MOVDIR64B store uses WC buffer. Prevent following memory reads
* from seeing potentially poisoned cache.
@@ -296,6 +299,13 @@ static void tdx_clear_page(struct page *page)
__mb();
}
+static inline void tdx_clear_page(struct page *page)
+{
+ struct folio *folio = page_folio(page);
+
+ tdx_clear_folio(folio, folio_page_idx(folio, page), 1);
+}
+
static void tdx_no_vcpus_enter_start(struct kvm *kvm)
{
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
@@ -1736,7 +1746,7 @@ static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
pr_tdx_error(TDH_PHYMEM_PAGE_WBINVD, err);
return -EIO;
}
- tdx_clear_page(page);
+ tdx_clear_folio(folio, folio_page_idx(folio, page), KVM_PAGES_PER_HPAGE(level));
tdx_pamt_put(page, level);
tdx_unpin(kvm, page);
return 0;
--
2.43.2