Message-ID: <20250424030516.32740-1-yan.y.zhao@intel.com>
Date: Thu, 24 Apr 2025 11:05:16 +0800
From: Yan Zhao <yan.y.zhao@...el.com>
To: pbonzini@...hat.com,
seanjc@...gle.com
Cc: linux-kernel@...r.kernel.org,
kvm@...r.kernel.org,
x86@...nel.org,
rick.p.edgecombe@...el.com,
dave.hansen@...el.com,
kirill.shutemov@...el.com,
tabba@...gle.com,
ackerleytng@...gle.com,
quic_eberman@...cinc.com,
michael.roth@....com,
david@...hat.com,
vannapurve@...gle.com,
vbabka@...e.cz,
jroedel@...e.de,
thomas.lendacky@....com,
pgonda@...gle.com,
zhiquan1.li@...el.com,
fan.du@...el.com,
jun.miao@...el.com,
ira.weiny@...el.com,
isaku.yamahata@...el.com,
xiaoyao.li@...el.com,
binbin.wu@...ux.intel.com,
chao.p.peng@...el.com,
Yan Zhao <yan.y.zhao@...el.com>
Subject: [RFC PATCH 05/21] KVM: TDX: Enhance tdx_clear_page() to support huge pages

From: Xiaoyao Li <xiaoyao.li@...el.com>

KVM invokes tdx_clear_page() to zero pages using movdir64b().
Include level information so that tdx_clear_page() can zero a huge page.

[Yan: split out, let tdx_clear_page() accept level]

Signed-off-by: Xiaoyao Li <xiaoyao.li@...el.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
Signed-off-by: Yan Zhao <yan.y.zhao@...el.com>
---
arch/x86/kvm/vmx/tdx.c | 19 ++++++++++++++-----
1 file changed, 14 insertions(+), 5 deletions(-)
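
For reference, KVM_PAGES_PER_HPAGE(level) is what drives the loop count
in the new tdx_clear_page(): it yields the number of 4KB pages backing a
mapping at the given page-table level. Below is a minimal stand-alone
sketch of that mapping (plain user-space C; the PG_LEVEL_* values and
the 9-bits-per-level shift are restated locally for illustration, not
taken from the kernel headers):

#include <stdio.h>

/* x86 page-table level numbering, restated here for illustration. */
#define PG_LEVEL_4K	1
#define PG_LEVEL_2M	2
#define PG_LEVEL_1G	3

/* Each level up covers 9 more address bits, i.e. 512x more 4KB pages. */
static unsigned long pages_per_hpage(int level)
{
	return 1UL << ((level - PG_LEVEL_4K) * 9);
}

int main(void)
{
	printf("4K: %lu page(s)\n", pages_per_hpage(PG_LEVEL_4K)); /* 1 */
	printf("2M: %lu page(s)\n", pages_per_hpage(PG_LEVEL_2M)); /* 512 */
	printf("1G: %lu page(s)\n", pages_per_hpage(PG_LEVEL_1G)); /* 262144 */
	return 0;
}

So a 2MB mapping is cleared as 512 consecutive 4KB pages, and a 1GB
mapping as 262144, with __tdx_clear_page() handling one 4KB page per
iteration.
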
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 03885cb2869b..1186085795ac 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -276,7 +276,7 @@ static inline void tdx_disassociate_vp(struct kvm_vcpu *vcpu)
 	vcpu->cpu = -1;
 }
 
-static void tdx_clear_page(struct page *page)
+static void __tdx_clear_page(struct page *page)
 {
 	const void *zero_page = (const void *) page_to_virt(ZERO_PAGE(0));
 	void *dest = page_to_virt(page);
@@ -295,6 +295,15 @@ static void tdx_clear_page(struct page *page)
 	__mb();
 }
 
+static void tdx_clear_page(struct page *page, int level)
+{
+	unsigned long nr = KVM_PAGES_PER_HPAGE(level);
+	unsigned long idx = 0;
+
+	while (nr--)
+		__tdx_clear_page(nth_page(page, idx++));
+}
+
 static void tdx_no_vcpus_enter_start(struct kvm *kvm)
 {
 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
@@ -340,11 +349,10 @@ static int tdx_reclaim_page(struct page *page)
 
 	r = __tdx_reclaim_page(page);
 	if (!r)
-		tdx_clear_page(page);
+		tdx_clear_page(page, PG_LEVEL_4K);
 	return r;
 }
 
-
 /*
  * Reclaim the TD control page(s) which are crypto-protected by TDX guest's
  * private KeyID. Assume the cache associated with the TDX private KeyID has
@@ -588,7 +596,7 @@ static void tdx_reclaim_td_control_pages(struct kvm *kvm)
 		pr_tdx_error(TDH_PHYMEM_PAGE_WBINVD, err);
 		return;
 	}
-	tdx_clear_page(kvm_tdx->td.tdr_page);
+	tdx_clear_page(kvm_tdx->td.tdr_page, PG_LEVEL_4K);
 	__free_page(kvm_tdx->td.tdr_page);
 	kvm_tdx->td.tdr_page = NULL;
 
@@ -1621,7 +1629,8 @@ static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
 		pr_tdx_error(TDH_PHYMEM_PAGE_WBINVD, err);
 		return -EIO;
 	}
-	tdx_clear_page(page);
+
+	tdx_clear_page(page, level);
 	tdx_unpin(kvm, page);
 	return 0;
 }
--
2.43.2