Message-Id: <8f4125c90898652317ae6bec5d46fe45d3f11eef.1708933624.git.isaku.yamahata@intel.com>
Date: Mon, 26 Feb 2024 00:29:17 -0800
From: isaku.yamahata@...el.com
To: kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: isaku.yamahata@...el.com,
isaku.yamahata@...il.com,
Paolo Bonzini <pbonzini@...hat.com>,
erdemaktas@...gle.com,
Sean Christopherson <seanjc@...gle.com>,
Sagi Shahar <sagis@...gle.com>,
Kai Huang <kai.huang@...el.com>,
chen.bo@...el.com,
hang.yuan@...el.com,
tina.zhang@...el.com,
Xiaoyao Li <xiaoyao.li@...el.com>
Subject: [PATCH v8 03/14] KVM: TDX: Pass KVM page level to tdh_mem_page_aug()
From: Xiaoyao Li <xiaoyao.li@...el.com>
Level info is needed in tdx_clflush_page() so that the cache flush covers
the correct page size.
Besides, explicitly pass the level to the SEAMCALL instead of assuming it
is zero (4K). This way the call works naturally once 2MB support lands.
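For illustration only (not part of this patch), the caller in
tdx_mem_page_aug() is expected to translate KVM's page level into the TDX
S-EPT level before invoking the wrapper, roughly along these lines; the
surrounding variable names are assumptions based on the hunk below:

	/*
	 * Hypothetical caller sketch: KVM's PG_LEVEL_4K is 1 while the
	 * TDX S-EPT level for a 4K page is 0, hence the conversion helper.
	 */
	int tdx_level = pg_level_to_tdx_sept_level(level);
	gpa_t gpa = gfn_to_gpa(gfn);

	err = tdh_mem_page_aug(kvm_tdx->tdr_pa, gpa, tdx_level, hpa, &out);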
Signed-off-by: Xiaoyao Li <xiaoyao.li@...el.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
---
v7:
- Don't pass level to tdh_mem_page_add() since it supports only 4K pages.
- Catch up to the tdx_seamcall() interface change.
---
arch/x86/kvm/vmx/tdx.c | 2 +-
arch/x86/kvm/vmx/tdx_ops.h | 12 +++++++++---
2 files changed, 10 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index a71093f7c3e3..fd992966379c 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1470,7 +1470,7 @@ static int tdx_mem_page_aug(struct kvm *kvm, gfn_t gfn,
union tdx_sept_entry entry;
u64 err;
- err = tdh_mem_page_aug(kvm_tdx->tdr_pa, gpa, hpa, &out);
+ err = tdh_mem_page_aug(kvm_tdx->tdr_pa, gpa, tdx_level, hpa, &out);
if (unlikely(err == TDX_ERROR_SEPT_BUSY)) {
tdx_unpin(kvm, pfn);
return -EAGAIN;
diff --git a/arch/x86/kvm/vmx/tdx_ops.h b/arch/x86/kvm/vmx/tdx_ops.h
index 3af124711e98..ef4748943ac7 100644
--- a/arch/x86/kvm/vmx/tdx_ops.h
+++ b/arch/x86/kvm/vmx/tdx_ops.h
@@ -51,6 +51,11 @@ static inline int pg_level_to_tdx_sept_level(enum pg_level level)
return level - 1;
}
+static inline enum pg_level tdx_sept_level_to_pg_level(int tdx_level)
+{
+ return tdx_level + 1;
+}
+
static inline void tdx_clflush_page(hpa_t addr, enum pg_level level)
{
clflush_cache_range(__va(addr), KVM_HPAGE_SIZE(level));
@@ -100,6 +105,7 @@ static inline u64 tdh_mng_addcx(hpa_t tdr, hpa_t addr)
static inline u64 tdh_mem_page_add(hpa_t tdr, gpa_t gpa, hpa_t hpa, hpa_t source,
struct tdx_module_args *out)
{
+	/* TDH.MEM.PAGE.ADD() supports only 4K pages; TDX 4K page level = 0. */
struct tdx_module_args in = {
.rcx = gpa,
.rdx = tdr,
@@ -170,16 +176,16 @@ static inline u64 tdh_mem_page_relocate(hpa_t tdr, gpa_t gpa, hpa_t hpa,
return tdx_seamcall_sept(TDH_MEM_PAGE_RELOCATE, &in, out);
}
-static inline u64 tdh_mem_page_aug(hpa_t tdr, gpa_t gpa, hpa_t hpa,
+static inline u64 tdh_mem_page_aug(hpa_t tdr, gpa_t gpa, int level, hpa_t hpa,
struct tdx_module_args *out)
{
struct tdx_module_args in = {
- .rcx = gpa,
+ .rcx = gpa | level,
.rdx = tdr,
.r8 = hpa,
};
- tdx_clflush_page(hpa, PG_LEVEL_4K);
+ tdx_clflush_page(hpa, tdx_sept_level_to_pg_level(level));
return tdx_seamcall_sept(TDH_MEM_PAGE_AUG, &in, out);
}
--
2.25.1