Message-Id: <0fc03e6439409b54e0477128c33e11438a46253f.1705965958.git.isaku.yamahata@intel.com>
Date: Mon, 22 Jan 2024 16:22:16 -0800
From: isaku.yamahata@...el.com
To: kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: isaku.yamahata@...el.com,
isaku.yamahata@...il.com,
Paolo Bonzini <pbonzini@...hat.com>,
erdemaktas@...gle.com,
Sean Christopherson <seanjc@...gle.com>,
Sagi Shahar <sagis@...gle.com>,
Kai Huang <kai.huang@...el.com>,
chen.bo@...el.com,
hang.yuan@...el.com,
tina.zhang@...el.com,
Xiaoyao Li <xiaoyao.li@...el.com>,
Binbin Wu <binbin.wu@...ux.intel.com>
Subject: [PATCH v7 01/13] KVM: TDX: Flush cache based on page size before TDX SEAMCALL
From: Xiaoyao Li <xiaoyao.li@...el.com>
tdh_mem_page_aug() will support 2MB large pages in the near future. The cache
flush then needs to cover 2MB instead of 4KB in that case. Introduce a helper
function that flushes the cache based on the page size, in preparation for
large pages.
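For illustration only (not part of this patch), a large-page-aware
tdh_mem_page_aug() could then pass the mapping level straight through to the
new helper. The extra "level" parameter, the GPA encoding, and the
tdx_module_args argument struct are assumptions about the follow-up
large-page patches, not the current API:

  static inline u64 tdh_mem_page_aug(hpa_t tdr, gpa_t gpa, enum pg_level level,
				     hpa_t hpa, struct tdx_module_args *out)
  {
	struct tdx_module_args in = {
		.rcx = gpa | pg_level_to_tdx_sept_level(level),
		.rdx = tdr,
		.r8 = hpa,
	};

	/* Flushes 4KB for PG_LEVEL_4K, 2MB for PG_LEVEL_2M. */
	tdx_clflush_page(hpa, level);
	return tdx_seamcall_sept(TDH_MEM_PAGE_AUG, &in, out);
  }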
Signed-off-by: Xiaoyao Li <xiaoyao.li@...el.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
Reviewed-by: Binbin Wu <binbin.wu@...ux.intel.com>
---
v6:
- Catch up with the tdx_seamcall() change
---
arch/x86/kvm/vmx/tdx_ops.h | 22 ++++++++++++++--------
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/vmx/tdx_ops.h b/arch/x86/kvm/vmx/tdx_ops.h
index 3513d5df10ee..2afd927eaa45 100644
--- a/arch/x86/kvm/vmx/tdx_ops.h
+++ b/arch/x86/kvm/vmx/tdx_ops.h
@@ -6,6 +6,7 @@
#include <linux/compiler.h>
+#include <asm/pgtable_types.h>
#include <asm/archrandom.h>
#include <asm/cacheflush.h>
#include <asm/asm.h>
@@ -58,6 +59,11 @@ static inline int pg_level_to_tdx_sept_level(enum pg_level level)
return level - 1;
}
+static inline void tdx_clflush_page(hpa_t addr, enum pg_level level)
+{
+ clflush_cache_range(__va(addr), KVM_HPAGE_SIZE(level));
+}
+
/*
* TDX module acquires its internal lock for resources. It doesn't spin to get
* locks because of its restrictions of allowed execution time. Instead, it
@@ -95,7 +101,7 @@ static inline u64 tdh_mng_addcx(hpa_t tdr, hpa_t addr)
.rdx = tdr,
};
- clflush_cache_range(__va(addr), PAGE_SIZE);
+ tdx_clflush_page(addr, PG_LEVEL_4K);
return tdx_seamcall(TDH_MNG_ADDCX, &in, NULL);
}
@@ -109,7 +115,7 @@ static inline u64 tdh_mem_page_add(hpa_t tdr, gpa_t gpa, hpa_t hpa, hpa_t source
.r9 = source,
};
- clflush_cache_range(__va(hpa), PAGE_SIZE);
+ tdx_clflush_page(hpa, PG_LEVEL_4K);
return tdx_seamcall_sept(TDH_MEM_PAGE_ADD, &in, out);
}
@@ -122,7 +128,7 @@ static inline u64 tdh_mem_sept_add(hpa_t tdr, gpa_t gpa, int level, hpa_t page,
.r8 = page,
};
- clflush_cache_range(__va(page), PAGE_SIZE);
+ tdx_clflush_page(page, PG_LEVEL_4K);
return tdx_seamcall_sept(TDH_MEM_SEPT_ADD, &in, out);
}
@@ -155,7 +161,7 @@ static inline u64 tdh_vp_addcx(hpa_t tdvpr, hpa_t addr)
.rdx = tdvpr,
};
- clflush_cache_range(__va(addr), PAGE_SIZE);
+ tdx_clflush_page(addr, PG_LEVEL_4K);
return tdx_seamcall(TDH_VP_ADDCX, &in, NULL);
}
@@ -168,7 +174,7 @@ static inline u64 tdh_mem_page_relocate(hpa_t tdr, gpa_t gpa, hpa_t hpa,
.r8 = hpa,
};
- clflush_cache_range(__va(hpa), PAGE_SIZE);
+ tdx_clflush_page(hpa, PG_LEVEL_4K);
return tdx_seamcall_sept(TDH_MEM_PAGE_RELOCATE, &in, out);
}
@@ -181,7 +187,7 @@ static inline u64 tdh_mem_page_aug(hpa_t tdr, gpa_t gpa, hpa_t hpa,
.r8 = hpa,
};
- clflush_cache_range(__va(hpa), PAGE_SIZE);
+ tdx_clflush_page(hpa, PG_LEVEL_4K);
return tdx_seamcall_sept(TDH_MEM_PAGE_AUG, &in, out);
}
@@ -212,7 +218,7 @@ static inline u64 tdh_mng_create(hpa_t tdr, int hkid)
.rdx = hkid,
};
- clflush_cache_range(__va(tdr), PAGE_SIZE);
+ tdx_clflush_page(tdr, PG_LEVEL_4K);
return tdx_seamcall(TDH_MNG_CREATE, &in, NULL);
}
@@ -223,7 +229,7 @@ static inline u64 tdh_vp_create(hpa_t tdr, hpa_t tdvpr)
.rdx = tdr,
};
- clflush_cache_range(__va(tdvpr), PAGE_SIZE);
+ tdx_clflush_page(tdvpr, PG_LEVEL_4K);
return tdx_seamcall(TDH_VP_CREATE, &in, NULL);
}
--
2.25.1