Message-ID: <2b61cda6-4d8f-42d2-8a5e-25c90365602e@linux.intel.com>
Date: Thu, 16 Nov 2023 13:36:13 +0800
From: Binbin Wu <binbin.wu@...ux.intel.com>
To: isaku.yamahata@...el.com
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
isaku.yamahata@...il.com, Paolo Bonzini <pbonzini@...hat.com>,
erdemaktas@...gle.com, Sean Christopherson <seanjc@...gle.com>,
Sagi Shahar <sagis@...gle.com>,
David Matlack <dmatlack@...gle.com>,
Kai Huang <kai.huang@...el.com>,
Zhi Wang <zhi.wang.linux@...il.com>, chen.bo@...el.com,
hang.yuan@...el.com, tina.zhang@...el.com,
Xiaoyao Li <xiaoyao.li@...el.com>
Subject: Re: [PATCH v6 02/16] KVM: TDX: Pass page level to cache flush before
TDX SEAMCALL
On 11/7/2023 11:00 PM, isaku.yamahata@...el.com wrote:
> From: Xiaoyao Li <xiaoyao.li@...el.com>
>
> tdh_mem_page_aug() will support 2MB large pages in the near future. In
> that case, the cache flush needs to cover 2MB instead of 4KB. Introduce
> a helper function that flushes the cache according to the page size, in
> preparation for large pages.
>
> Signed-off-by: Xiaoyao Li <xiaoyao.li@...el.com>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
Nit: Regarding the shortlog, would it be clearer to say "Flush cache for
a page based on page size before TDX SEAMCALL"?
Reviewed-by: Binbin Wu <binbin.wu@...ux.intel.com>
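
For reference, the flush size tracks the mapping size via KVM's huge page
macros in arch/x86/include/asm/kvm_host.h:

    #define KVM_HPAGE_GFN_SHIFT(x)  (((x) - 1) * 9)
    #define KVM_HPAGE_SHIFT(x)      (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
    #define KVM_HPAGE_SIZE(x)       (1UL << KVM_HPAGE_SHIFT(x))

so KVM_HPAGE_SIZE(PG_LEVEL_4K) is 4KB and KVM_HPAGE_SIZE(PG_LEVEL_2M) is
2MB, which is exactly what tdx_clflush_page() passes to
clflush_cache_range().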
> ---
> arch/x86/kvm/vmx/tdx_ops.h | 22 ++++++++++++++--------
> 1 file changed, 14 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/tdx_ops.h b/arch/x86/kvm/vmx/tdx_ops.h
> index fd73a1731bf8..e726102d3523 100644
> --- a/arch/x86/kvm/vmx/tdx_ops.h
> +++ b/arch/x86/kvm/vmx/tdx_ops.h
> @@ -6,6 +6,7 @@
>
> #include <linux/compiler.h>
>
> +#include <asm/pgtable_types.h>
> #include <asm/archrandom.h>
> #include <asm/cacheflush.h>
> #include <asm/asm.h>
> @@ -62,6 +63,11 @@ static inline u64 tdx_seamcall(u64 op, u64 rcx, u64 rdx, u64 r8, u64 r9,
> void pr_tdx_error(u64 op, u64 error_code, const struct tdx_module_args *out);
> #endif
>
> +static inline void tdx_clflush_page(hpa_t addr, enum pg_level level)
> +{
> + clflush_cache_range(__va(addr), KVM_HPAGE_SIZE(level));
> +}
> +
> /*
> * TDX module acquires its internal lock for resources. It doesn't spin to get
> * locks because of its restrictions of allowed execution time. Instead, it
> @@ -94,21 +100,21 @@ static inline u64 tdx_seamcall_sept(u64 op, u64 rcx, u64 rdx, u64 r8, u64 r9,
>
> static inline u64 tdh_mng_addcx(hpa_t tdr, hpa_t addr)
> {
> - clflush_cache_range(__va(addr), PAGE_SIZE);
> + tdx_clflush_page(addr, PG_LEVEL_4K);
> return tdx_seamcall(TDH_MNG_ADDCX, addr, tdr, 0, 0, NULL);
> }
>
> static inline u64 tdh_mem_page_add(hpa_t tdr, gpa_t gpa, hpa_t hpa, hpa_t source,
> struct tdx_module_args *out)
> {
> - clflush_cache_range(__va(hpa), PAGE_SIZE);
> + tdx_clflush_page(hpa, PG_LEVEL_4K);
> return tdx_seamcall_sept(TDH_MEM_PAGE_ADD, gpa, tdr, hpa, source, out);
> }
>
> static inline u64 tdh_mem_sept_add(hpa_t tdr, gpa_t gpa, int level, hpa_t page,
> struct tdx_module_args *out)
> {
> - clflush_cache_range(__va(page), PAGE_SIZE);
> + tdx_clflush_page(page, PG_LEVEL_4K);
> return tdx_seamcall_sept(TDH_MEM_SEPT_ADD, gpa | level, tdr, page, 0, out);
> }
>
> @@ -126,21 +132,21 @@ static inline u64 tdh_mem_sept_remove(hpa_t tdr, gpa_t gpa, int level,
>
> static inline u64 tdh_vp_addcx(hpa_t tdvpr, hpa_t addr)
> {
> - clflush_cache_range(__va(addr), PAGE_SIZE);
> + tdx_clflush_page(addr, PG_LEVEL_4K);
> return tdx_seamcall(TDH_VP_ADDCX, addr, tdvpr, 0, 0, NULL);
> }
>
> static inline u64 tdh_mem_page_relocate(hpa_t tdr, gpa_t gpa, hpa_t hpa,
> struct tdx_module_args *out)
> {
> - clflush_cache_range(__va(hpa), PAGE_SIZE);
> + tdx_clflush_page(hpa, PG_LEVEL_4K);
> return tdx_seamcall_sept(TDH_MEM_PAGE_RELOCATE, gpa, tdr, hpa, 0, out);
> }
>
> static inline u64 tdh_mem_page_aug(hpa_t tdr, gpa_t gpa, hpa_t hpa,
> struct tdx_module_args *out)
> {
> - clflush_cache_range(__va(hpa), PAGE_SIZE);
> + tdx_clflush_page(hpa, PG_LEVEL_4K);
> return tdx_seamcall_sept(TDH_MEM_PAGE_AUG, gpa, tdr, hpa, 0, out);
> }
>
> @@ -157,13 +163,13 @@ static inline u64 tdh_mng_key_config(hpa_t tdr)
>
> static inline u64 tdh_mng_create(hpa_t tdr, int hkid)
> {
> - clflush_cache_range(__va(tdr), PAGE_SIZE);
> + tdx_clflush_page(tdr, PG_LEVEL_4K);
> return tdx_seamcall(TDH_MNG_CREATE, tdr, hkid, 0, 0, NULL);
> }
>
> static inline u64 tdh_vp_create(hpa_t tdr, hpa_t tdvpr)
> {
> - clflush_cache_range(__va(tdvpr), PAGE_SIZE);
> + tdx_clflush_page(tdvpr, PG_LEVEL_4K);
> return tdx_seamcall(TDH_VP_CREATE, tdvpr, tdr, 0, 0, NULL);
> }
>
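With the helper in place, here is a minimal sketch of what a
large-page-aware tdh_mem_page_aug() could look like (illustration only,
not part of this patch; the enum pg_level parameter and the level
encoding in the GPA operand are assumptions, mirroring how
tdh_mem_sept_add() already passes a level):

    static inline u64 tdh_mem_page_aug(hpa_t tdr, gpa_t gpa, enum pg_level level,
                                       hpa_t hpa, struct tdx_module_args *out)
    {
            /* Flush the whole mapping, e.g. 2MB for PG_LEVEL_2M */
            tdx_clflush_page(hpa, level);
            /*
             * Assumed encoding: the SEAMCALL takes a 0-based SEPT level
             * (4KB = 0) in the low bits of the GPA, while KVM's
             * enum pg_level is 1-based (PG_LEVEL_4K = 1), hence level - 1.
             */
            return tdx_seamcall_sept(TDH_MEM_PAGE_AUG, gpa | (level - 1), tdr,
                                     hpa, 0, out);
    }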