Message-ID: <c8e33fa0-49a1-43d2-9500-a94e951a0d45@linux.intel.com>
Date: Wed, 13 Dec 2023 17:04:54 +0800
From: Binbin Wu <binbin.wu@...ux.intel.com>
To: isaku.yamahata@...el.com
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
isaku.yamahata@...il.com, Paolo Bonzini <pbonzini@...hat.com>,
erdemaktas@...gle.com, Sean Christopherson <seanjc@...gle.com>,
Sagi Shahar <sagis@...gle.com>,
David Matlack <dmatlack@...gle.com>,
Kai Huang <kai.huang@...el.com>,
Zhi Wang <zhi.wang.linux@...il.com>, chen.bo@...el.com,
hang.yuan@...el.com, tina.zhang@...el.com,
Rick Edgecombe <rick.p.edgecombe@...el.com>
Subject: Re: [PATCH v17 029/116] KVM: x86/mmu: Add address conversion
functions for TDX shared bit of GPA
On 11/7/2023 10:55 PM, isaku.yamahata@...el.com wrote:
> From: Isaku Yamahata <isaku.yamahata@...el.com>
>
> TDX repurposes one GPA bit (bit 51 or bit 47, depending on configuration) to
> indicate whether the GPA is private (if cleared) or shared (if set) with the
> VMM. If the GPA.shared bit is set, the GPA is covered by the existing
> conventional EPT pointed to by the EPTP. If the GPA.shared bit is cleared,
> the GPA is covered by the TDX module and the VMM has to issue SEAMCALLs to
> operate on it.
>
> Add a member to remember the GPA shared bit for each guest TD, add address
> conversion functions between private GPA and shared GPA, and add a helper to
> test whether a GPA is private.
>
> Because struct kvm_arch (or rather struct kvm, which contains struct
> kvm_arch; see kvm_arch_alloc_vm(), which passes __GFP_ZERO) is zero-cleared
> when allocated, the new member recording the GPA shared bit is guaranteed to
> be zero with this patch unless it is initialized explicitly.
>
> Co-developed-by: Rick Edgecombe <rick.p.edgecombe@...el.com>
> Signed-off-by: Rick Edgecombe <rick.p.edgecombe@...el.com>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
Reviewed-by: Binbin Wu <binbin.wu@...ux.intel.com>
> ---
> arch/x86/include/asm/kvm_host.h | 4 ++++
> arch/x86/kvm/mmu.h | 27 +++++++++++++++++++++++++++
> arch/x86/kvm/vmx/tdx.c | 5 +++++
> 3 files changed, 36 insertions(+)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 78aa844f4dba..babdc3a6ba5e 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1475,6 +1475,10 @@ struct kvm_arch {
> */
> #define SPLIT_DESC_CACHE_MIN_NR_OBJECTS (SPTE_ENT_PER_PAGE + 1)
> struct kvm_mmu_memory_cache split_desc_cache;
> +
> +#ifdef CONFIG_KVM_MMU_PRIVATE
> + gfn_t gfn_shared_mask;
> +#endif
> };
>
> struct kvm_vm_stat {
> diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
> index bb8c86eefac0..f64bb734fbb6 100644
> --- a/arch/x86/kvm/mmu.h
> +++ b/arch/x86/kvm/mmu.h
> @@ -311,4 +311,31 @@ static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu,
> return gpa;
> return translate_nested_gpa(vcpu, gpa, access, exception);
> }
> +
> +static inline gfn_t kvm_gfn_shared_mask(const struct kvm *kvm)
> +{
> +#ifdef CONFIG_KVM_MMU_PRIVATE
> + return kvm->arch.gfn_shared_mask;
> +#else
> + return 0;
> +#endif
> +}
> +
> +static inline gfn_t kvm_gfn_to_shared(const struct kvm *kvm, gfn_t gfn)
> +{
> + return gfn | kvm_gfn_shared_mask(kvm);
> +}
> +
> +static inline gfn_t kvm_gfn_to_private(const struct kvm *kvm, gfn_t gfn)
> +{
> + return gfn & ~kvm_gfn_shared_mask(kvm);
> +}
> +
> +static inline bool kvm_is_private_gpa(const struct kvm *kvm, gpa_t gpa)
> +{
> + gfn_t mask = kvm_gfn_shared_mask(kvm);
> +
> + return mask && !(gpa_to_gfn(gpa) & mask);
> +}
> +
> #endif
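
For anyone reading along who is new to the shared-bit scheme, here is a minimal
standalone sketch (plain C, not kernel code; the gfn_t/gpa_t typedefs, the
PAGE_SHIFT of 12 and the 4-level shared bit at GPA bit 47 are assumptions made
purely for illustration) that mirrors what these helpers compute:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
typedef uint64_t gpa_t;

#define PAGE_SHIFT      12
#define BIT_ULL(n)      (1ULL << (n))
#define gpa_to_gfn(gpa) ((gfn_t)((gpa) >> PAGE_SHIFT))

/* GPA bit 47 expressed as a GFN bit, like kvm->arch.gfn_shared_mask. */
static const gfn_t gfn_shared_mask = BIT_ULL(47) >> PAGE_SHIFT;

static gfn_t gfn_to_shared(gfn_t gfn)  { return gfn | gfn_shared_mask; }
static gfn_t gfn_to_private(gfn_t gfn) { return gfn & ~gfn_shared_mask; }

static int is_private_gpa(gpa_t gpa)
{
        /* Private only if a shared mask exists and the shared bit is clear. */
        return gfn_shared_mask && !(gpa_to_gfn(gpa) & gfn_shared_mask);
}

int main(void)
{
        gpa_t gpa = 0x1234000ULL;       /* shared bit clear -> private */
        gfn_t gfn = gpa_to_gfn(gpa);

        printf("is private: %d\n", is_private_gpa(gpa));          /* 1 */
        printf("shared gfn: 0x%llx\n",
               (unsigned long long)gfn_to_shared(gfn));    /* 0x800001234 */
        printf("back to private gfn: 0x%llx\n",
               (unsigned long long)gfn_to_private(gfn_to_shared(gfn)));
        return 0;
}

Note that the stored mask is a GFN mask, not a GPA mask, which is why
kvm_is_private_gpa() converts the GPA to a GFN before testing the bit.
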
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index c1a8560981a3..fe793425d393 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -878,6 +878,11 @@ static int tdx_td_init(struct kvm *kvm, struct kvm_tdx_cmd *cmd)
> kvm_tdx->attributes = td_params->attributes;
> kvm_tdx->xfam = td_params->xfam;
>
> + if (td_params->exec_controls & TDX_EXEC_CONTROL_MAX_GPAW)
> + kvm->arch.gfn_shared_mask = gpa_to_gfn(BIT_ULL(51));
> + else
> + kvm->arch.gfn_shared_mask = gpa_to_gfn(BIT_ULL(47));
> +
> out:
> /* kfree() accepts NULL. */
> kfree(init_vm);
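
As a quick arithmetic check of the two GPAW cases above (a standalone sketch;
a PAGE_SHIFT of 12 is assumed, matching x86), the GPA shared bit maps to a
single GFN bit either way:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT      12
#define BIT_ULL(n)      (1ULL << (n))
#define gpa_to_gfn(gpa) ((uint64_t)(gpa) >> PAGE_SHIFT)

int main(void)
{
        /* MAX_GPAW set: shared bit is GPA bit 51 -> GFN bit 39. */
        assert(gpa_to_gfn(BIT_ULL(51)) == BIT_ULL(39));
        /* MAX_GPAW clear: shared bit is GPA bit 47 -> GFN bit 35. */
        assert(gpa_to_gfn(BIT_ULL(47)) == BIT_ULL(35));
        return 0;
}

And since kvm_arch_alloc_vm() allocates struct kvm with __GFP_ZERO, a non-TD VM
keeps gfn_shared_mask at 0, so kvm_gfn_shared_mask() and kvm_is_private_gpa()
naturally report "no shared bit" / "not private" for such guests.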