Message-ID: <24bd2571-852e-47e6-a08b-17e808d14fd5@linux.intel.com>
Date: Tue, 5 Mar 2024 15:09:48 +0800
From: Binbin Wu <binbin.wu@...ux.intel.com>
To: Paolo Bonzini <pbonzini@...hat.com>
Cc: linux-kernel@...r.kernel.org, kvm@...r.kernel.org, seanjc@...gle.com,
michael.roth@....com, isaku.yamahata@...el.com, thomas.lendacky@....com
Subject: Re: [PATCH 03/21] KVM: x86/mmu: Replace hardcoded value 0 for the
initial value for SPTE
On 2/28/2024 7:20 AM, Paolo Bonzini wrote:
> From: Sean Christopherson <seanjc@...gle.com>
>
> The TDX support will need the "suppress #VE" bit (bit 63) set as the
> initial value for an SPTE. To keep the code change small, introduce a new
> macro, SHADOW_NONPRESENT_VALUE, for the initial value of a shadow page
> table entry (SPTE), and replace the hard-coded value 0 with it. Initialize
> shadow page tables with this value.
>
> The plan is to unconditionally set the "suppress #VE" bit for both AMD and
> Intel because: 1) AMD hardware uses bit 63 as the NX bit for present SPTEs
> and ignores it for non-present SPTEs; 2) for conventional VMX guests, KVM
> never enables "EPT-violation #VE" in the VMCS controls, so the "suppress
> #VE" bit is ignored by hardware.
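For readers following along: a rough sketch of how SHADOW_NONPRESENT_VALUE
might eventually be defined once the "suppress #VE" bit is actually used.
This is an assumption based on the changelog above, not part of this patch:

    /*
     * Hypothetical follow-up (not in this patch): set bit 63, the EPT
     * "suppress #VE" bit, in every non-present SPTE.  Per the changelog,
     * AMD treats bit 63 as NX only for present SPTEs, and VMX ignores it
     * unless EPT-violation #VE is enabled, so this is safe on both.
     */
    #define SHADOW_NONPRESENT_VALUE	BIT_ULL(63)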
>
> Signed-off-by: Sean Christopherson <seanjc@...gle.com>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
> Message-Id: <acdf09bf60cad12c495005bf3495c54f6b3069c9.1705965635.git.isaku.yamahata@...el.com>
> [Remove unnecessary CONFIG_X86_64 check. - Paolo]
> Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
> ---
> arch/x86/kvm/mmu/mmu.c | 14 +++++++++-----
> arch/x86/kvm/mmu/paging_tmpl.h | 2 +-
> arch/x86/kvm/mmu/spte.h | 2 ++
> arch/x86/kvm/mmu/tdp_mmu.c | 14 +++++++-------
> 4 files changed, 19 insertions(+), 13 deletions(-)
Reviewed-by: Binbin Wu <binbin.wu@...ux.intel.com>
BTW, would it be preferable to add "No functional change intended." to the
changelog for a patch like this?
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index e4cc7f764980..b5baf11359ad 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -567,9 +567,9 @@ static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
>
> if (!is_shadow_present_pte(old_spte) ||
> !spte_has_volatile_bits(old_spte))
> - __update_clear_spte_fast(sptep, 0ull);
> + __update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
> else
> - old_spte = __update_clear_spte_slow(sptep, 0ull);
> + old_spte = __update_clear_spte_slow(sptep, SHADOW_NONPRESENT_VALUE);
>
> if (!is_shadow_present_pte(old_spte))
> return old_spte;
> @@ -603,7 +603,7 @@ static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
> */
> static void mmu_spte_clear_no_track(u64 *sptep)
> {
> - __update_clear_spte_fast(sptep, 0ull);
> + __update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
> }
>
> static u64 mmu_spte_get_lockless(u64 *sptep)
> @@ -1950,7 +1950,8 @@ static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
>
> static int kvm_sync_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
> {
> - if (!sp->spt[i])
> +	/* sp->spt[i] still holds the initial value from shadow page table allocation */
> + if (sp->spt[i] == SHADOW_NONPRESENT_VALUE)
> return 0;
>
> return vcpu->arch.mmu->sync_spte(vcpu, sp, i);
> @@ -6173,7 +6174,10 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
> vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
> vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
>
> - vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
> + vcpu->arch.mmu_shadow_page_cache.init_value =
> + SHADOW_NONPRESENT_VALUE;
> + if (!vcpu->arch.mmu_shadow_page_cache.init_value)
> + vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
>
> vcpu->arch.mmu = &vcpu->arch.root_mmu;
> vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
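The init_value handling above depends on the generic MMU memory cache
honoring a custom 64-bit fill value, added earlier in this series. A rough
sketch of how the allocator side might consume it (names and details are
assumptions based on that earlier patch, and may differ):

    static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
						   gfp_t gfp_flags)
    {
	void *page;

	gfp_flags |= mc->gfp_zero;

	if (mc->kmem_cache)
		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);

	page = (void *)__get_free_page(gfp_flags);
	/* Fill the fresh page with init_value instead of zeros. */
	if (page && mc->init_value)
		memset64(page, mc->init_value, PAGE_SIZE / sizeof(u64));
	return page;
    }

Note the hunk above only falls back to __GFP_ZERO when init_value is zero: a
non-zero fill needs the explicit memset64() pass anyway, so requesting a
zeroed page first would be wasted work.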
> diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
> index 4d4e98fe4f35..bebd73cd61bb 100644
> --- a/arch/x86/kvm/mmu/paging_tmpl.h
> +++ b/arch/x86/kvm/mmu/paging_tmpl.h
> @@ -911,7 +911,7 @@ static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int
> gpa_t pte_gpa;
> gfn_t gfn;
>
> - if (WARN_ON_ONCE(!sp->spt[i]))
> + if (WARN_ON_ONCE(sp->spt[i] == SHADOW_NONPRESENT_VALUE))
> return 0;
>
> first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
> diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
> index a129951c9a88..4d1799ba2bf8 100644
> --- a/arch/x86/kvm/mmu/spte.h
> +++ b/arch/x86/kvm/mmu/spte.h
> @@ -149,6 +149,8 @@ static_assert(MMIO_SPTE_GEN_LOW_BITS == 8 && MMIO_SPTE_GEN_HIGH_BITS == 11);
>
> #define MMIO_SPTE_GEN_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0)
>
> +#define SHADOW_NONPRESENT_VALUE 0ULL
> +
> extern u64 __read_mostly shadow_host_writable_mask;
> extern u64 __read_mostly shadow_mmu_writable_mask;
> extern u64 __read_mostly shadow_nx_mask;
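Since SHADOW_NONPRESENT_VALUE is 0ULL here, no functional change is intended
by this patch. Once the value becomes non-zero, a compile-time check along
these lines could guard against it ever aliasing a "present" SPTE (a sketch;
the mask name is taken from spte.h, but adding this assert is an assumption):

    /* A non-present value must never carry the software MMU-present bit. */
    static_assert(!(SHADOW_NONPRESENT_VALUE & SPTE_MMU_PRESENT_MASK));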
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index d078157e62aa..c8a4d92497b4 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -603,7 +603,7 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
> * here since the SPTE is going from non-present to non-present. Use
> * the raw write helper to avoid an unnecessary check on volatile bits.
> */
> - __kvm_tdp_mmu_write_spte(iter->sptep, 0);
> + __kvm_tdp_mmu_write_spte(iter->sptep, SHADOW_NONPRESENT_VALUE);
>
> return 0;
> }
> @@ -740,8 +740,8 @@ static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
> continue;
>
> if (!shared)
> - tdp_mmu_iter_set_spte(kvm, &iter, 0);
> - else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
> + tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
> + else if (tdp_mmu_set_spte_atomic(kvm, &iter, SHADOW_NONPRESENT_VALUE))
> goto retry;
> }
> }
> @@ -808,8 +808,8 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
> if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
> return false;
>
> - tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
> - sp->gfn, sp->role.level + 1);
> + tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte,
> + SHADOW_NONPRESENT_VALUE, sp->gfn, sp->role.level + 1);
>
> return true;
> }
> @@ -843,7 +843,7 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
> !is_last_spte(iter.old_spte, iter.level))
> continue;
>
> - tdp_mmu_iter_set_spte(kvm, &iter, 0);
> + tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
>
> /*
> * Zapping SPTEs in invalid roots doesn't require a TLB flush,
> @@ -1276,7 +1276,7 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
> * invariant that the PFN of a present leaf SPTE can never change.
> * See handle_changed_spte().
> */
> - tdp_mmu_iter_set_spte(kvm, iter, 0);
> + tdp_mmu_iter_set_spte(kvm, iter, SHADOW_NONPRESENT_VALUE);
>
> if (!pte_write(range->arg.pte)) {
> new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,