Date:   Thu, 3 Mar 2022 18:34:26 +0000
From:   Mingwei Zhang <mizhang@...gle.com>
To:     Sean Christopherson <seanjc@...gle.com>
Cc:     Paolo Bonzini <pbonzini@...hat.com>,
        Christian Borntraeger <borntraeger@...ux.ibm.com>,
        Janosch Frank <frankja@...ux.ibm.com>,
        Claudio Imbrenda <imbrenda@...ux.ibm.com>,
        Vitaly Kuznetsov <vkuznets@...hat.com>,
        Wanpeng Li <wanpengli@...cent.com>,
        Jim Mattson <jmattson@...gle.com>,
        Joerg Roedel <joro@...tes.org>,
        David Hildenbrand <david@...hat.com>, kvm@...r.kernel.org,
        linux-kernel@...r.kernel.org, David Matlack <dmatlack@...gle.com>,
        Ben Gardon <bgardon@...gle.com>
Subject: Re: [PATCH v3 10/28] KVM: x86/mmu: Add helpers to read/write TDP MMU
 SPTEs and document RCU

On Sat, Feb 26, 2022, Sean Christopherson wrote:
> Add helpers to read and write TDP MMU SPTEs instead of open coding
> rcu_dereference() all over the place, and to provide a convenient
> location to document why KVM doesn't exempt holding mmu_lock for write
> from having to hold RCU (and any future changes to the rules).
> 
> No functional change intended.
> 
> Signed-off-by: Sean Christopherson <seanjc@...gle.com>
> Reviewed-by: Ben Gardon <bgardon@...gle.com>

Reviewed-by: Mingwei Zhang <mizhang@...gle.com>
> ---
>  arch/x86/kvm/mmu/tdp_iter.c |  6 +++---
>  arch/x86/kvm/mmu/tdp_iter.h | 16 ++++++++++++++++
>  arch/x86/kvm/mmu/tdp_mmu.c  |  6 +++---
>  3 files changed, 22 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c
> index be3f096db2eb..6d3b3e5a5533 100644
> --- a/arch/x86/kvm/mmu/tdp_iter.c
> +++ b/arch/x86/kvm/mmu/tdp_iter.c
> @@ -12,7 +12,7 @@ static void tdp_iter_refresh_sptep(struct tdp_iter *iter)
>  {
>  	iter->sptep = iter->pt_path[iter->level - 1] +
>  		SHADOW_PT_INDEX(iter->gfn << PAGE_SHIFT, iter->level);
> -	iter->old_spte = READ_ONCE(*rcu_dereference(iter->sptep));
> +	iter->old_spte = kvm_tdp_mmu_read_spte(iter->sptep);
>  }
>  
>  static gfn_t round_gfn_for_level(gfn_t gfn, int level)
> @@ -89,7 +89,7 @@ static bool try_step_down(struct tdp_iter *iter)
>  	 * Reread the SPTE before stepping down to avoid traversing into page
>  	 * tables that are no longer linked from this entry.
>  	 */
> -	iter->old_spte = READ_ONCE(*rcu_dereference(iter->sptep));
> +	iter->old_spte = kvm_tdp_mmu_read_spte(iter->sptep);
>  
>  	child_pt = spte_to_child_pt(iter->old_spte, iter->level);
>  	if (!child_pt)
> @@ -123,7 +123,7 @@ static bool try_step_side(struct tdp_iter *iter)
>  	iter->gfn += KVM_PAGES_PER_HPAGE(iter->level);
>  	iter->next_last_level_gfn = iter->gfn;
>  	iter->sptep++;
> -	iter->old_spte = READ_ONCE(*rcu_dereference(iter->sptep));
> +	iter->old_spte = kvm_tdp_mmu_read_spte(iter->sptep);
>  
>  	return true;
>  }
> diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h
> index 216ebbe76ddd..bb9b581f1ee4 100644
> --- a/arch/x86/kvm/mmu/tdp_iter.h
> +++ b/arch/x86/kvm/mmu/tdp_iter.h
> @@ -9,6 +9,22 @@
>  
>  typedef u64 __rcu *tdp_ptep_t;
>  
> +/*
> + * TDP MMU SPTEs are RCU protected to allow paging structures (non-leaf SPTEs)
> + * to be zapped while holding mmu_lock for read.  Holding RCU isn't required for
> + * correctness if mmu_lock is held for write, but plumbing "struct kvm" down to
> + * the lower depths of the TDP MMU just to make lockdep happy is a nightmare, so
> + * all accesses to SPTEs are done under RCU protection.
> + */
> +static inline u64 kvm_tdp_mmu_read_spte(tdp_ptep_t sptep)
> +{
> +	return READ_ONCE(*rcu_dereference(sptep));
> +}
> +static inline void kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 val)
> +{
> +	WRITE_ONCE(*rcu_dereference(sptep), val);
> +}
> +
>  /*
>   * A TDP iterator performs a pre-order walk over a TDP paging structure.
>   */
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index 4f460782a848..8fbf3364f116 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -609,7 +609,7 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
>  	 * here since the SPTE is going from non-present
>  	 * to non-present.
>  	 */
> -	WRITE_ONCE(*rcu_dereference(iter->sptep), 0);
> +	kvm_tdp_mmu_write_spte(iter->sptep, 0);
>  
>  	return 0;
>  }
> @@ -648,7 +648,7 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
>  	 */
>  	WARN_ON(is_removed_spte(iter->old_spte));
>  
> -	WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);
> +	kvm_tdp_mmu_write_spte(iter->sptep, new_spte);
>  
>  	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
>  			      new_spte, iter->level, false);
> @@ -1046,7 +1046,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
>  			 * because the new value informs the !present
>  			 * path below.
>  			 */
> -			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
> +			iter.old_spte = kvm_tdp_mmu_read_spte(iter.sptep);
>  		}
>  
>  		if (!is_shadow_present_pte(iter.old_spte)) {
> -- 
> 2.35.1.574.g5d30c73bfb-goog
> 

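For readers less familiar with the pattern, here is a minimal sketch of how a caller is expected to use the new helpers. This is not part of the patch and is simplified from the real iterators: the function name is hypothetical, and in the actual TDP MMU code the RCU read-side critical section is entered by the iteration machinery rather than inline like this.

/*
 * Illustrative sketch only -- not part of the patch.  Shows a caller going
 * through kvm_tdp_mmu_read_spte()/kvm_tdp_mmu_write_spte() instead of
 * open-coding READ_ONCE(*rcu_dereference(...)).  The function name is
 * hypothetical; in the real code rcu_read_lock() is taken by the TDP MMU
 * iterator machinery, not inline like this.
 */
static void tdp_mmu_clear_spte_example(tdp_ptep_t sptep)
{
	u64 old_spte;

	rcu_read_lock();

	/* Snapshot the current SPTE value under RCU protection. */
	old_spte = kvm_tdp_mmu_read_spte(sptep);

	/* Zero the SPTE, e.g. as tdp_mmu_zap_spte_atomic() does above. */
	if (old_spte)
		kvm_tdp_mmu_write_spte(sptep, 0);

	rcu_read_unlock();
}

The point of the helpers, as the comment added to tdp_iter.h says, is that every SPTE access funnels through one documented place: all accesses happen under RCU, even when mmu_lock is held for write, so callers never have to reason about which locking mode they are in.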