Date:   Thu, 28 Sep 2023 19:46:35 +0300
From:   Maxim Levitsky <mlevitsk@...hat.com>
To:     Paolo Bonzini <pbonzini@...hat.com>, linux-kernel@...r.kernel.org,
        kvm@...r.kernel.org
Subject: Re: [PATCH 1/3] KVM: x86/mmu: remove unnecessary "bool shared"
 argument from functions

On Thu, 2023-09-28 at 12:29 -0400, Paolo Bonzini wrote:
> Neither tdp_mmu_next_root nor kvm_tdp_mmu_put_root need to know
> if the lock is taken for read or write.  Either way, protection
> is achieved via RCU and tdp_mmu_pages_lock.  Remove the argument
> and just assert that the lock is taken.
> 
> Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
> ---
>  arch/x86/kvm/mmu/mmu.c     |  2 +-
>  arch/x86/kvm/mmu/tdp_mmu.c | 34 +++++++++++++++++++++-------------
>  arch/x86/kvm/mmu/tdp_mmu.h |  3 +--
>  3 files changed, 23 insertions(+), 16 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index f7901cb4d2fa..64b1bdba943e 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -3548,7 +3548,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
>  		return;
>  
>  	if (is_tdp_mmu_page(sp))
> -		kvm_tdp_mmu_put_root(kvm, sp, false);
> +		kvm_tdp_mmu_put_root(kvm, sp);
>  	else if (!--sp->root_count && sp->role.invalid)
>  		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
>  
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index 6cd4dd631a2f..ab0876015be7 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -73,10 +73,13 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
>  	tdp_mmu_free_sp(sp);
>  }
>  
> -void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
> -			  bool shared)
> +void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
>  {
> -	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
> +	/*
> +	 * Either read or write is okay, but the lock is needed because
> +	 * writers might not take tdp_mmu_pages_lock.
> +	 */
> +	lockdep_assert_held(&kvm->mmu_lock);

I double-checked all callers and indeed at least the read lock is held.
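
For reference, the shared-aware helper being dropped here looks roughly
like this (quoting from memory, so the exact shape might be slightly off):

	static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
								      bool shared)
	{
		if (shared)
			lockdep_assert_held_read(&kvm->mmu_lock);
		else
			lockdep_assert_held_write(&kvm->mmu_lock);

		return true;
	}

so the plain lockdep_assert_held() above is strictly weaker: it is
satisfied by either a read or a write holder of mmu_lock, which matches
the new comment.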

>  
>  	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
>  		return;
> @@ -106,10 +109,16 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
>   */
>  static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
>  					      struct kvm_mmu_page *prev_root,
> -					      bool shared, bool only_valid)
> +					      bool only_valid)
>  {
>  	struct kvm_mmu_page *next_root;
>  
> +	/*
> +	 * While the roots themselves are RCU-protected, fields such as
> +	 * role.invalid are protected by mmu_lock.
> +	 */
> +	lockdep_assert_held(&kvm->mmu_lock);
> +
>  	rcu_read_lock();
>  
>  	if (prev_root)
> @@ -132,7 +141,7 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
>  	rcu_read_unlock();
>  
>  	if (prev_root)
> -		kvm_tdp_mmu_put_root(kvm, prev_root, shared);
> +		kvm_tdp_mmu_put_root(kvm, prev_root);
>  
>  	return next_root;
>  }
> @@ -144,13 +153,12 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
>   * recent root. (Unless keeping a live reference is desirable.)
>   *
>   * If shared is set, this function is operating under the MMU lock in read
> - * mode. In the unlikely event that this thread must free a root, the lock
> - * will be temporarily dropped and reacquired in write mode.
> + * mode.
>   */
>  #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
> -	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);	\
> -	     _root;								\
> -	     _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))	\
> +	for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);	\
> +	     _root;							\
> +	     _root = tdp_mmu_next_root(_kvm, _root, _only_valid))	\
>  		if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) &&		\
>  		    kvm_mmu_page_as_id(_root) != _as_id) {			\
>  		} else
> @@ -159,9 +167,9 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
>  	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
>  
>  #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared)			\
> -	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, false);		\
> -	     _root;								\
> -	     _root = tdp_mmu_next_root(_kvm, _root, _shared, false))		\
> +	for (_root = tdp_mmu_next_root(_kvm, NULL, false);		\
> +	     _root;							\
> +	     _root = tdp_mmu_next_root(_kvm, _root, false))		\
>  		if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) {		\
>  		} else
>  
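For anyone following along, a sketch of how the iterator ends up being
used after this change (illustrative only, not copied from the tree):

	struct kvm_mmu_page *root;

	/*
	 * Walk all roots with the MMU lock taken in read mode; the macro
	 * itself asserts the lock via kvm_lockdep_assert_mmu_lock_held(),
	 * and tdp_mmu_next_root() drops the previous root's reference on
	 * each step.
	 */
	read_lock(&kvm->mmu_lock);
	for_each_tdp_mmu_root_yield_safe(kvm, root, true) {
		/* ... operate on root, possibly yielding ... */
	}
	read_unlock(&kvm->mmu_lock);
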
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
> index 733a3aef3a96..20d97aa46c49 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.h
> +++ b/arch/x86/kvm/mmu/tdp_mmu.h
> @@ -17,8 +17,7 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
>  	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
>  }
>  
> -void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
> -			  bool shared);
> +void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);
>  
>  bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
>  bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);


I don't know all of the details of the KVM MMU, so I might have missed
something, but I still need to get back into reviewing....

Reviewed-by: Maxim Levitsky <mlevitsk@...hat.com>


Best regards,
	Maxim Levitsky
