Message-ID: <87360hy5hp.fsf@x220.int.ebiederm.org>
Date:   Mon, 07 Dec 2020 09:56:34 -0600
From:   ebiederm@...ssion.com (Eric W. Biederman)
To:     Peter Zijlstra <peterz@...radead.org>
Cc:     Waiman Long <longman@...hat.com>, linux-kernel@...r.kernel.org,
        Linus Torvalds <torvalds@...ux-foundation.org>,
        Ingo Molnar <mingo@...hat.com>, Will Deacon <will@...nel.org>,
        Jann Horn <jannh@...gle.com>,
        Vasiliy Kulikov <segoon@...nwall.com>,
        Al Viro <viro@...iv.linux.org.uk>,
        Bernd Edlinger <bernd.edlinger@...mail.de>,
        Oleg Nesterov <oleg@...hat.com>,
        Christopher Yeoh <cyeoh@....ibm.com>,
        Cyrill Gorcunov <gorcunov@...il.com>,
        Sargun Dhillon <sargun@...gun.me>,
        Christian Brauner <christian.brauner@...ntu.com>,
        Arnd Bergmann <arnd@...db.de>,
        Arnaldo Carvalho de Melo <acme@...nel.org>
Subject: Re: [PATCH 2/3] rwsem: Implement down_read_interruptible

Peter Zijlstra <peterz@...radead.org> writes:

> On Thu, Dec 03, 2020 at 08:59:13PM -0500, Waiman Long wrote:
>> On 12/3/20 3:11 PM, Eric W. Biederman wrote:
>
>> > +static inline int __down_read_interruptible(struct rw_semaphore *sem)
>> > +{
>> > +	if (!rwsem_read_trylock(sem)) {
>> > +		if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_INTERRUPTIBLE)))
>> > +			return -EINTR;
>> > +		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
>> > +	} else {
>> > +		rwsem_set_reader_owned(sem);
>> > +	}
>> > +	return 0;
>> > +}
>> > +
>> >   static inline int __down_read_killable(struct rw_semaphore *sem)
>> >   {
>> >   	if (!rwsem_read_trylock(sem)) {
>> > @@ -1495,6 +1507,20 @@ void __sched down_read(struct rw_semaphore *sem)
>> >   }
>> >   EXPORT_SYMBOL(down_read);
>> > +int __sched down_read_interruptible(struct rw_semaphore *sem)
>> > +{
>> > +	might_sleep();
>> > +	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
>> > +
>> > +	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
>> > +		rwsem_release(&sem->dep_map, _RET_IP_);
>> > +		return -EINTR;
>> > +	}
>> > +
>> > +	return 0;
>> > +}
>> > +EXPORT_SYMBOL(down_read_interruptible);
>> > +
>> >   int __sched down_read_killable(struct rw_semaphore *sem)
>> >   {
>> >   	might_sleep();
>> 
>> Acked-by: Waiman Long <longman@...hat.com>
>
> Yeah, that seems correct. There's an unfortunate amount of copy-paste
> there, though.
>
> Do we want to follow that up with something like this?
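For reference, the LOCK_CONTENDED_RETURN() used in the first hunk above
behaves roughly as follows when CONFIG_LOCK_STAT is enabled. This is a
from-memory sketch of include/linux/lockdep.h, not a verbatim copy; without
LOCK_STAT the macro reduces to calling the slow-path function directly:

    /* Try the fast path; on contention, record the event for lock
     * statistics, fall back to the (possibly interruptible) slow path,
     * and propagate its error code. */
    #define LOCK_CONTENDED_RETURN(_lock, try, lock)                 \
    ({                                                              \
            int ____err = 0;                                        \
            if (!try(_lock)) {                                      \
                    lock_contended(&(_lock)->dep_map, _RET_IP_);    \
                    ____err = lock(_lock);                          \
            }                                                       \
            if (!____err)                                           \
                    lock_acquired(&(_lock)->dep_map, _RET_IP_);     \
            ____err;                                                \
    })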

Do you want to pull these two into a topic branch in the tip tree
based on v5.10-rc1?

That way we can share these two patches: you folks can make your
locking cleanups, and I can change exec_update_mutex to an rw_semaphore.

Eric
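
As a rough illustration of the conversion Eric describes, a minimal sketch
of a reader taking such an rw_semaphore interruptibly; the semaphore name
and the caller below are hypothetical, not taken from either patch:

    #include <linux/rwsem.h>
    #include <linux/errno.h>

    /* Hypothetical stand-in for exec_update_mutex after the conversion. */
    static DECLARE_RWSEM(exec_update_sem);

    static int inspect_task_example(void)
    {
            int err;

            /* Wait for exec to finish, but let a signal abort the wait. */
            err = down_read_interruptible(&exec_update_sem);
            if (err)
                    return err;     /* -EINTR: signalled while sleeping */

            /* ... examine state that exec would otherwise change ... */

            up_read(&exec_update_sem);
            return 0;
    }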

> ---
> --- a/kernel/locking/rwsem.c
> +++ b/kernel/locking/rwsem.c
> @@ -275,7 +275,25 @@ static inline bool rwsem_read_trylock(st
>  	long cnt = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
>  	if (WARN_ON_ONCE(cnt < 0))
>  		rwsem_set_nonspinnable(sem);
> -	return !(cnt & RWSEM_READ_FAILED_MASK);
> +
> +	if (!(cnt & RWSEM_READ_FAILED_MASK)) {
> +		rwsem_set_reader_owned(sem);
> +		return true;
> +	}
> +
> +	return false;
> +}
> +
> +static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
> +{
> +	long tmp = RWSEM_UNLOCKED_VALUE;
> +
> +	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
> +		rwsem_set_owner(sem);
> +		return true;
> +	}
> +
> +	return false;
>  }
>  
>  /*
> @@ -1335,38 +1353,29 @@ static struct rw_semaphore *rwsem_downgr
>  /*
>   * lock for reading
>   */
> -static inline void __down_read(struct rw_semaphore *sem)
> +static inline int __down_read_common(struct rw_semaphore *sem, int state)
>  {
>  	if (!rwsem_read_trylock(sem)) {
> -		rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
> +		if (IS_ERR(rwsem_down_read_slowpath(sem, state)))
> +			return -EINTR;
>  		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
> -	} else {
> -		rwsem_set_reader_owned(sem);
>  	}
> +	return 0;
>  }
>  
> -static inline int __down_read_interruptible(struct rw_semaphore *sem)
> +static __always_inline void __down_read(struct rw_semaphore *sem)
>  {
> -	if (!rwsem_read_trylock(sem)) {
> -		if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_INTERRUPTIBLE)))
> -			return -EINTR;
> -		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
> -	} else {
> -		rwsem_set_reader_owned(sem);
> -	}
> -	return 0;
> +	__down_read_common(sem, TASK_UNINTERRUPTIBLE);
>  }
>  
> -static inline int __down_read_killable(struct rw_semaphore *sem)
> +static __always_inline int __down_read_interruptible(struct rw_semaphore *sem)
>  {
> -	if (!rwsem_read_trylock(sem)) {
> -		if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_KILLABLE)))
> -			return -EINTR;
> -		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
> -	} else {
> -		rwsem_set_reader_owned(sem);
> -	}
> -	return 0;
> +	return __down_read_common(sem, TASK_INTERRUPTIBLE);
> +}
> +
> +static __always_inline int __down_read_killable(struct rw_semaphore *sem)
> +{
> +	return __down_read_common(sem, TASK_KILLABLE);
>  }
>  
>  static inline int __down_read_trylock(struct rw_semaphore *sem)
> @@ -1392,44 +1401,29 @@ static inline int __down_read_trylock(st
>  /*
>   * lock for writing
>   */
> -static inline void __down_write(struct rw_semaphore *sem)
> +static inline int __down_write_common(struct rw_semaphore *sem, int state)
>  {
> -	long tmp = RWSEM_UNLOCKED_VALUE;
> -
> -	if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
> -						      RWSEM_WRITER_LOCKED)))
> -		rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE);
> -	else
> -		rwsem_set_owner(sem);
> +	if (unlikely(!rwsem_write_trylock(sem))) {
> +		if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
> +			return -EINTR;
> +	}
> +	return 0;
>  }
>  
> -static inline int __down_write_killable(struct rw_semaphore *sem)
> +static __always_inline void __down_write(struct rw_semaphore *sem)
>  {
> -	long tmp = RWSEM_UNLOCKED_VALUE;
> +	__down_write_common(sem, TASK_UNINTERRUPTIBLE);
> +}
>  
> -	if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
> -						      RWSEM_WRITER_LOCKED))) {
> -		if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE)))
> -			return -EINTR;
> -	} else {
> -		rwsem_set_owner(sem);
> -	}
> -	return 0;
> +static __always_inline int __down_write_killable(struct rw_semaphore *sem)
> +{
> +	return __down_write_common(sem, TASK_KILLABLE);
>  }
>  
>  static inline int __down_write_trylock(struct rw_semaphore *sem)
>  {
> -	long tmp;
> -
>  	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
> -
> -	tmp  = RWSEM_UNLOCKED_VALUE;
> -	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
> -					    RWSEM_WRITER_LOCKED)) {
> -		rwsem_set_owner(sem);
> -		return true;
> -	}
> -	return false;
> +	return rwsem_write_trylock(sem);
>  }
>  
>  /*
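
A note on the design choice in the refactor above: state is a compile-time
constant at every wrapper call site, so the __always_inline wrappers let the
compiler specialize __down_read_common()/__down_write_common() and generate
essentially the same code as the hand-written variants, with none of the
copy-paste. For completeness, an illustrative (hypothetical) caller
exercising the three read-side flavours that now share one implementation:

    #include <linux/rwsem.h>
    #include <linux/errno.h>

    /* Illustrative only: the three read-lock flavours differ solely in
     * the sleep state their wrapper passes to __down_read_common(). */
    static int read_flavours_example(struct rw_semaphore *sem)
    {
            down_read(sem);                   /* TASK_UNINTERRUPTIBLE: cannot fail */
            up_read(sem);

            if (down_read_killable(sem))      /* TASK_KILLABLE: -EINTR on a fatal signal */
                    return -EINTR;
            up_read(sem);

            if (down_read_interruptible(sem)) /* TASK_INTERRUPTIBLE: -EINTR on any signal */
                    return -EINTR;
            up_read(sem);

            return 0;
    }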
