lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Wed, 23 Sep 2015 18:09:09 -0700
From:	Davidlohr Bueso <dave@...olabs.net>
To:	Peter Zijlstra <peterz@...radead.org>,
	Ingo Molnar <mingo@...nel.org>,
	Thomas Gleixner <tglx@...utronix.de>
Cc:	Andrew Morton <akpm@...ux-foundation.org>,
	Linus Torvalds <torvalds@...ux-foundation.org>,
	Will Deacon <will.deacon@....com>,
	"Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>,
	linux-kernel@...r.kernel.org, Davidlohr Bueso <dbueso@...e.de>
Subject: [PATCH v2] locking/rwsem: Use acquire/release semantics

From: Davidlohr Bueso <dave@...olabs.net>

By using the acquire/release variants of the atomic operations, weakly
ordered archs can benefit from more relaxed use of barriers when
locking/unlocking.

Signed-off-by: Davidlohr Bueso <dbueso@...e.de>
---

Changes from v1:
  Fix bogus acquire in generic lock downgrade (Linus)

  include/asm-generic/rwsem.h | 21 ++++++++++++++-------
  kernel/locking/rwsem-xadd.c |  5 +++--
  2 files changed, 17 insertions(+), 9 deletions(-)

diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
index d48bf5a..d01e313 100644
--- a/include/asm-generic/rwsem.h
+++ b/include/asm-generic/rwsem.h
@@ -33,7 +33,7 @@
   */
  static inline void __down_read(struct rw_semaphore *sem)
  {
-	if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
+	if (unlikely(atomic_long_inc_return_acquire((atomic_long_t *)&sem->count) <= 0))
  		rwsem_down_read_failed(sem);
  }
  
@@ -42,7 +42,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  	long tmp;
  
  	while ((tmp = sem->count) >= 0) {
-		if (tmp == cmpxchg(&sem->count, tmp,
+		if (tmp == cmpxchg_acquire(&sem->count, tmp,
  				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
  			return 1;
  		}
@@ -57,7 +57,7 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
  {
  	long tmp;
  
-	tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
  				     (atomic_long_t *)&sem->count);
  	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
  		rwsem_down_write_failed(sem);
@@ -72,7 +72,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
  {
  	long tmp;
  
-	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+	tmp = cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
  		      RWSEM_ACTIVE_WRITE_BIAS);
  	return tmp == RWSEM_UNLOCKED_VALUE;
  }
@@ -84,7 +84,7 @@ static inline void __up_read(struct rw_semaphore *sem)
  {
  	long tmp;
  
-	tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
+	tmp = atomic_long_dec_return_release((atomic_long_t *)&sem->count);
  	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
  		rwsem_wake(sem);
  }
@@ -94,7 +94,7 @@ static inline void __up_read(struct rw_semaphore *sem)
   */
  static inline void __up_write(struct rw_semaphore *sem)
  {
-	if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+	if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
  				 (atomic_long_t *)&sem->count) < 0))
  		rwsem_wake(sem);
  }
@@ -114,7 +114,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
  {
  	long tmp;
  
-	tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
+	/*
+	 * When downgrading from exclusive to shared ownership,
+	 * anything inside the write-locked region cannot leak
+	 * into the read side. In contrast, anything in the
+	 * read-locked region is ok to be re-ordered into the
+	 * write side. As such, use RELEASE semantics.
+	 */
+	tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS,
  				     (atomic_long_t *)&sem->count);
  	if (tmp < 0)
  		rwsem_downgrade_wake(sem);
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 0f18971..a4d4de0 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -262,7 +262,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
  	 * to reduce unnecessary expensive cmpxchg() operations.
  	 */
  	if (count == RWSEM_WAITING_BIAS &&
-	    cmpxchg(&sem->count, RWSEM_WAITING_BIAS,
+	    cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS,
  		    RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
  		if (!list_is_singular(&sem->wait_list))
  			rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
@@ -285,7 +285,8 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
  		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
  			return false;
  
-		old = cmpxchg(&sem->count, count, count + RWSEM_ACTIVE_WRITE_BIAS);
+		old = cmpxchg_acquire(&sem->count, count,
+				      count + RWSEM_ACTIVE_WRITE_BIAS);
  		if (old == count) {
  			rwsem_set_owner(sem);
  			return true;
-- 
2.1.4

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists