Date:	Wed, 23 Sep 2015 18:06:42 -0700
From:	Davidlohr Bueso <dave@...olabs.net>
To:	Peter Zijlstra <peterz@...radead.org>,
	Ingo Molnar <mingo@...nel.org>,
	Thomas Gleixner <tglx@...utronix.de>
Cc:	Andrew Morton <akpm@...ux-foundation.org>,
	Linus Torvalds <torvalds@...ux-foundation.org>,
	Will Deacon <will.deacon@....com>,
	"Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>,
	linux-kernel@...r.kernel.org, Davidlohr Bueso <dbueso@...e.de>
Subject: [PATCH v2] locking/rtmutex: Use acquire/release semantics

From: Davidlohr Bueso <dave@...olabs.net>

Weakly ordered archs can benefit from more relaxed use of barriers
when locking and unlocking, instead of the fully ordered cmpxchg()
used today: taking the lock wants acquire semantics, releasing it
wants release semantics, and updates done while already holding
->wait_lock can be fully relaxed.
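
For background, a minimal userspace C11 analogue of the two fastpaths
(illustrative sketch only, not the kernel API; toy_mutex and friends
are made-up names): a successful lock needs acquire ordering so the
critical section cannot be reordered before it, and a successful
unlock needs release ordering so the critical section cannot leak
past it. Failure needs no ordering in either case.

#include <stdatomic.h>
#include <stdbool.h>

struct toy_mutex {
	_Atomic(void *) owner;		/* NULL when unlocked */
};

static bool toy_trylock(struct toy_mutex *m, void *self)
{
	void *expected = NULL;

	/* Acquire on success; a failed attempt needs no ordering. */
	return atomic_compare_exchange_strong_explicit(&m->owner,
			&expected, self,
			memory_order_acquire, memory_order_relaxed);
}

static bool toy_unlock(struct toy_mutex *m, void *self)
{
	void *expected = self;

	/* Release on success: publishes the critical section. */
	return atomic_compare_exchange_strong_explicit(&m->owner,
			&expected, NULL,
			memory_order_release, memory_order_relaxed);
}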

Signed-off-by: Davidlohr Bueso <dbueso@...e.de>
---

Changes from v1:
  - fix bogus acquire in unlock_rt_mutex_safe() (tglx)

  kernel/locking/rtmutex.c | 30 +++++++++++++++++++++---------
  1 file changed, 21 insertions(+), 9 deletions(-)
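The first hunk below also relaxes the cmpxchg in mark_rt_mutex_waiters()
entirely: callers hold ->wait_lock, so the RMW only needs to be atomic,
not a barrier. A rough userspace C11 analogue of that retry loop (again
with hypothetical names, for illustration only):

#include <stdatomic.h>
#include <stdint.h>

#define TOY_HAS_WAITERS	0x1UL

/* The caller is assumed to hold an external wait_lock, so relaxed
 * ordering suffices -- only the atomicity of the RMW matters. */
static void toy_mark_waiters(_Atomic uintptr_t *owner)
{
	uintptr_t old = atomic_load_explicit(owner, memory_order_relaxed);

	/* A failed weak CAS refreshes 'old' and we simply retry. */
	while (!atomic_compare_exchange_weak_explicit(owner, &old,
			old | TOY_HAS_WAITERS,
			memory_order_relaxed, memory_order_relaxed))
		;
}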

diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 35e9bfc..8251e75 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -74,14 +74,23 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
   * set up.
   */
  #ifndef CONFIG_DEBUG_RT_MUTEXES
-# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
+# define rt_mutex_cmpxchg_relaxed(l,c,n) (cmpxchg_relaxed(&l->owner, c, n) == c)
+# define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
+# define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)
+
+/*
+ * Callers must hold the ->wait_lock -- which is the whole purpose as we force
+ * all future threads that attempt to [Rmw] the lock to the slowpath. As such
+ * relaxed semantics suffice.
+ */
  static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
  {
  	unsigned long owner, *p = (unsigned long *) &lock->owner;
  
  	do {
  		owner = *p;
-	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
+	} while (cmpxchg_relaxed(p, owner,
+				 owner | RT_MUTEX_HAS_WAITERS) != owner);
  }
  
  /*
@@ -121,11 +130,14 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
  	 *					lock(wait_lock);
  	 *					acquire(lock);
  	 */
-	return rt_mutex_cmpxchg(lock, owner, NULL);
+	return rt_mutex_cmpxchg_release(lock, owner, NULL);
  }
  
  #else
-# define rt_mutex_cmpxchg(l,c,n)	(0)
+# define rt_mutex_cmpxchg_relaxed(l,c,n)	(0)
+# define rt_mutex_cmpxchg_acquire(l,c,n)	(0)
+# define rt_mutex_cmpxchg_release(l,c,n)	(0)
+
  static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
  {
  	lock->owner = (struct task_struct *)
@@ -1322,7 +1334,7 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state,
  				struct hrtimer_sleeper *timeout,
  				enum rtmutex_chainwalk chwalk))
  {
-	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
  		rt_mutex_deadlock_account_lock(lock, current);
  		return 0;
  	} else
@@ -1338,7 +1350,7 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
  				      enum rtmutex_chainwalk chwalk))
  {
  	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
-	    likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+	    likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
  		rt_mutex_deadlock_account_lock(lock, current);
  		return 0;
  	} else
@@ -1349,7 +1361,7 @@ static inline int
  rt_mutex_fasttrylock(struct rt_mutex *lock,
  		     int (*slowfn)(struct rt_mutex *lock))
  {
-	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
  		rt_mutex_deadlock_account_lock(lock, current);
  		return 1;
  	}
@@ -1363,7 +1375,7 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
  {
  	WAKE_Q(wake_q);
  
-	if (likely(rt_mutex_cmpxchg(lock, current, NULL))) {
+	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
  		rt_mutex_deadlock_account_unlock(current);
  
  	} else {
@@ -1485,7 +1497,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
  bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
  				   struct wake_q_head *wqh)
  {
-	if (likely(rt_mutex_cmpxchg(lock, current, NULL))) {
+	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
  		rt_mutex_deadlock_account_unlock(current);
  		return false;
  	}
-- 
2.1.4

