Message-Id: <1482346000-9927-11-git-send-email-nhaehnle@gmail.com>
Date:   Wed, 21 Dec 2016 19:46:38 +0100
From:   Nicolai Hähnle <nhaehnle@...il.com>
To:     linux-kernel@...r.kernel.org
Cc:     Nicolai Hähnle <Nicolai.Haehnle@....com>,
        Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>,
        Maarten Lankhorst <dev@...ankhorst.nl>,
        Daniel Vetter <daniel@...ll.ch>,
        Chris Wilson <chris@...is-wilson.co.uk>,
        dri-devel@...ts.freedesktop.org
Subject: [PATCH v3 10/12] locking/ww_mutex: Yield to other waiters from optimistic spin

From: Nicolai Hähnle <Nicolai.Haehnle@....com>

Lock stealing is less beneficial for w/w mutexes since we may just end up
backing off anyway if we steal from a thread with an earlier acquire stamp
that already holds another w/w mutex we also need. So don't spin
optimistically unless we are sure that there is no other waiter that might
cause us to back off.
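
For reference, the "backing off" above is the usual w/w mutex acquire
pattern: a later-stamped thread that gets -EDEADLK has to release every
lock it already holds, wait for the contended one, and start over. A rough
sketch follows; obj_a, obj_b and my_ww_class are made-up names, a struct
with an embedded ww_mutex plus DEFINE_WW_CLASS(my_ww_class) are assumed,
the ww_* calls are the existing API from include/linux/ww_mutex.h, and for
brevity the sketch drops the contended lock again after the wait instead
of keeping it held across the retry as the full pattern in
Documentation/locking/ww-mutex-design.txt does:

	struct ww_mutex *contended = NULL;
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &my_ww_class);
retry:
	if (contended) {
		/* Sleep until the earlier-stamped holder releases it. */
		ww_mutex_lock_slow(contended, &ctx);
		ww_mutex_unlock(contended);
		contended = NULL;
	}

	if (ww_mutex_lock(&obj_a->lock, &ctx) == -EDEADLK) {
		contended = &obj_a->lock;
		goto retry;
	}
	if (ww_mutex_lock(&obj_b->lock, &ctx) == -EDEADLK) {
		/* Back off: drop what we already hold before waiting. */
		ww_mutex_unlock(&obj_a->lock);
		contended = &obj_b->lock;
		goto retry;
	}

	ww_acquire_done(&ctx);
	/* ... use obj_a and obj_b, then unlock both ... */
	ww_mutex_unlock(&obj_b->lock);
	ww_mutex_unlock(&obj_a->lock);
	ww_acquire_fini(&ctx);

If we steal a lock from an earlier-stamped thread that still holds another
mutex we need, we are the ones who take the -EDEADLK path as soon as we try
to acquire that other mutex, and we hand the stolen lock back anyway.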

Median timings taken from a contention-heavy GPU workload:

Before:
real    0m52.946s
user    0m7.272s
sys     1m55.964s

After:
real    0m53.086s
user    0m7.360s
sys     1m46.204s

This particular workload still spends 20%-25% of its CPU time in
mutex_spin_on_owner according to perf, but my attempts to reduce this
spinning further with various heuristics all led to an increase in measured
wall time despite the decrease in sys time.

Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Maarten Lankhorst <dev@...ankhorst.nl>
Cc: Daniel Vetter <daniel@...ll.ch>
Cc: Chris Wilson <chris@...is-wilson.co.uk>
Cc: dri-devel@...ts.freedesktop.org
Signed-off-by: Nicolai Hähnle <Nicolai.Haehnle@....com>
---
 kernel/locking/mutex.c | 45 ++++++++++++++++++++++++++++++++++++---------
 1 file changed, 36 insertions(+), 9 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 6f62695..0bafb37 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -374,7 +374,8 @@ ww_mutex_set_context_slowpath(struct ww_mutex *lock,
  */
 static noinline
 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
-			 bool use_ww_ctx, struct ww_acquire_ctx *ww_ctx)
+			 bool use_ww_ctx, struct ww_acquire_ctx *ww_ctx,
+			 struct mutex_waiter *waiter)
 {
 	bool ret = true;
 
@@ -397,7 +398,7 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 			break;
 		}
 
-		if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
+		if (use_ww_ctx && ww_ctx) {
 			struct ww_mutex *ww;
 
 			ww = container_of(lock, struct ww_mutex, base);
@@ -413,7 +414,30 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 			 * Check this in every inner iteration because we may
 			 * be racing against another thread's ww_mutex_lock.
 			 */
-			if (READ_ONCE(ww->ctx)) {
+			if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx)) {
+				ret = false;
+				break;
+			}
+
+			/*
+			 * If we aren't on the wait list yet, cancel the spin
+			 * if there are waiters. We want  to avoid stealing the
+			 * lock from a waiter with an earlier stamp, since the
+			 * other thread may already own a lock that we also
+			 * need.
+			 */
+			if (!waiter &&
+			    (atomic_long_read(&lock->owner) &
+			     MUTEX_FLAG_WAITERS)) {
+				ret = false;
+				break;
+			}
+
+			/*
+			 * Similarly, stop spinning if we are no longer the
+			 * first waiter.
+			 */
+			if (waiter && !__mutex_waiter_is_first(lock, waiter)) {
 				ret = false;
 				break;
 			}
@@ -479,7 +503,8 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
  */
 static bool mutex_optimistic_spin(struct mutex *lock,
 				  struct ww_acquire_ctx *ww_ctx,
-				  const bool use_ww_ctx, const bool waiter)
+				  const bool use_ww_ctx,
+				  struct mutex_waiter *waiter)
 {
 	struct task_struct *task = current;
 
@@ -518,12 +543,12 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 			}
 
 			if (!mutex_spin_on_owner(lock, owner, use_ww_ctx,
-						 ww_ctx))
+						 ww_ctx, waiter))
 				goto fail_unlock;
 		}
 
 		/* Try to acquire the mutex if it is unlocked. */
-		if (__mutex_trylock(lock, waiter))
+		if (__mutex_trylock(lock, waiter != NULL))
 			break;
 
 		/*
@@ -565,7 +590,8 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 #else
 static bool mutex_optimistic_spin(struct mutex *lock,
 				  struct ww_acquire_ctx *ww_ctx,
-				  const bool use_ww_ctx, const bool waiter)
+				  const bool use_ww_ctx,
+				  struct mutex_waiter *waiter)
 {
 	return false;
 }
@@ -737,7 +763,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 
 	if (__mutex_trylock(lock, false) ||
-	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
+	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
 		/* got the lock, yay! */
 		lock_acquired(&lock->dep_map, ip);
 		if (use_ww_ctx && ww_ctx)
@@ -843,7 +869,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		}
 
 		if ((first &&
-		     mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true)) ||
+		     mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx,
+					   &waiter)) ||
 		    __mutex_trylock(lock, handoff))
 			break;
 
-- 
2.7.4
