lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Date:	Thu, 26 May 2016 09:31:27 +0100
From:	Chris Wilson <chris@...is-wilson.co.uk>
To:	Peter Zijlstra <peterz@...radead.org>,
	Ingo Molnar <mingo@...hat.com>
Cc:	intel-gfx@...ts.freedesktop.org,
	Chris Wilson <chris@...is-wilson.co.uk>,
	Christian König <christian.koenig@....com>,
	Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
	linux-kernel@...r.kernel.org
Subject: [PATCH] mutex: Do not spin/queue before performing ww_mutex deadlock avoidance

The ww_mutex has the property of allowing the lock to detect and report
when it may be used in deadlocking scenarios (to allow the caller to
unwind its locks and avoid the deadlock). This detection needs to be
performed before we queue up for the spin, otherwise we wait on the
osq_lock() for our turn to detect the deadlock that another thread is
spinning on, waiting for us. Since we are stuck behind our waiter,
throughput plummets.

This can be demonstrated by trying concurrent atomic modesets.

Testcase: igt/kms_cursor_legacy
Signed-off-by: Chris Wilson <chris@...is-wilson.co.uk>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Christian König <christian.koenig@....com>
Cc: Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>
Cc: linux-kernel@...r.kernel.org
---
 kernel/locking/mutex.c | 56 ++++++++++++++++++++++++++++++++------------------
 1 file changed, 36 insertions(+), 20 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index e364b424b019..d60f1ba3e64f 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -217,12 +217,35 @@ ww_mutex_set_context_slowpath(struct ww_mutex *lock,
 }
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+static bool ww_mutex_may_deadlock(struct mutex *lock,
+				  struct ww_acquire_ctx *ww_ctx)
+{
+	if (ww_ctx && ww_ctx->acquired > 0) {
+		struct ww_mutex *ww;
+
+		ww = container_of(lock, struct ww_mutex, base);
+		/*
+		 * If ww->ctx is set the contents are undefined, only
+		 * by acquiring wait_lock there is a guarantee that
+		 * they are not invalid when reading.
+		 *
+		 * As such, when deadlock detection needs to be
+		 * performed the optimistic spinning cannot be done.
+		 */
+		if (READ_ONCE(ww->ctx))
+			return true;
+	}
+
+	return false;
+}
+
 /*
  * Look out! "owner" is an entirely speculative pointer
  * access and not reliable.
  */
 static noinline
-bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
+bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
+			 struct ww_acquire_ctx *ww_ctx)
 {
 	bool ret = true;
 
@@ -241,6 +264,11 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
 			break;
 		}
 
+		if (ww_mutex_may_deadlock(lock, ww_ctx)) {
+			ret = false;
+			break;
+		}
+
 		cpu_relax_lowlatency();
 	}
 	rcu_read_unlock();
@@ -251,7 +279,8 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
 /*
  * Initial check for entering the mutex spinning loop
  */
-static inline int mutex_can_spin_on_owner(struct mutex *lock)
+static inline int mutex_can_spin_on_owner(struct mutex *lock,
+					  struct ww_acquire_ctx *ww_ctx)
 {
 	struct task_struct *owner;
 	int retval = 1;
@@ -259,6 +288,9 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 	if (need_resched())
 		return 0;
 
+	if (ww_mutex_may_deadlock(lock, ww_ctx))
+		return 0;
+
 	rcu_read_lock();
 	owner = READ_ONCE(lock->owner);
 	if (owner)
@@ -308,7 +340,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 {
 	struct task_struct *task = current;
 
-	if (!mutex_can_spin_on_owner(lock))
+	if (!mutex_can_spin_on_owner(lock, ww_ctx))
 		goto done;
 
 	/*
@@ -322,28 +354,12 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 	while (true) {
 		struct task_struct *owner;
 
-		if (use_ww_ctx && ww_ctx->acquired > 0) {
-			struct ww_mutex *ww;
-
-			ww = container_of(lock, struct ww_mutex, base);
-			/*
-			 * If ww->ctx is set the contents are undefined, only
-			 * by acquiring wait_lock there is a guarantee that
-			 * they are not invalid when reading.
-			 *
-			 * As such, when deadlock detection needs to be
-			 * performed the optimistic spinning cannot be done.
-			 */
-			if (READ_ONCE(ww->ctx))
-				break;
-		}
-
 		/*
 		 * If there's an owner, wait for it to either
 		 * release the lock or go to sleep.
 		 */
 		owner = READ_ONCE(lock->owner);
-		if (owner && !mutex_spin_on_owner(lock, owner))
+		if (owner && !mutex_spin_on_owner(lock, owner, ww_ctx))
 			break;
 
 		/* Try to acquire the mutex if it is unlocked. */
-- 
2.8.1

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ