Date:   Mon, 29 Aug 2016 17:41:09 +0200
From:   Peter Zijlstra <peterz@...radead.org>
To:     Waiman Long <waiman.long@....com>
Cc:     Linus Torvalds <torvalds@...ux-foundation.org>,
        Jason Low <jason.low2@....com>,
        Ding Tianhong <dingtianhong@...wei.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Will Deacon <Will.Deacon@....com>,
        Ingo Molnar <mingo@...hat.com>,
        Imre Deak <imre.deak@...el.com>,
        Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
        Davidlohr Bueso <dave@...olabs.net>,
        Tim Chen <tim.c.chen@...ux.intel.com>,
        Terry Rudd <terry.rudd@....com>,
        "Paul E. McKenney" <paulmck@...ibm.com>,
        Jason Low <jason.low2@...com>,
        Chris Wilson <chris@...is-wilson.co.uk>,
        Daniel Vetter <daniel.vetter@...ll.ch>
Subject: Re: [RFC][PATCH -v2 4/4] locking/mutex: Add lock handoff to avoid
 starvation

On Fri, Aug 26, 2016 at 07:40:34PM -0400, Waiman Long wrote:
> On 08/26/2016 11:18 AM, Peter Zijlstra wrote:

> >Still need to look at adding spinning to the handoff case.
> >Also need to look at writing (much) better changelogs, they stink.
> >
> 
> I have looked at the handoff code and I didn't see any problem.

So I found (or rather the buildbot did) a problem with it.

locking-selftest has testcases like:


	lock(&A);
	if (trylock(&A))
		/* fail */

  and

	ww_lock(&A);
	if (ww_lock(&A) != -EDEADLK)
		/* fail */

But with the 'trylock' accepting the lock if owner == current, in
order to accept the hand-off, these tests break in interesting ways.
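
To make the failure concrete, here is a minimal user-space sketch of
the broken check (fake_mutex, fake_trylock and current_task are
made-up stand-ins, not names from the patch): once trylock returns
true for owner == current, the recursive-trylock selftest above can
never observe a failure.

	#include <stdbool.h>
	#include <stdio.h>

	struct fake_mutex { unsigned long owner; };

	static unsigned long current_task = 0x1000;	/* models 'current' */

	static bool fake_trylock(struct fake_mutex *m)
	{
		if (m->owner == current_task)
			return true;	/* meant for hand-off; wrong for recursion */
		if (!m->owner) {
			m->owner = current_task;
			return true;
		}
		return false;
	}

	int main(void)
	{
		struct fake_mutex A = { 0 };

		fake_trylock(&A);		/* lock(&A) */
		if (fake_trylock(&A))		/* selftest expects failure here */
			printf("recursive trylock succeeded -> test breaks\n");
		return 0;
	}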

Now, ARCH_MIN_TASKALIGN is at least 8 (mips, s390, parisc), which
gives us one more FLAG bit to play with.
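
For reference, the pointer-tagging this relies on, as a stand-alone
user-space sketch (simplified; not the kernel's atomic code): with
task_struct aligned to at least 8 bytes, the low 3 bits of the owner
word are always zero, so bit 2 (0x04) is free for the new GIFT flag.

	#include <stdio.h>

	#define FLAG_WAITERS	0x01UL
	#define FLAG_HANDOFF	0x02UL
	#define FLAG_GIFT	0x04UL	/* needs >= 8-byte task alignment */
	#define FLAGS		0x07UL

	/* stand-in for task_struct; the alignment is what matters */
	struct task { long pad; } __attribute__((aligned(8)));

	int main(void)
	{
		static struct task t;
		unsigned long owner = (unsigned long)&t | FLAG_GIFT;

		/* task pointer and flags recovered from one word */
		printf("task %p, flags %#lx\n",
		       (void *)(owner & ~FLAGS), owner & FLAGS);
		return 0;
	}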


The below seems to make things happy again..


--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -53,8 +53,9 @@ EXPORT_SYMBOL(__mutex_init);
 
 #define MUTEX_FLAG_WAITERS	0x01
 #define MUTEX_FLAG_HANDOFF	0x02
+#define MUTEX_FLAG_GIFT		0x04
 
-#define MUTEX_FLAGS		0x03
+#define MUTEX_FLAGS		0x07
 
 static inline struct task_struct *__owner_task(unsigned long owner)
 {
@@ -66,33 +67,6 @@ static inline unsigned long __owner_flag
 	return owner & MUTEX_FLAGS;
 }
 
-/*
- * Actual trylock that will work on any unlocked state.
- */
-static inline bool __mutex_trylock(struct mutex *lock)
-{
-	unsigned long owner, curr = (unsigned long)current;
-
-	owner = atomic_long_read(&lock->owner);
-	for (;;) { /* must loop, can race against a flag */
-		unsigned long old;
-
-		if (__owner_task(owner)) {
-			if ((unsigned long)__owner_task(owner) == curr)
-				return true;
-
-			return false;
-		}
-
-		old = atomic_long_cmpxchg_acquire(&lock->owner, owner,
-						  curr | __owner_flags(owner));
-		if (old == owner)
-			return true;
-
-		owner = old;
-	}
-}
-
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * Optimistic trylock that only works in the uncontended case. Make sure to
@@ -134,6 +108,10 @@ static inline bool __mutex_waiter_is_fir
 	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
 }
 
+/*
+ * Give up ownership to a specific task. When @task == NULL, this is
+ * equivalent to a regular unlock.
+ */
 static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
 {
 	unsigned long owner = atomic_long_read(&lock->owner);
@@ -146,7 +124,15 @@ static void __mutex_handoff(struct mutex
 #endif
 
 		new = (owner & MUTEX_FLAG_WAITERS);
-		new |= (unsigned long)task;
+		if (task) {
+			/*
+			 * To distinguish between a recursive lock attempt
+			 * and having been given the lock by someone else
+			 * we need to set the GIFT bit.
+			 */
+			new |= MUTEX_FLAG_GIFT; /* clear HANDOFF, set GIFT */
+			new |= (unsigned long)task;
+		}
 
 		old = atomic_long_cmpxchg(&lock->owner, owner, new);
 		if (old == owner)
@@ -154,6 +140,50 @@ static void __mutex_handoff(struct mutex
 
 		owner = old;
 	}
+}
+
+/*
+ * Someone handed us their lock ownership using __mutex_handoff(); say
+ * thank you and accept this nice gift.
+ */
+static bool __mutex_accept(struct mutex *lock, unsigned long owner)
+{
+	if (!(owner & MUTEX_FLAG_GIFT))
+		return false;
+
+	if (__owner_task(owner) != current)
+		return false;
+
+	__mutex_clear_flag(lock, MUTEX_FLAG_GIFT);
+	smp_mb__after_atomic(); /* ACQUIRE */
+	return true;
+}
+
+/*
+ * Actual trylock that will work on any unlocked state.
+ */
+static inline bool __mutex_trylock(struct mutex *lock)
+{
+	unsigned long owner, curr = (unsigned long)current;
+
+	owner = atomic_long_read(&lock->owner);
+	for (;;) { /* must loop, can race against a flag */
+		unsigned long old;
+
+		if (__owner_task(owner)) {
+			if (unlikely(owner & MUTEX_FLAG_GIFT))
+				return __mutex_accept(lock, owner);
+
+			return false;
+		}
+
+		old = atomic_long_cmpxchg_acquire(&lock->owner, owner,
+						  curr | __owner_flags(owner));
+		if (old == owner)
+			return true;
+
+		owner = old;
+	}
 }
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC

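For anyone skimming the diff, a condensed user-space model of the
resulting handshake (illustrative only; the real code uses
atomic_long_cmpxchg and task_struct pointers): the releasing owner
publishes task | MUTEX_FLAG_GIFT, only the giftee's trylock accepts
it, and a recursive trylock by the owner still fails.

	#include <stdbool.h>
	#include <stdio.h>

	#define FLAG_GIFT	0x04UL
	#define FLAGS		0x07UL

	static unsigned long lock_owner;	/* models lock->owner */

	/* gift the lock to @task (a tagged pointer); 0 == plain unlock */
	static void handoff(unsigned long task)
	{
		lock_owner = task ? (task | FLAG_GIFT) : 0;
	}

	static bool trylock(unsigned long curr)
	{
		unsigned long owner = lock_owner;

		if (owner & ~FLAGS) {		/* owned by someone */
			if ((owner & FLAG_GIFT) && (owner & ~FLAGS) == curr) {
				lock_owner = curr;	/* accept: clears GIFT */
				return true;
			}
			return false;		/* incl. recursive trylock */
		}
		lock_owner = curr;
		return true;
	}

	int main(void)
	{
		unsigned long waiter = 0x2000;	/* fake task pointer */

		handoff(waiter);
		printf("gifted trylock:    %d (expect 1)\n", trylock(waiter));
		printf("recursive trylock: %d (expect 0)\n", trylock(waiter));
		return 0;
	}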