Date:	Mon, 16 Feb 2009 21:18:30 +0100
From:	Peter Zijlstra <peterz@...radead.org>
To:	Ingo Molnar <mingo@...e.hu>
Cc:	lkml <linux-kernel@...r.kernel.org>,
	Johannes Weiner <hannes@...xchg.org>
Subject: [PATCH] mutex: enable spinning for DEBUG_MUTEXES

Subject: mutex: enable spinning for DEBUG_MUTEXES
From: Johannes Weiner <hannes@...xchg.org>
Date: Thu, 15 Jan 2009 15:57:38 +0100

Optimistic spinning is possible with CONFIG_DEBUG_MUTEXES enabled, but
care must be taken with the debug checks, which race against accesses
to the lock owner field.

When the lock owner gets rescheduled after acquisition and therefore
unlocks on another CPU, that second CPU might not yet see the first
CPU's write to the owner field, and the debug check would spuriously
warn about an owner mismatch.  Hence the memory barriers for the debug
case.
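
To illustrate the pairing, here is a minimal userspace sketch (not
kernel code; all names are made up for illustration).  C11 fences stand
in for smp_wmb()/smp_rmb(), and the atomic lock_word stands in for the
lock-word handover that the mutex itself provides:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static _Atomic int lock_word;
	static void *owner;		/* plain field, like lock->owner */

	static void *acquire_side(void *me)
	{
		owner = me;		/* mutex_set_owner() */
		atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
		atomic_store_explicit(&lock_word, 1, memory_order_relaxed);
		return NULL;
	}

	static void *release_side(void *expected)
	{
		while (!atomic_load_explicit(&lock_word, memory_order_relaxed))
			;		/* spin until handed over */
		atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
		if (owner != expected)	/* the debug_mutex_unlock() check */
			puts("spurious owner mismatch without the fences");
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;
		int token;

		pthread_create(&a, NULL, acquire_side, &token);
		pthread_create(&b, NULL, release_side, &token);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		return 0;
	}

Once release_side observes lock_word == 1, the release/acquire fence
pair guarantees it also sees the owner store, which is exactly what the
debug check needs.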

Also remove the redundant mutex_set_owner() calls in
__mutex_lock_common() and __mutex_trylock_slowpath(); setting the owner
is taken care of in mutex_lock() and mutex_trylock().
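
For context, the callers referred to above look roughly like this in
the kernel/mutex.c this patch is against (quoted from memory as a
sketch, not part of the diff):

	void __sched mutex_lock(struct mutex *lock)
	{
		might_sleep();
		/* fast path: 1 -> 0 count transition, slow path on contention */
		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
		mutex_set_owner(lock);		/* single spot for all lock paths */
	}

	int __sched mutex_trylock(struct mutex *lock)
	{
		int ret;

		ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
		if (ret)
			mutex_set_owner(lock);	/* only on successful acquisition */

		return ret;
	}

The _nested variants below get the same treatment, since they call
__mutex_lock_common() directly and bypass these wrappers.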

[a.p.zijlstra@...llo.nl: fix _nested variants]
Signed-off-by: Johannes Weiner <hannes@...xchg.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@...llo.nl>
---
 kernel/mutex-debug.c |    6 ++++++
 kernel/mutex-debug.h |    6 ++++++
 kernel/mutex.c       |   27 +++++++++++++++------------
 3 files changed, 27 insertions(+), 12 deletions(-)

Index: linux-2.6/kernel/mutex.c
===================================================================
--- linux-2.6.orig/kernel/mutex.c
+++ linux-2.6/kernel/mutex.c
@@ -148,7 +148,7 @@ __mutex_lock_common(struct mutex *lock, 
 
 	preempt_disable();
 	mutex_acquire(&lock->dep_map, subclass, 0, ip);
-#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
+#if defined(CONFIG_SMP)
 	/*
 	 * Optimistic spinning.
 	 *
@@ -162,9 +162,6 @@ __mutex_lock_common(struct mutex *lock, 
 	 * Since this needs the lock owner, and this mutex implementation
 	 * doesn't track the owner atomically in the lock field, we need to
 	 * track it non-atomically.
-	 *
-	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
-	 * to serialize everything.
 	 */
 
 	for (;;) {
@@ -180,7 +177,6 @@ __mutex_lock_common(struct mutex *lock, 
 
 		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
 			lock_acquired(&lock->dep_map, ip);
-			mutex_set_owner(lock);
 			preempt_enable();
 			return 0;
 		}
@@ -256,7 +252,6 @@ done:
 	lock_acquired(&lock->dep_map, ip);
 	/* got the lock - rejoice! */
 	mutex_remove_waiter(lock, &waiter, current_thread_info());
-	mutex_set_owner(lock);
 
 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))
@@ -276,6 +271,7 @@ mutex_lock_nested(struct mutex *lock, un
 {
 	might_sleep();
 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
+	mutex_set_owner(lock);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -283,17 +279,26 @@ EXPORT_SYMBOL_GPL(mutex_lock_nested);
 int __sched
 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
 {
+	int ret;
+
 	might_sleep();
-	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
+	ret = __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
+	mutex_set_owner(lock);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
 
 int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
+	int ret;
+
 	might_sleep();
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
-				   subclass, _RET_IP_);
+	ret = __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
+	mutex_set_owner(lock);
+
+	return ret;
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -432,10 +437,8 @@ static inline int __mutex_trylock_slowpa
 	spin_lock_mutex(&lock->wait_lock, flags);
 
 	prev = atomic_xchg(&lock->count, -1);
-	if (likely(prev == 1)) {
-		mutex_set_owner(lock);
+	if (likely(prev == 1))
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-	}
 
 	/* Set it back to 0 if there are no waiters: */
 	if (likely(list_empty(&lock->wait_list)))
Index: linux-2.6/kernel/mutex-debug.c
===================================================================
--- linux-2.6.orig/kernel/mutex-debug.c
+++ linux-2.6/kernel/mutex-debug.c
@@ -74,6 +74,12 @@ void debug_mutex_unlock(struct mutex *lo
 		return;
 
 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
+	/*
+	 * We might have been rescheduled after taking the lock.  Make sure
+	 * the new CPU sees the setting of the owner field.  The corresponding
+	 * write side is mutex_set_owner().
+	 */
+	smp_rmb();
 	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
 	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
 	mutex_clear_owner(lock);
Index: linux-2.6/kernel/mutex-debug.h
===================================================================
--- linux-2.6.orig/kernel/mutex-debug.h
+++ linux-2.6/kernel/mutex-debug.h
@@ -30,6 +30,12 @@ extern void debug_mutex_init(struct mute
 static inline void mutex_set_owner(struct mutex *lock)
 {
 	lock->owner = current_thread_info();
+	/*
+	 * When the acquirer gets rescheduled after taking the
+	 * lock, make sure the owner setting is visible to
+	 * debug_mutex_unlock() on the new CPU.
+	 */
+	smp_wmb();
 }
 
 static inline void mutex_clear_owner(struct mutex *lock)

