Date:	Tue, 28 Jan 2014 11:13:15 -0800
From:	Jason Low <jason.low2@...com>
To:	mingo@...hat.com, peterz@...radead.org, paulmck@...ux.vnet.ibm.com,
	Waiman.Long@...com, torvalds@...ux-foundation.org,
	tglx@...utronix.de, jason.low2@...com
Cc:	linux-kernel@...r.kernel.org, riel@...hat.com,
	akpm@...ux-foundation.org, davidlohr@...com, hpa@...or.com,
	andi@...stfloor.org, aswin@...com, scott.norton@...com,
	chegu_vinod@...com
Subject:	[RFC][PATCH v2 4/5] mutex: Disable preemption between modifying lock->owner and locking/unlocking mutex

This RFC patch disables preemption between modifying lock->owner and
locking/unlocking the mutex. This prevents situations where the lock
owner gets preempted between those two operations, leaving optimistic
spinners unable to tell whether lock->owner is currently running on a
CPU. As mentioned in the thread for v1 of this patchset, disabling
preemption is a cheap operation.
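
To illustrate the window being closed, here is a minimal userspace
sketch. This is not kernel code: toy_mutex/toy_lock are made-up names,
only the count fastpath and the owner field are modeled, and
preempt_disable()/preempt_enable() are stubbed out since userspace has
no such primitive.

  #include <stdatomic.h>

  struct toy_mutex {
  	atomic_int count;	/* 1 = unlocked, 0 = locked */
  	void *owner;		/* written after the fastpath; read by spinners */
  };

  /* Stubs: the real kernel primitives have no userspace equivalent. */
  static void preempt_disable(void) { }
  static void preempt_enable(void) { }

  static void toy_lock(struct toy_mutex *m, void *self)
  {
  	int expected = 1;

  	preempt_disable();
  	/* fastpath: the 1 -> 0 transition (a real mutex would fall
  	 * into the slowpath on failure; this toy just retries) */
  	while (!atomic_compare_exchange_weak(&m->count, &expected, 0))
  		expected = 1;
  	/* Without preempt_disable(), being preempted right here would
  	 * leave count == 0 with a stale owner, so a spinner checking
  	 * whether the owner is on a CPU has nothing reliable to read. */
  	m->owner = self;
  	preempt_enable();
  }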

Signed-off-by: Jason Low <jason.low2@...com>
---
 kernel/locking/mutex.c |   30 ++++++++++++++++++++++++++++--
 1 files changed, 28 insertions(+), 2 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 6d85b08..cfaaf53 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -98,8 +98,10 @@ void __sched mutex_lock(struct mutex *lock)
 	 * The locking fastpath is the 1->0 transition from
 	 * 'unlocked' into 'locked' state.
 	 */
+	preempt_disable();
 	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 	mutex_set_owner(lock);
+	preempt_enable();
 }
 
 EXPORT_SYMBOL(mutex_lock);
@@ -253,9 +255,13 @@ void __sched mutex_unlock(struct mutex *lock)
 	 * the slow path will always be taken, and that clears the owner field
 	 * after verifying that it was indeed current.
 	 */
+	preempt_disable();
 	mutex_clear_owner(lock);
 #endif
 	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
+#ifndef CONFIG_DEBUG_MUTEXES
+	preempt_enable();
+#endif
 }
 
 EXPORT_SYMBOL(mutex_unlock);
@@ -292,9 +298,13 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
 	 * the slow path will always be taken, and that clears the owner field
 	 * after verifying that it was indeed current.
 	 */
+	preempt_disable();
 	mutex_clear_owner(&lock->base);
 #endif
 	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
+#ifndef CONFIG_DEBUG_MUTEXES
+	preempt_enable();
+#endif
 }
 EXPORT_SYMBOL(ww_mutex_unlock);
 
@@ -780,12 +790,16 @@ int __sched mutex_lock_interruptible(struct mutex *lock)
 	int ret;
 
 	might_sleep();
+	preempt_disable();
 	ret =  __mutex_fastpath_lock_retval(&lock->count);
 	if (likely(!ret)) {
 		mutex_set_owner(lock);
+		preempt_enable();
 		return 0;
-	} else
+	} else {
+		preempt_enable();
 		return __mutex_lock_interruptible_slowpath(lock);
+	}
 }
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
@@ -795,12 +809,16 @@ int __sched mutex_lock_killable(struct mutex *lock)
 	int ret;
 
 	might_sleep();
+	preempt_disable();
 	ret = __mutex_fastpath_lock_retval(&lock->count);
 	if (likely(!ret)) {
 		mutex_set_owner(lock);
+		preempt_enable();
 		return 0;
-	} else
+	} else {
+		preempt_enable();
 		return __mutex_lock_killable_slowpath(lock);
+	}
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
@@ -889,9 +907,11 @@ int __sched mutex_trylock(struct mutex *lock)
 {
 	int ret;
 
+	preempt_disable();
 	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
 	if (ret)
 		mutex_set_owner(lock);
+	preempt_enable();
 
 	return ret;
 }
@@ -904,6 +924,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 	int ret;
 
 	might_sleep();
+	preempt_disable();
 
 	ret = __mutex_fastpath_lock_retval(&lock->base.count);
 
@@ -912,6 +933,8 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 		mutex_set_owner(&lock->base);
 	} else
 		ret = __ww_mutex_lock_slowpath(lock, ctx);
+
+	preempt_enable();
 	return ret;
 }
 EXPORT_SYMBOL(__ww_mutex_lock);
@@ -922,6 +945,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 	int ret;
 
 	might_sleep();
+	preempt_disable();
 
 	ret = __mutex_fastpath_lock_retval(&lock->base.count);
 
@@ -930,6 +954,8 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 		mutex_set_owner(&lock->base);
 	} else
 		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
+
+	preempt_enable();
 	return ret;
 }
 EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
-- 
1.7.1
