lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [day] [month] [year] [list]
Date:	Tue, 19 Nov 2013 17:37:47 -0800
From:	Tim Chen <tim.c.chen@...ux.intel.com>
To:	Ingo Molnar <mingo@...e.hu>,
	Andrew Morton <akpm@...ux-foundation.org>,
	Thomas Gleixner <tglx@...utronix.de>
Cc:	linux-kernel@...r.kernel.org, linux-mm <linux-mm@...ck.org>,
	linux-arch@...r.kernel.org,
	Linus Torvalds <torvalds@...ux-foundation.org>,
	Waiman Long <waiman.long@...com>,
	Andrea Arcangeli <aarcange@...hat.com>,
	Alex Shi <alex.shi@...aro.org>,
	Andi Kleen <andi@...stfloor.org>,
	Michel Lespinasse <walken@...gle.com>,
	Davidlohr Bueso <davidlohr.bueso@...com>,
	Matthew R Wilcox <matthew.r.wilcox@...el.com>,
	Dave Hansen <dave.hansen@...el.com>,
	Peter Zijlstra <a.p.zijlstra@...llo.nl>,
	Rik van Riel <riel@...hat.com>,
	Peter Hurley <peter@...leysoftware.com>,
	"Paul E.McKenney" <paulmck@...ux.vnet.ibm.com>,
	Tim Chen <tim.c.chen@...ux.intel.com>,
	Raghavendra K T <raghavendra.kt@...ux.vnet.ibm.com>,
	George Spelvin <linux@...izon.com>,
	"H. Peter Anvin" <hpa@...or.com>, Arnd Bergmann <arnd@...db.de>,
	Aswin Chandramouleeswaran <aswin@...com>,
	Scott J Norton <scott.norton@...com>,
	Will Deacon <will.deacon@....com>,
	"Figo.zhang" <figo1802@...il.com>
Subject: [PATCH v6 5/5] MCS Lock: Allows for architecture specific mcs lock
 and unlock

Restructure code to allow for architecture specific defines
of the arch_mcs_spin_lock and arch_mcs_spin_unlock functions
that can be optimized for a specific architecture.  These
arch specific functions can be placed in asm/mcs_spinlock.h.
Otherwise the default arch_mcs_spin_lock and arch_mcs_spin_unlock
will be used.

Signed-off-by: Tim Chen <tim.c.chen@...ux.intel.com>
---
 arch/Kconfig                  |  3 ++
 include/linux/mcs_spinlock.h  |  5 +++
 kernel/locking/mcs_spinlock.c | 93 +++++++++++++++++++++++++------------------
 3 files changed, 62 insertions(+), 39 deletions(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index ded747c..c96c696 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -306,6 +306,9 @@ config HAVE_CMPXCHG_LOCAL
 config HAVE_CMPXCHG_DOUBLE
 	bool
 
+config HAVE_ARCH_MCS_LOCK
+	bool
+
 config ARCH_WANT_IPC_PARSE_VERSION
 	bool
 
diff --git a/include/linux/mcs_spinlock.h b/include/linux/mcs_spinlock.h
index d54bb23..d64786a 100644
--- a/include/linux/mcs_spinlock.h
+++ b/include/linux/mcs_spinlock.h
@@ -12,6 +12,11 @@
 #ifndef __LINUX_MCS_SPINLOCK_H
 #define __LINUX_MCS_SPINLOCK_H
 
+/* arch specific mcs lock and unlock functions defined here */
+#ifdef CONFIG_HAVE_ARCH_MCS_LOCK
+#include <asm/mcs_spinlock.h>
+#endif
+
 struct mcs_spinlock {
 	struct mcs_spinlock *next;
 	int locked; /* 1 if lock acquired */
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
index 6f2ce8e..582584a 100644
--- a/kernel/locking/mcs_spinlock.c
+++ b/kernel/locking/mcs_spinlock.c
@@ -29,28 +29,36 @@
  * on this node->locked until the previous lock holder sets the node->locked
  * in mcs_spin_unlock().
  */
+#ifndef arch_mcs_spin_lock
+#define arch_mcs_spin_lock(lock, node)					\
+{									\
+	struct mcs_spinlock *prev;					\
+									\
+	/* Init node */							\
+	node->locked = 0;						\
+	node->next   = NULL;						\
+									\
+	/* xchg() provides a memory barrier */				\
+	prev = xchg(lock, node);					\
+	if (likely(prev == NULL)) {					\
+		/* Lock acquired */					\
+		return;							\
+	}								\
+	ACCESS_ONCE(prev->next) = node;					\
+	/*								\
+	 * Wait until the lock holder passes the lock down.		\
+	 * Using smp_load_acquire() provides a memory barrier that	\
+	 * ensures subsequent operations happen after the lock is	\
+	 * acquired.							\
+	 */								\
+	while (!(smp_load_acquire(&node->locked)))			\
+		arch_mutex_cpu_relax();					\
+}
+#endif
+
 void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 {
-	struct mcs_spinlock *prev;
-
-	/* Init node */
-	node->locked = 0;
-	node->next   = NULL;
-
-	/* xchg() provides a memory barrier */
-	prev = xchg(lock, node);
-	if (likely(prev == NULL)) {
-		/* Lock acquired */
-		return;
-	}
-	ACCESS_ONCE(prev->next) = node;
-	/*
-	 * Wait until the lock holder passes the lock down.
-	 * Using smp_load_acquire() provides a memory barrier that
-	 * ensures subsequent operations happen after the lock is acquired.
-	 */
-	while (!(smp_load_acquire(&node->locked)))
-		arch_mutex_cpu_relax();
+	arch_mcs_spin_lock(lock, node);
 }
 EXPORT_SYMBOL_GPL(mcs_spin_lock);
 
@@ -58,26 +66,33 @@ EXPORT_SYMBOL_GPL(mcs_spin_lock);
  * Releases the lock. The caller should pass in the corresponding node that
  * was used to acquire the lock.
  */
+#ifndef arch_mcs_spin_unlock
+#define arch_mcs_spin_unlock(lock, node)				\
+{									\
+	struct mcs_spinlock *next = ACCESS_ONCE(node->next);		\
+									\
+	if (likely(!next)) {						\
+		/*							\
+		 * Release the lock by setting it to NULL               \
+		 */							\
+		if (likely(cmpxchg(lock, node, NULL) == node))          \
+			return;                                         \
+		/* Wait until the next pointer is set */		\
+		while (!(next = ACCESS_ONCE(node->next)))		\
+			arch_mutex_cpu_relax();				\
+	}								\
+	/*								\
+	 * Pass lock to next waiter.					\
+	 * smp_store_release() provides a memory barrier to ensure	\
+	 * all operations in the critical section have been completed	\
+	 * before unlocking.						\
+	 */								\
+	smp_store_release(&next->locked, 1);				\
+}
+#endif
+
 void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 {
-	struct mcs_spinlock *next = ACCESS_ONCE(node->next);
-
-	if (likely(!next)) {
-		/*
-		 * Release the lock by setting it to NULL
-		 */
-		if (likely(cmpxchg(lock, node, NULL) == node))
-			return;
-		/* Wait until the next pointer is set */
-		while (!(next = ACCESS_ONCE(node->next)))
-			arch_mutex_cpu_relax();
-	}
-	/*
-	 * Pass lock to next waiter.
-	 * smp_store_release() provides a memory barrier to ensure
-	 * all operations in the critical section has been completed
-	 * before unlocking.
-	 */
-	smp_store_release(&next->locked, 1);
+	arch_mcs_spin_unlock(lock, node);
 }
 EXPORT_SYMBOL_GPL(mcs_spin_unlock);
-- 
1.7.11.7


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ