Message-Id: <1374696746-40184-4-git-send-email-Waiman.Long@hp.com>
Date:	Wed, 24 Jul 2013 16:12:26 -0400
From:	Waiman Long <Waiman.Long@...com>
To:	Thomas Gleixner <tglx@...utronix.de>,
	Ingo Molnar <mingo@...hat.com>,
	"H. Peter Anvin" <hpa@...or.com>, Arnd Bergmann <arnd@...db.de>
Cc:	Waiman Long <Waiman.Long@...com>, linux-arch@...r.kernel.org,
	x86@...nel.org, linux-kernel@...r.kernel.org,
	Peter Zijlstra <peterz@...radead.org>,
	Steven Rostedt <rostedt@...dmis.org>,
	Andrew Morton <akpm@...ux-foundation.org>,
	Richard Weinberger <richard@....at>,
	Catalin Marinas <catalin.marinas@....com>,
	Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
	Matt Fleming <matt.fleming@...el.com>,
	Herbert Xu <herbert@...dor.apana.org.au>,
	Akinobu Mita <akinobu.mita@...il.com>,
	Rusty Russell <rusty@...tcorp.com.au>,
	Michel Lespinasse <walken@...gle.com>,
	Andi Kleen <andi@...stfloor.org>,
	Rik van Riel <riel@...hat.com>,
	"Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>,
	Linus Torvalds <torvalds@...ux-foundation.org>,
	Raghavendra K T <raghavendra.kt@...ux.vnet.ibm.com>,
	George Spelvin <linux@...izon.com>,
	Harvey Harrison <harvey.harrison@...il.com>,
	"Chandramouleeswaran, Aswin" <aswin@...com>,
	"Norton, Scott J" <scott.norton@...com>
Subject: [PATCH v2 3/3] qrwlock: Optionally enable classic read/write lock behavior

By default, the queue read/write lock is fair to both readers and
writers. However, there are situations where a bias towards readers
can increase throughput, especially in reader-heavy workloads. There
are also cases where deviating from the classic read/write lock
behavior causes problems, such as taking a read lock recursively in
an interrupt handler while a writer is waiting. Using the classic
behavior, however, costs the queue read/write lock some of its
fairness.
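
For illustration, below is a minimal sketch of that recursive read
lock scenario; apart from the rwlock API calls, the identifiers
(demo_lock, demo_irq_handler, demo_reader) are made up for this
example and are not part of the patch:

	#include <linux/interrupt.h>	/* irqreturn_t, IRQ_HANDLED */
	#include <linux/spinlock.h>	/* rwlock_t, read_lock/read_unlock */

	static DEFINE_RWLOCK(demo_lock);

	static irqreturn_t demo_irq_handler(int irq, void *dev)
	{
		/*
		 * Second read lock on the same CPU. With a strictly
		 * fair lock this reader queues behind any writer that
		 * arrived after the read_lock() in demo_reader(), so
		 * it spins forever against the reader it interrupted.
		 */
		read_lock(&demo_lock);
		read_unlock(&demo_lock);
		return IRQ_HANDLED;
	}

	static void demo_reader(void)
	{
		read_lock(&demo_lock);	/* interrupts still enabled */
		/* a writer queues up, then the interrupt fires here */
		read_unlock(&demo_lock);
	}

With the classic behavior, the reader in the interrupt handler can
steal the lock ahead of the waiting writer and the sequence completes
as it does with the existing read/write lock implementation.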

This patch enables lock owners to decide which behavior they want
for their read/write lock by using the appropriate initializer. Two
types of initializers are provided:
1. Default - fair to both readers and writers
2. Classic - readers that arrive after a waiting writer can steal
	     the lock

The classic initializers carry a "_classic" suffix. If the queue
read/write lock feature is not enabled, the classic initializers are
the same as the default initializers.
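
A minimal usage sketch follows; the lock and structure names here
(fair_lock, classic_lock, struct demo_dev) are purely illustrative
and not part of the patch:

	#include <linux/spinlock.h>

	/* static initialization */
	static DEFINE_RWLOCK(fair_lock);	     /* fair to readers and writers */
	static DEFINE_RWLOCK_CLASSIC(classic_lock);  /* readers may steal from a waiting writer */

	/* dynamic initialization */
	struct demo_dev {
		rwlock_t	fair_lock;
		rwlock_t	classic_lock;
	};

	static void demo_dev_init(struct demo_dev *dev)
	{
		rwlock_init(&dev->fair_lock);
		rwlock_init_classic(&dev->classic_lock);
	}

Code that only needs the default behavior is unaffected; the existing
DEFINE_RWLOCK() and rwlock_init() continue to work as before.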

Signed-off-by: Waiman Long <Waiman.Long@...com>
---
 include/linux/rwlock.h         |   15 +++++++++++++++
 include/linux/rwlock_types.h   |   12 +++++++++++-
 include/linux/spinlock_types.h |    4 ++++
 lib/spinlock_debug.c           |   20 ++++++++++++++++++++
 4 files changed, 50 insertions(+), 1 deletions(-)

diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index bc2994e..234305a 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -23,9 +23,24 @@ do {								\
 								\
 	__rwlock_init((lock), #lock, &__key);			\
 } while (0)
+
+# ifdef CONFIG_QUEUE_RWLOCK
+  extern void __rwlock_init_classic(rwlock_t *lock, const char *name,
+				    struct lock_class_key *key);
+#  define rwlock_init_classic(lock)				\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	__rwlock_init_classic((lock), #lock, &__key);		\
+} while (0)
+# else
+#  define rwlock_init_classic(lock)	rwlock_init(lock)
+# endif /* CONFIG_QUEUE_RWLOCK */
 #else
 # define rwlock_init(lock)					\
 	do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
+# define rwlock_init_classic(lock)				\
+	do { *(lock) = __RW_LOCK_UNLOCKED_CLASSIC(lock); } while (0)
 #endif
 
 #ifdef CONFIG_DEBUG_SPINLOCK
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index cc0072e..0b1bd3b 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -37,12 +37,22 @@ typedef struct {
 				.owner = SPINLOCK_OWNER_INIT,		\
 				.owner_cpu = -1,			\
 				RW_DEP_MAP_INIT(lockname) }
+#define __RW_LOCK_UNLOCKED_CLASSIC(lockname)				\
+	(rwlock_t)	{	.raw_lock = __ARCH_RW_LOCK_UNLOCKED_CLASSIC,\
+				.magic = RWLOCK_MAGIC,			\
+				.owner = SPINLOCK_OWNER_INIT,		\
+				.owner_cpu = -1,			\
+				RW_DEP_MAP_INIT(lockname) }
 #else
 #define __RW_LOCK_UNLOCKED(lockname) \
 	(rwlock_t)	{	.raw_lock = __ARCH_RW_LOCK_UNLOCKED,	\
 				RW_DEP_MAP_INIT(lockname) }
+#define __RW_LOCK_UNLOCKED_CLASSIC(lockname) \
+	(rwlock_t)	{	.raw_lock = __ARCH_RW_LOCK_UNLOCKED_CLASSIC,\
+				RW_DEP_MAP_INIT(lockname) }
 #endif
 
-#define DEFINE_RWLOCK(x)	rwlock_t x = __RW_LOCK_UNLOCKED(x)
+#define DEFINE_RWLOCK(x)	 rwlock_t x = __RW_LOCK_UNLOCKED(x)
+#define DEFINE_RWLOCK_CLASSIC(x) rwlock_t x = __RW_LOCK_UNLOCKED_CLASSIC(x)
 
 #endif /* __LINUX_RWLOCK_TYPES_H */
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 73548eb..ff5554f 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -85,4 +85,8 @@ typedef struct spinlock {
 
 #include <linux/rwlock_types.h>
 
+#ifndef	__ARCH_RW_LOCK_UNLOCKED_CLASSIC
+#define	__ARCH_RW_LOCK_UNLOCKED_CLASSIC	__ARCH_RW_LOCK_UNLOCKED
+#endif
+
 #endif /* __LINUX_SPINLOCK_TYPES_H */
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 0374a59..a765f17 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -49,6 +49,26 @@ void __rwlock_init(rwlock_t *lock, const char *name,
 
 EXPORT_SYMBOL(__rwlock_init);
 
+#ifdef CONFIG_QUEUE_RWLOCK
+void __rwlock_init_classic(rwlock_t *lock, const char *name,
+			   struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, name, key, 0);
+#endif
+	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED_CLASSIC;
+	lock->magic = RWLOCK_MAGIC;
+	lock->owner = SPINLOCK_OWNER_INIT;
+	lock->owner_cpu = -1;
+}
+
+EXPORT_SYMBOL(__rwlock_init_classic);
+#endif /* CONFIG_QUEUE_RWLOCK */
+
 static void spin_dump(raw_spinlock_t *lock, const char *msg)
 {
 	struct task_struct *owner = NULL;
-- 
1.7.1
