Date:	Sun, 25 Jan 2009 12:50:23 -0800 (PST)
From:	Frederic Weisbecker <fweisbec@...il.com>
To:	Ingo Molnar <mingo@...e.hu>
Cc:	linux-kernel@...r.kernel.org,
	Andrew Morton <akpm@...ux-foundation.org>,
	Mandeep Singh Baines <msb@...gle.com>,
	Frederic Weisbecker <fweisbec@...il.com>
Subject: [RFC][PATCH 2/2] add a counter for writers spinning on a rwlock

This patch adds a counter of the writers that have entered a rwlock's slow path.
It can be useful, for example, for slow background tasks that perform work on the
tasklist, such as the hung_task detector (kernel/hung_task.c): such a task can check
the counter and back off when a writer is waiting, as sketched below.
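
To illustrate the intended use (the real hung_task hook is not reproduced here),
a caller could look like the sketch below; scan_tasklist_politely() is just a
made-up name for a slow reader that aborts its walk when a writer shows up:

/*
 * Illustration only, not part of this patch: a slow walk of the
 * tasklist, in the spirit of kernel/hung_task.c, that aborts its
 * scan as soon as a writer is spinning on tasklist_lock, so the
 * writer is not delayed by the whole walk.
 */
#include <linux/sched.h>
#include <linux/spinlock.h>

static void scan_tasklist_politely(void)
{
	struct task_struct *g, *t;

	read_lock(&tasklist_lock);
	do_each_thread(g, t) {
		/* ... per-task check would go here ... */

		/* A writer is spinning on tasklist_lock: let it in. */
		if (writers_spinning_lock(&tasklist_lock))
			goto unlock;
	} while_each_thread(g, t);
unlock:
	read_unlock(&tasklist_lock);
}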

It adds an inc/dec pair on the slow path and 4 bytes to each rwlock, so the overhead
is not zero.

Only x86 is supported for now; writers_spinning_lock() will return 0 on other archs
(which is perhaps not a good idea).

Comments?

Signed-off-by: Frederic Weisbecker <fweisbec@...il.com>
---
 arch/Kconfig                          |    4 ++++
 arch/x86/Kconfig                      |    1 +
 arch/x86/include/asm/spinlock.h       |    5 +++++
 arch/x86/include/asm/spinlock_types.h |    1 +
 arch/x86/lib/rwlock_64.S              |   10 +++++++---
 include/linux/spinlock.h              |    7 +++++++
 6 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index 550dab2..86c22e0 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -106,3 +106,7 @@ config HAVE_CLK
 	  The <linux/clk.h> calls support software clock gating and
 	  thus are a key power management tool on many systems.
 
+config HAVE_NB_WRITERS_SPINNING_LOCK
+	bool
+
+
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 967e114..0aeae17 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -40,6 +40,7 @@ config X86
 	select HAVE_GENERIC_DMA_COHERENT if X86_32
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select USER_STACKTRACE_SUPPORT
+	select HAVE_NB_WRITERS_SPINNING_LOCK
 
 config ARCH_DEFCONFIG
 	string
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 139b424..d9ee21b 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -283,6 +283,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
 	return 0;
 }
 
+static inline int __raw_writers_spinning_lock(raw_rwlock_t *lock)
+{
+	return lock->spinning_writers;
+}
+
 static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 845f81c..163e6de 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -13,6 +13,7 @@ typedef struct raw_spinlock {
 
 typedef struct {
 	unsigned int lock;
+	unsigned int spinning_writers;
 } raw_rwlock_t;
 
 #define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
index 05ea55f..9589b74 100644
--- a/arch/x86/lib/rwlock_64.S
+++ b/arch/x86/lib/rwlock_64.S
@@ -9,14 +9,18 @@
 ENTRY(__write_lock_failed)
 	CFI_STARTPROC
 	LOCK_PREFIX
+	incl 4(%rdi) /* add ourself as a spinning writer */
+1:	LOCK_PREFIX
 	addl $RW_LOCK_BIAS,(%rdi)
-1:	rep
+2:	rep
 	nop
 	cmpl $RW_LOCK_BIAS,(%rdi)
-	jne 1b
+	jne 2b
 	LOCK_PREFIX
 	subl $RW_LOCK_BIAS,(%rdi)
-	jnz  __write_lock_failed
+	jnz  1b
+	LOCK_PREFIX
+	decl 4(%rdi) /* got the lock, remove ourself from the spinning writers */
 	ret
 	CFI_ENDPROC
 END(__write_lock_failed)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index e0c0fcc..8ce22af 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -121,6 +121,13 @@ do {								\
 
 #define spin_is_locked(lock)	__raw_spin_is_locked(&(lock)->raw_lock)
 
+#ifdef CONFIG_HAVE_NB_WRITERS_SPINNING_LOCK
+#define writers_spinning_lock(rwlock)				\
+	__raw_writers_spinning_lock(&(rwlock)->raw_lock)
+#else
+#define writers_spinning_lock(rwlock)	0
+#endif
+
 #ifdef CONFIG_GENERIC_LOCKBREAK
 #define spin_is_contended(lock) ((lock)->break_lock)
 #else
-- 
1.6.1

