lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Fri, 22 Mar 2013 18:25:20 -0700
From:	Andi Kleen <andi@...stfloor.org>
To:	linux-kernel@...r.kernel.org
Cc:	torvalds@...ux-foundation.org, akpm@...ux-foundation.org,
	x86@...nel.org, Andi Kleen <ak@...ux.intel.com>
Subject: [PATCH 26/29] x86, tsx: Add adaptation support to rw spinlocks

From: Andi Kleen <ak@...ux.intel.com>

Add elision adaptation state to the rwlocks and use the generic
adaptation wrapper. This unfortunately increases the size of the rwlock:
by 6 bytes when NR_CPUS > 2048, otherwise by 2 bytes.

Signed-off-by: Andi Kleen <ak@...ux.intel.com>
---
 arch/x86/include/asm/rwlock.h |   30 +++++++++++++++++++++---------
 arch/x86/kernel/rtm-locks.c   |   22 +++++++++++++++++-----
 2 files changed, 38 insertions(+), 14 deletions(-)

diff --git a/arch/x86/include/asm/rwlock.h b/arch/x86/include/asm/rwlock.h
index a5370a0..a3929cc 100644
--- a/arch/x86/include/asm/rwlock.h
+++ b/arch/x86/include/asm/rwlock.h
@@ -6,9 +6,15 @@
 #if CONFIG_NR_CPUS <= 2048
 
 #ifndef __ASSEMBLY__
-typedef union {
-	s32 lock;
-	s32 write;
+typedef struct {
+	union {
+		s32 lock;
+		s32 write;
+	};
+#ifdef CONFIG_RTM_LOCKS
+	short	elision_adapt;
+	/* 2 bytes padding */
+#endif
 } arch_rwlock_t;
 #endif
 
@@ -24,12 +30,18 @@ typedef union {
 #include <linux/const.h>
 
 #ifndef __ASSEMBLY__
-typedef union {
-	s64 lock;
-	struct {
-		u32 read;
-		s32 write;
+typedef struct {
+	union {
+		s64 lock;
+		struct {
+			u32 read;
+			s32 write;
+		};
 	};
+#ifdef CONFIG_RTM_LOCKS
+	short	elision_adapt;
+	/* 6 bytes padding for now */
+#endif
 } arch_rwlock_t;
 #endif
 
@@ -42,7 +54,7 @@ typedef union {
 
 #endif /* CONFIG_NR_CPUS */
 
-#define __ARCH_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED		{ { RW_LOCK_BIAS } }
 
 /* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */
 
diff --git a/arch/x86/kernel/rtm-locks.c b/arch/x86/kernel/rtm-locks.c
index 1651049..bc3275a 100644
--- a/arch/x86/kernel/rtm-locks.c
+++ b/arch/x86/kernel/rtm-locks.c
@@ -155,8 +155,16 @@ static int rtm_spin_is_locked(struct arch_spinlock *lock)
  * This uses direct calls with static patching, not pvops.
  */
 
-__read_mostly bool rwlock_elision = true;
-module_param(rwlock_elision, bool, 0644);
+static struct static_key rwlock_elision = STATIC_KEY_INIT_FALSE;
+module_param(rwlock_elision, static_key, 0644);
+
+static __read_mostly struct elision_config readlock_elision_config =
+	DEFAULT_ELISION_CONFIG;
+TUNE_ELISION_CONFIG(readlock, readlock_elision_config);
+
+static __read_mostly struct elision_config writelock_elision_config =
+	DEFAULT_ELISION_CONFIG;
+TUNE_ELISION_CONFIG(writelock, writelock_elision_config);
 
 void rtm_read_lock(arch_rwlock_t *rw)
 {
@@ -167,7 +175,8 @@ void rtm_read_lock(arch_rwlock_t *rw)
 	 * would abort anyways.
 	 */
 
-	if (!elide_lock(rwlock_elision, !arch_rwlock_is_locked(rw)))
+	if (!elide_lock_adapt(rwlock_elision, !arch_rwlock_is_locked(rw),
+			      &rw->elision_adapt, &readlock_elision_config))
 		arch_do_read_lock(rw);
 }
 EXPORT_SYMBOL(rtm_read_lock);
@@ -210,7 +219,8 @@ EXPORT_SYMBOL(rtm_read_unlock_irqrestore);
 
 int rtm_read_trylock(arch_rwlock_t *rw)
 {
-	if (elide_lock(rwlock_elision, !arch_rwlock_is_locked(rw)))
+	if (elide_lock_adapt(rwlock_elision, !arch_rwlock_is_locked(rw),
+			     &rw->elision_adapt, &readlock_elision_config))
 		return 1;
 	return arch_do_read_trylock(rw);
 }
@@ -218,7 +228,8 @@ EXPORT_SYMBOL(rtm_read_trylock);
 
 void rtm_write_lock(arch_rwlock_t *rw)
 {
-	if (!elide_lock(rwlock_elision, !arch_write_can_lock(rw)))
+	if (!elide_lock_adapt(rwlock_elision, !arch_write_can_lock(rw),
+			      &rw->elision_adapt, &writelock_elision_config))
 		arch_do_write_lock(rw);
 }
 EXPORT_SYMBOL(rtm_write_lock);
@@ -451,6 +462,7 @@ void init_rtm_spinlocks(void)
 	pv_irq_ops.restore_fl = PV_CALLEE_SAVE(rtm_restore_fl);
 	pv_init_ops.patch = rtm_patch;
 
+	static_key_slow_inc(&rwlock_elision);
 	static_key_slow_inc(&mutex_elision);
 }
 
-- 
1.7.7.6

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ