Message-ID: <1330917630.18835.44.camel@debian>
Date: Mon, 05 Mar 2012 11:20:30 +0800
From: Alex Shi <alex.shi@...el.com>
To: tglx@...utronix.com, mingo@...hat.com, hpa@...or.com,
arnd@...db.de, akpm@...ux-foundation.org
Cc: linux-kernel@...r.kernel.org, x86@...nel.org, andi.kleen@...el.com
Subject: [RFC patch] spin_lock: add cross cache lines checking

Modern x86 CPUs do not lock the whole memory bus when executing 'lock'-prefixed
instructions, unless the destination operand of the instruction crosses two
cache lines. When that happens, the resulting bus lock is a disaster for system
performance.
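
(Illustration only, not part of the patch: a minimal user-space sketch of
the cross-line condition, assuming a 64-byte L1 line. The mask arithmetic
mirrors the is_cross_lines() macro added below; crosses_cache_line() is a
made-up helper name.)

#include <stdio.h>
#include <stddef.h>

#define L1_CACHE_BYTES		64UL
#define L1_CACHE_SIZE_MASK	(~(L1_CACHE_BYTES - 1UL))

/* true if an object of 'size' bytes starting at 'addr' spans two lines */
static int crosses_cache_line(unsigned long addr, size_t size)
{
	return (addr & L1_CACHE_SIZE_MASK) !=
	       ((addr + size - 1) & L1_CACHE_SIZE_MASK);
}

int main(void)
{
	printf("%d\n", crosses_cache_line(62, 4));	/* 1: bytes 62..65 split a line */
	printf("%d\n", crosses_cache_line(64, 4));	/* 0: bytes 64..67 fit in one line */
	return 0;
}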

In practice, as long as a lock is not embedded in a 'packed' structure, gcc
places it at a safe, naturally aligned offset on x86 (see the sketch below).
Still, adding this check under CONFIG_DEBUG_LOCK_ALLOC seems harmless.
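
(Again a sketch, not part of the patch: a hypothetical layout showing how a
lock can end up misaligned. 'struct bad_dev' / 'struct ok_dev' are made-up
names, and this assumes raw_spinlock_t is 4 bytes and the enclosing object
starts on a cache-line boundary.)

struct bad_dev {
	char		tag[63];
	raw_spinlock_t	lock;	/* packed: starts at offset 63, spans bytes
				 * 63..66 and straddles the 64-byte line */
} __attribute__((packed));

struct ok_dev {
	char		tag[63];
	raw_spinlock_t	lock;	/* gcc pads to offset 64: never split */
};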

Btw, the SPIN_BUG_ON macro is also changed slightly to address a style
complaint.

Inspired-by: Andi Kleen <andi.kleen@...el.com>
Signed-off-by: Alex Shi <alex.shi@...el.com>
---
diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
index 48f99f1..79d146e 100644
--- a/arch/x86/include/asm/cache.h
+++ b/arch/x86/include/asm/cache.h
@@ -7,6 +7,8 @@
#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_SIZE_MASK (~(L1_CACHE_BYTES - 1UL))
+
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
index 1bfcfe5..244e528 100644
--- a/include/asm-generic/cache.h
+++ b/include/asm-generic/cache.h
@@ -9,4 +9,6 @@
#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_SIZE_MASK (~(L1_CACHE_BYTES - 1UL))
+
#endif /* __ASM_GENERIC_CACHE_H */
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 5f3eacd..554dcda 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -13,6 +13,12 @@
#include <linux/delay.h>
#include <linux/module.h>
+#define SPIN_BUG_ON(cond, lock, msg) do { if (unlikely(cond)) spin_bug(lock, msg); } while (0)
+
+#define is_cross_lines(p) \
+ (((unsigned long)(p) & L1_CACHE_SIZE_MASK) != \
+ (((unsigned long)(p) + sizeof(*p) - 1) & L1_CACHE_SIZE_MASK))
+
void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
struct lock_class_key *key)
{
@@ -22,6 +28,8 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
*/
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map(&lock->dep_map, name, key, 0);
+ SPIN_BUG_ON(is_cross_lines(&lock->raw_lock), lock,
+ "!!! the lock crosses cache lines !!!");
#endif
lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
lock->magic = SPINLOCK_MAGIC;
@@ -40,6 +48,8 @@ void __rwlock_init(rwlock_t *lock, const char *name,
*/
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map(&lock->dep_map, name, key, 0);
+ SPIN_BUG_ON(is_cross_lines(&lock->raw_lock), lock,
+ "!!! the lock crosses cache lines !!!");
#endif
lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
lock->magic = RWLOCK_MAGIC;
@@ -75,8 +85,6 @@ static void spin_bug(raw_spinlock_t *lock, const char *msg)
spin_dump(lock, msg);
}
-#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
-
static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
--