Message-Id: <1485983620-11958-2-git-send-email-longman@redhat.com>
Date:   Wed,  1 Feb 2017 16:13:38 -0500
From:   Waiman Long <longman@...hat.com>
To:     Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>
Cc:     linux-kernel@...r.kernel.org, Waiman Long <longman@...hat.com>
Subject: [PATCH v2 1/3] locking/spinlock_debug: Reduce lockup suspected message clutter

When the debug spinlock code detects a lockup, it will print out an
error message as well as the backtraces of all the CPUs. However, if
more than one CPU is waiting on that lock, multiple lockup messages
will be printed, leading to garbled output.

To reduce clutter in the console log, only one of the lock waiters is
now allowed to print out the CPU backtraces.
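
For illustration only, here is a minimal user-space sketch of the same
"first waiter reports" idea using C11 atomics. atomic_exchange() stands
in for the kernel's xchg(); the demo_lock structure and the
report_lockup() helper are made-up names, not part of the patch:

	#include <stdatomic.h>
	#include <stdio.h>

	struct demo_lock {
		atomic_int lockup;	/* 0 = not reported yet, 1 = already reported */
	};

	/* Hypothetical stand-in for spin_dump() plus the backtrace dump. */
	static void report_lockup(struct demo_lock *lock)
	{
		/* Only the waiter that flips lockup from 0 to 1 reports. */
		if (!atomic_exchange(&lock->lockup, 1))
			printf("lockup suspected: dumping backtraces once\n");
	}

	int main(void)
	{
		struct demo_lock lock = { .lockup = 0 };

		report_lockup(&lock);	/* prints the message */
		report_lockup(&lock);	/* silent: already reported */
		return 0;
	}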

Since break_lock, like the new lockup flag, can only hold a value of 0
or 1, it is shrunk to an unsigned short so that on a 64-bit
architecture the size of the raw_spinlock structure does not increase,
whether or not CONFIG_GENERIC_LOCKBREAK is defined.
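
As a rough layout sketch (field types assumed for a typical 64-bit
build with both CONFIG_GENERIC_LOCKBREAK and CONFIG_DEBUG_SPINLOCK
enabled, and arch_spinlock_t approximated by an unsigned int; this is
not taken verbatim from the patch):

	#include <stdio.h>

	struct before {			/* old layout */
		unsigned int raw_lock;
		unsigned int break_lock;	/* was unsigned int */
		unsigned int magic, owner_cpu;
		void *owner;
	};

	struct after {			/* new layout */
		unsigned int raw_lock;
		unsigned short break_lock;	/* shrunk to unsigned short */
		unsigned short lockup;		/* new flag fills the freed space */
		unsigned int magic, owner_cpu;
		void *owner;
	};

	int main(void)
	{
		/* Both typically report 24 bytes on a 64-bit build. */
		printf("before: %zu, after: %zu\n",
		       sizeof(struct before), sizeof(struct after));
		return 0;
	}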

Signed-off-by: Waiman Long <longman@...hat.com>
---
 include/linux/spinlock_types.h  |  4 +++-
 kernel/locking/spinlock_debug.c | 26 +++++++++++++++++++++-----
 2 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 73548eb..99f28bd 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -20,9 +20,10 @@
 typedef struct raw_spinlock {
 	arch_spinlock_t raw_lock;
 #ifdef CONFIG_GENERIC_LOCKBREAK
-	unsigned int break_lock;
+	unsigned short break_lock;
 #endif
 #ifdef CONFIG_DEBUG_SPINLOCK
+	unsigned short lockup;
 	unsigned int magic, owner_cpu;
 	void *owner;
 #endif
@@ -43,6 +44,7 @@
 
 #ifdef CONFIG_DEBUG_SPINLOCK
 # define SPIN_DEBUG_INIT(lockname)		\
+	.lockup = 0,				\
 	.magic = SPINLOCK_MAGIC,		\
 	.owner_cpu = -1,			\
 	.owner = SPINLOCK_OWNER_INIT,
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
index 0374a59..0f880a8 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -27,6 +27,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 	lock->magic = SPINLOCK_MAGIC;
 	lock->owner = SPINLOCK_OWNER_INIT;
 	lock->owner_cpu = -1;
+	lock->lockup = 0;
 }
 
 EXPORT_SYMBOL(__raw_spin_lock_init);
@@ -101,6 +102,24 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock)
 							lock, "wrong CPU");
 	lock->owner = SPINLOCK_OWNER_INIT;
 	lock->owner_cpu = -1;
+	lock->lockup = 0;
+}
+
+static inline void __spin_lockup(raw_spinlock_t *lock)
+{
+	/*
+	 * lockup suspected:
+	 *
+	 * Only one of the lock waiters will be allowed to print the lockup
+	 * message in order to avoid an avalanche of lockup and backtrace
+	 * messages from different lock waiters of the same lock.
+	 */
+	if (!xchg(&lock->lockup, 1)) {
+		spin_dump(lock, "lockup suspected");
+#ifdef CONFIG_SMP
+		trigger_all_cpu_backtrace();
+#endif
+	}
 }
 
 static void __spin_lock_debug(raw_spinlock_t *lock)
@@ -113,11 +132,8 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
 			return;
 		__delay(1);
 	}
-	/* lockup suspected: */
-	spin_dump(lock, "lockup suspected");
-#ifdef CONFIG_SMP
-	trigger_all_cpu_backtrace();
-#endif
+
+	__spin_lockup(lock);
 
 	/*
 	 * The trylock above was causing a livelock.  Give the lower level arch
-- 
1.8.3.1
