Message-Id: <20210607200232.22211-2-john.ogness@linutronix.de>
Date: Mon, 7 Jun 2021 22:02:31 +0200
From: John Ogness <john.ogness@...utronix.de>
To: Petr Mladek <pmladek@...e.com>
Cc: Sergey Senozhatsky <senozhatsky@...omium.org>,
Steven Rostedt <rostedt@...dmis.org>,
Thomas Gleixner <tglx@...utronix.de>,
linux-kernel@...r.kernel.org,
Andrew Morton <akpm@...ux-foundation.org>,
Dmitry Safonov <0x7f454c46@...il.com>,
Stephen Rothwell <sfr@...b.auug.org.au>,
Peter Zijlstra <peterz@...radead.org>,
Marco Elver <elver@...gle.com>,
Alexander Potapenko <glider@...gle.com>,
Stephen Boyd <swboyd@...omium.org>
Subject: [PATCH next v2 1/2] dump_stack: move cpu lock to printk.c

dump_stack() implements its own cpu-reentrant spinning lock to
best-effort serialize stack traces in the printk log. However,
there are other functions (such as show_regs()) that can also
benefit from this serialization.

Move the cpu-reentrant spinning lock (cpu lock) into new helper
functions printk_cpu_lock_irqsave()/printk_cpu_unlock_irqrestore()
so that it is available for others as well. For !CONFIG_SMP the
cpu lock is a NOP.

Note that having multiple cpu locks in the system can easily
lead to deadlock. Code needing a cpu lock should use the
printk cpu lock, since the printk cpu lock could be acquired
from any code and any context.

Signed-off-by: John Ogness <john.ogness@...utronix.de>
---
include/linux/printk.h | 13 ++++++++
kernel/printk/printk.c | 75 ++++++++++++++++++++++++++++++++++++++++++
lib/dump_stack.c | 41 +++--------------------
3 files changed, 92 insertions(+), 37 deletions(-)
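
For reference, a minimal sketch (not part of this patch) of how another
caller, for example a show_regs()-style dump, could adopt the new
helpers. The function name and messages are illustrative only:

#include <linux/printk.h>
#include <linux/smp.h>

/*
 * Hypothetical caller: serialize a multi-line dump against other CPUs,
 * the same way dump_stack_lvl() does after this patch.
 */
static void example_dump_state(void)
{
	unsigned long irq_flags;
	bool lock_flag;

	/* Take the printk cpu lock; returns with interrupts disabled. */
	printk_cpu_lock_irqsave(&lock_flag, &irq_flags);

	/* Output emitted here is not interleaved with other CPUs' dumps. */
	pr_info("CPU%d: begin state dump\n", smp_processor_id());
	pr_info("CPU%d: end state dump\n", smp_processor_id());

	/* Release the lock (if taken by this call) and restore interrupts. */
	printk_cpu_unlock_irqrestore(lock_flag, irq_flags);
}
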
diff --git a/include/linux/printk.h b/include/linux/printk.h
index f589b8b60806..b84e0c59220f 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -287,6 +287,19 @@ static inline void printk_safe_flush_on_panic(void)
}
#endif
+#if defined(CONFIG_SMP)
+extern void printk_cpu_lock_irqsave(bool *lock_flag, unsigned long *irq_flags);
+extern void printk_cpu_unlock_irqrestore(bool lock_flag, unsigned long irq_flags);
+#else
+static inline void printk_cpu_lock_irqsave(bool *lock_flag, unsigned long *irq_flags)
+{
+}
+
+static inline void printk_cpu_unlock_irqrestore(bool lock_flag, unsigned long irq_flags)
+{
+}
+#endif
+
extern int kptr_restrict;
/**
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 114e9963f903..f94babb38493 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -3532,3 +3532,78 @@ void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
#endif
+
+#ifdef CONFIG_SMP
+static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1);
+
+/*
+ * printk_cpu_lock_irqsave: Acquire the printk cpu-reentrant spinning lock
+ * and disable interrupts.
+ * @lock_flag: A buffer to store lock state.
+ * @irq_flags: A buffer to store irq state.
+ *
+ * If no processor has the lock, the calling processor takes the lock and
+ * becomes the owner. If the calling processor is already the owner of the
+ * lock, this function succeeds immediately. If the lock is held by another
+ * processor, this function spins until the calling processor becomes the
+ * owner. This function returns with interrupts disabled.
+ *
+ * It is safe to call this function from any context and state.
+ */
+void printk_cpu_lock_irqsave(bool *lock_flag, unsigned long *irq_flags)
+{
+ int old;
+ int cpu;
+
+retry:
+ local_irq_save(*irq_flags);
+
+ cpu = smp_processor_id();
+
+ old = atomic_cmpxchg(&printk_cpulock_owner, -1, cpu);
+ if (old == -1) {
+ /* This CPU is now the owner. */
+
+ *lock_flag = true;
+
+ } else if (old == cpu) {
+ /* This CPU is already the owner. */
+
+ *lock_flag = false;
+
+ } else {
+ local_irq_restore(*irq_flags);
+
+ /*
+ * Wait for the lock to release before jumping to cmpxchg()
+ * in order to mitigate the thundering herd problem.
+ */
+ do {
+ cpu_relax();
+ } while (atomic_read(&printk_cpulock_owner) != -1);
+
+ goto retry;
+ }
+}
+EXPORT_SYMBOL(printk_cpu_lock_irqsave);
+
+/*
+ * printk_cpu_unlock_irqrestore: Release the printk cpu-reentrant spinning
+ * lock and restore interrupts.
+ * @lock_flag: The current lock state.
+ * @irq_flags: The current irq state.
+ *
+ * Release the lock. The calling processor must be the owner of the lock.
+ *
+ * It is safe to call this function from any context and state.
+ */
+void printk_cpu_unlock_irqrestore(bool lock_flag, unsigned long irq_flags)
+{
+ if (lock_flag) {
+ atomic_set(&printk_cpulock_owner, -1);
+
+ local_irq_restore(irq_flags);
+ }
+}
+EXPORT_SYMBOL(printk_cpu_unlock_irqrestore);
+#endif /* CONFIG_SMP */
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 6e7ca3d67710..84c68bad94c7 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -93,52 +93,19 @@ static void __dump_stack(const char *log_lvl)
*
* Architectures can override this implementation by implementing its own.
*/
-#ifdef CONFIG_SMP
-static atomic_t dump_lock = ATOMIC_INIT(-1);
-
asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
{
- unsigned long flags;
- int was_locked;
- int old;
- int cpu;
+ unsigned long irq_flags;
+ bool lock_flag;
/*
* Permit this cpu to perform nested stack dumps while serialising
* against other CPUs
*/
-retry:
- local_irq_save(flags);
- cpu = smp_processor_id();
- old = atomic_cmpxchg(&dump_lock, -1, cpu);
- if (old == -1) {
- was_locked = 0;
- } else if (old == cpu) {
- was_locked = 1;
- } else {
- local_irq_restore(flags);
- /*
- * Wait for the lock to release before jumping to
- * atomic_cmpxchg() in order to mitigate the thundering herd
- * problem.
- */
- do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
- goto retry;
- }
-
- __dump_stack(log_lvl);
-
- if (!was_locked)
- atomic_set(&dump_lock, -1);
-
- local_irq_restore(flags);
-}
-#else
-asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
-{
+ printk_cpu_lock_irqsave(&lock_flag, &irq_flags);
__dump_stack(log_lvl);
+ printk_cpu_unlock_irqrestore(lock_flag, irq_flags);
}
-#endif
EXPORT_SYMBOL(dump_stack_lvl);
asmlinkage __visible void dump_stack(void)
--
2.20.1
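
A hedged sketch (not part of the patch) of the reentrancy behavior
described in the printk_cpu_lock_irqsave() comment: nested calls on the
same CPU succeed immediately and only the outermost caller releases the
lock. The outer()/inner() names are hypothetical:

#include <linux/printk.h>

static void inner(void)
{
	unsigned long irq_flags;
	bool lock_flag;

	printk_cpu_lock_irqsave(&lock_flag, &irq_flags);
	/* Already the owner: lock_flag == false, acquisition is immediate. */
	pr_info("nested output\n");
	printk_cpu_unlock_irqrestore(lock_flag, irq_flags);
	/* lock_flag == false, so the lock is NOT released here. */
}

static void outer(void)
{
	unsigned long irq_flags;
	bool lock_flag;

	printk_cpu_lock_irqsave(&lock_flag, &irq_flags);
	/* First acquisition on this CPU: lock_flag == true. */
	inner();
	printk_cpu_unlock_irqrestore(lock_flag, irq_flags);
	/* lock_flag == true: the owner slot is cleared and irqs restored. */
}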