Message-Id: <20210531162051.2325-2-john.ogness@linutronix.de>
Date: Mon, 31 May 2021 18:20:50 +0200
From: John Ogness <john.ogness@...utronix.de>
To: Petr Mladek <pmladek@...e.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky.work@...il.com>,
Sergey Senozhatsky <sergey.senozhatsky@...il.com>,
Steven Rostedt <rostedt@...dmis.org>,
Thomas Gleixner <tglx@...utronix.de>,
linux-kernel@...r.kernel.org,
Sergey Senozhatsky <senozhatsky@...omium.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Stephen Rothwell <sfr@...b.auug.org.au>,
Dmitry Safonov <0x7f454c46@...il.com>,
Valentin Schneider <valentin.schneider@....com>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Stephen Boyd <swboyd@...omium.org>,
Alexander Potapenko <glider@...gle.com>,
"Paul E. McKenney" <paulmck@...nel.org>
Subject: [PATCH next v1 1/2] dump_stack: move cpu lock to printk.c
dump_stack() implements its own cpu-reentrant spinning lock to
best-effort serialize stack traces in the printk log. However,
there are other functions (such as show_regs()) that can also
benefit from this serialization.
Move the cpu-reentrant spinning lock (cpu lock) into new helper
functions printk_cpu_lock()/printk_cpu_unlock() so that it is
available for others as well. For !CONFIG_PRINTK or !CONFIG_SMP
the cpu lock is a NOP.
Note that having multiple cpu locks in the system can easily
lead to deadlock. Code needing a cpu lock should use the
printk cpu lock, since the printk cpu lock could be acquired
from any code and any context.
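As an illustration of the intended usage (not part of this patch), a
future user such as show_regs() could bracket its output as shown
below; arch_show_regs() is a hypothetical stand-in for the
architecture-specific body:

	void show_regs(struct pt_regs *regs)
	{
		unsigned int cpu_store;
		unsigned long flags;

		/* Serialize this output against backtraces from other CPUs. */
		printk_cpu_lock(&cpu_store, &flags);
		arch_show_regs(regs);		/* stand-in for the real body */
		printk_cpu_unlock(cpu_store, flags);
	}

This mirrors what dump_stack_lvl() does after the conversion in this
patch.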
Signed-off-by: John Ogness <john.ogness@...utronix.de>
---
include/linux/printk.h | 13 ++++++
kernel/printk/printk.c | 92 ++++++++++++++++++++++++++++++++++++++++++
lib/dump_stack.c | 43 ++------------------
3 files changed, 108 insertions(+), 40 deletions(-)
diff --git a/include/linux/printk.h b/include/linux/printk.h
index f589b8b60806..2f2d89b9e728 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -287,6 +287,19 @@ static inline void printk_safe_flush_on_panic(void)
}
#endif
+#if defined(CONFIG_PRINTK) && defined(CONFIG_SMP)
+extern void printk_cpu_lock(unsigned int *cpu_store, unsigned long *flags);
+extern void printk_cpu_unlock(unsigned int cpu_store, unsigned long flags);
+#else
+static inline void printk_cpu_lock(unsigned int *cpu_store, unsigned long *flags)
+{
+}
+
+static inline void printk_cpu_unlock(unsigned int cpu_store, unsigned long flags)
+{
+}
+#endif
+
extern int kptr_restrict;
/**
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 114e9963f903..98feead621ff 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -3531,4 +3531,96 @@ void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
}
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
+#ifdef CONFIG_SMP
+static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1);
+
+/*
+ * printk_cpu_lock: Acquire the printk cpu-reentrant spinning lock.
+ * @cpu_store: A buffer to store lock state.
+ * @flags: A buffer to store irq state.
+ *
+ * If no processor has the lock, the calling processor takes the lock and
+ * becomes the owner. If the calling processor is already the owner of the
+ * lock, this function succeeds immediately. If the lock is locked by another
+ * processor, this function spins until the calling processor becomes the
+ * owner.
+ *
+ * It is safe to call this function from any context and state.
+ */
+void printk_cpu_lock(unsigned int *cpu_store, unsigned long *flags)
+{
+ unsigned int cpu;
+
+ for (;;) {
+ cpu = get_cpu();
+
+ *cpu_store = atomic_read(&printk_cpulock_owner);
+
+ if (*cpu_store == -1) {
+ local_irq_save(*flags);
+
+ /*
+ * Guarantee loads and stores from the previous lock
+ * owner are visible to this CPU once it is the lock
+ * owner. This pairs with cpu_unlock:B.
+ *
+ * Memory barrier involvement:
+ *
+ * If cpu_lock:A reads from cpu_unlock:B, then
+ * cpu_lock:B reads from cpu_unlock:A.
+ *
+ * Relies on:
+ *
+ * RELEASE from cpu_unlock:A to cpu_unlock:B
+ * matching
+ * ACQUIRE from cpu_lock:A to cpu_lock:B
+ */
+ if (atomic_try_cmpxchg_acquire(&printk_cpulock_owner,
+ cpu_store, cpu)) { /* LMM(cpu_lock:A) */
+
+ /* This CPU begins loading/storing data: LMM(cpu_lock:B) */
+ break;
+ }
+
+ local_irq_restore(*flags);
+
+ } else if (*cpu_store == cpu) {
+ break;
+ }
+
+ put_cpu();
+ cpu_relax();
+ }
+}
+EXPORT_SYMBOL(printk_cpu_lock);
+
+/*
+ * printk_cpu_unlock: Release the printk cpu-reentrant spinning lock.
+ * @cpu_store: The current lock state.
+ * @flags: The current irq state.
+ *
+ * Release the lock. The calling processor must be the owner of the lock.
+ *
+ * It is safe to call this function from any context and state.
+ */
+void printk_cpu_unlock(unsigned int cpu_store, unsigned long flags)
+{
+ if (cpu_store == -1) {
+ /* This CPU is finished loading/storing data: LMM(cpu_unlock:A) */
+
+ /*
+ * Guarantee loads and stores from this CPU when it is the lock
+ * owner are visible to the next lock owner. This pairs with
+ * cpu_lock:A.
+ */
+ atomic_set_release(&printk_cpulock_owner, cpu_store); /* LMM(cpu_unlock:B) */
+
+ local_irq_restore(flags);
+ }
+
+ put_cpu();
+}
+EXPORT_SYMBOL(printk_cpu_unlock);
+#endif /* CONFIG_SMP */
+
#endif
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 6e7ca3d67710..88f13250f29d 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -93,52 +93,15 @@ static void __dump_stack(const char *log_lvl)
*
* Architectures can override this implementation by implementing its own.
*/
-#ifdef CONFIG_SMP
-static atomic_t dump_lock = ATOMIC_INIT(-1);
-
asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
{
+ unsigned int cpu_store;
unsigned long flags;
- int was_locked;
- int old;
- int cpu;
-
- /*
- * Permit this cpu to perform nested stack dumps while serialising
- * against other CPUs
- */
-retry:
- local_irq_save(flags);
- cpu = smp_processor_id();
- old = atomic_cmpxchg(&dump_lock, -1, cpu);
- if (old == -1) {
- was_locked = 0;
- } else if (old == cpu) {
- was_locked = 1;
- } else {
- local_irq_restore(flags);
- /*
- * Wait for the lock to release before jumping to
- * atomic_cmpxchg() in order to mitigate the thundering herd
- * problem.
- */
- do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
- goto retry;
- }
-
- __dump_stack(log_lvl);
-
- if (!was_locked)
- atomic_set(&dump_lock, -1);
- local_irq_restore(flags);
-}
-#else
-asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
-{
+ printk_cpu_lock(&cpu_store, &flags);
__dump_stack(log_lvl);
+ printk_cpu_unlock(cpu_store, flags);
}
-#endif
EXPORT_SYMBOL(dump_stack_lvl);
asmlinkage __visible void dump_stack(void)
--
2.20.1