[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-Id: <20210201102601.87168-1-songmuchun@bytedance.com>
Date: Mon, 1 Feb 2021 18:26:01 +0800
From: Muchun Song <songmuchun@...edance.com>
To: pmladek@...e.com, sergey.senozhatsky@...il.com,
rostedt@...dmis.org, john.ogness@...utronix.de,
akpm@...ux-foundation.org
Cc: linux-kernel@...r.kernel.org,
Muchun Song <songmuchun@...edance.com>
Subject: [PATCH] printk: fix deadlock when kernel panic
We found a deadlock bug on our server when the kernel panics. It can be
described by the following diagram.
CPU0: CPU1:
panic rcu_dump_cpu_stacks
kdump_nmi_shootdown_cpus nmi_trigger_cpumask_backtrace
register_nmi_handler(crash_nmi_callback) printk_safe_flush
__printk_safe_flush
raw_spin_lock_irqsave(&read_lock)
// send NMI to other processors
apic_send_IPI_allbutself(NMI_VECTOR)
// NMI interrupt, dead loop
crash_nmi_callback
printk_safe_flush_on_panic
printk_safe_flush
__printk_safe_flush
// deadlock
raw_spin_lock_irqsave(&read_lock)
register_nmi_handler() can be called from __crash_kexec() or
crash_smp_send_stop() on x86-64. Because CPU1 is interrupted by the
NMI while holding the read_lock, and crash_nmi_callback() never returns,
CPU0 deadlocks when printk_safe_flush_on_panic() is called.
Similarly, if we hold the read_lock and are then interrupted by an NMI
whose handler calls nmi_panic(), that can also lead to a deadlock.
In order to fix it, printk_safe_flush_on_panic() should flush the
per-CPU buffers without taking the read_lock.
Fixes: cf9b1106c81c ("printk/nmi: flush NMI messages on the system panic")
Signed-off-by: Muchun Song <songmuchun@...edance.com>
---
kernel/printk/printk_safe.c | 58 ++++++++++++++++++++++++++-------------------
1 file changed, 34 insertions(+), 24 deletions(-)
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index a0e6f746de6c..86d9fa74ac5c 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -174,30 +174,13 @@ static void report_message_lost(struct printk_safe_seq_buf *s)
printk_deferred("Lost %d message(s)!\n", lost);
}
-/*
- * Flush data from the associated per-CPU buffer. The function
- * can be called either via IRQ work or independently.
- */
-static void __printk_safe_flush(struct irq_work *work)
+static void __printk_safe_flush_work(struct irq_work *work)
{
- static raw_spinlock_t read_lock =
- __RAW_SPIN_LOCK_INITIALIZER(read_lock);
struct printk_safe_seq_buf *s =
container_of(work, struct printk_safe_seq_buf, work);
- unsigned long flags;
size_t len;
- int i;
+ int i = 0;
- /*
- * The lock has two functions. First, one reader has to flush all
- * available message to make the lockless synchronization with
- * writers easier. Second, we do not want to mix messages from
- * different CPUs. This is especially important when printing
- * a backtrace.
- */
- raw_spin_lock_irqsave(&read_lock, flags);
-
- i = 0;
more:
len = atomic_read(&s->len);
@@ -232,6 +215,26 @@ static void __printk_safe_flush(struct irq_work *work)
out:
report_message_lost(s);
+}
+
+/*
+ * Flush data from the associated per-CPU buffer. The function
+ * can be called either via IRQ work or independently.
+ */
+static void printk_safe_flush_work(struct irq_work *work)
+{
+ unsigned long flags;
+ static DEFINE_RAW_SPINLOCK(read_lock);
+
+ /*
+ * The lock has two functions. First, one reader has to flush all
+ * available message to make the lockless synchronization with
+ * writers easier. Second, we do not want to mix messages from
+ * different CPUs. This is especially important when printing
+ * a backtrace.
+ */
+ raw_spin_lock_irqsave(&read_lock, flags);
+ __printk_safe_flush_work(work);
raw_spin_unlock_irqrestore(&read_lock, flags);
}
@@ -248,9 +251,9 @@ void printk_safe_flush(void)
for_each_possible_cpu(cpu) {
#ifdef CONFIG_PRINTK_NMI
- __printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
+ printk_safe_flush_work(&per_cpu(nmi_print_seq, cpu).work);
#endif
- __printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
+ printk_safe_flush_work(&per_cpu(safe_print_seq, cpu).work);
}
}
@@ -266,6 +269,8 @@ void printk_safe_flush(void)
*/
void printk_safe_flush_on_panic(void)
{
+ int cpu;
+
/*
* Make sure that we could access the main ring buffer.
* Do not risk a double release when more CPUs are up.
@@ -278,7 +283,12 @@ void printk_safe_flush_on_panic(void)
raw_spin_lock_init(&logbuf_lock);
}
- printk_safe_flush();
+ for_each_possible_cpu(cpu) {
+#ifdef CONFIG_PRINTK_NMI
+ __printk_safe_flush_work(&per_cpu(nmi_print_seq, cpu).work);
+#endif
+ __printk_safe_flush_work(&per_cpu(safe_print_seq, cpu).work);
+ }
}
#ifdef CONFIG_PRINTK_NMI
@@ -401,11 +411,11 @@ void __init printk_safe_init(void)
struct printk_safe_seq_buf *s;
s = &per_cpu(safe_print_seq, cpu);
- init_irq_work(&s->work, __printk_safe_flush);
+ init_irq_work(&s->work, printk_safe_flush_work);
#ifdef CONFIG_PRINTK_NMI
s = &per_cpu(nmi_print_seq, cpu);
- init_irq_work(&s->work, __printk_safe_flush);
+ init_irq_work(&s->work, printk_safe_flush_work);
#endif
}
--
2.11.0
Powered by blists - more mailing lists