Message-Id: <20161027154933.1211-3-sergey.senozhatsky@gmail.com>
Date: Fri, 28 Oct 2016 00:49:29 +0900
From: Sergey Senozhatsky <sergey.senozhatsky@...il.com>
To: Petr Mladek <pmladek@...e.com>,
Andrew Morton <akpm@...ux-foundation.org>
Cc: Jan Kara <jack@...e.cz>, Tejun Heo <tj@...nel.org>,
Calvin Owens <calvinowens@...com>,
Thomas Gleixner <tglx@...utronix.de>,
Mel Gorman <mgorman@...hsingularity.net>,
Steven Rostedt <rostedt@...dmis.org>,
Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Laura Abbott <labbott@...hat.com>,
Andy Lutomirski <luto@...nel.org>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Kees Cook <keescook@...omium.org>,
linux-kernel@...r.kernel.org,
Sergey Senozhatsky <sergey.senozhatsky@...il.com>,
Sergey Senozhatsky <sergey.senozhatsky.work@...il.com>
Subject: [RFC][PATCHv4 2/6] printk: rename nmi.c and exported api
A preparation patch for printk_safe work. No functional change.
- rename nmi.c to printk_safe.c
- rename the exported functions to use the `printk_safe' prefix.
Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@...il.com>
Acked-by: Steven Rostedt <rostedt@...dmis.org>
---
arch/arm/kernel/smp.c | 4 +-
include/linux/hardirq.h | 4 +-
include/linux/printk.h | 20 +++++-----
init/Kconfig | 16 ++++----
init/main.c | 2 +-
kernel/kexec_core.c | 2 +-
kernel/panic.c | 4 +-
kernel/printk/Makefile | 2 +-
kernel/printk/{nmi.c => printk_safe.c} | 69 +++++++++++++++++-----------------
lib/nmi_backtrace.c | 2 +-
10 files changed, 64 insertions(+), 61 deletions(-)
rename kernel/printk/{nmi.c => printk_safe.c} (77%)
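[ Not part of the patch, a note for reviewers: the entry points being renamed
  only switch the per-CPU printk_func pointer between vprintk_default and the
  buffering NMI variant; the buffered text is then flushed from a safe context
  via irq_work. The fragment below is a rough userspace sketch of that
  redirection pattern, just to make the scope of the rename obvious. All names
  in it (my_printk, safe_enter, safe_flush, ...) are made up for illustration
  and are not the kernel API. ]

#include <stdarg.h>
#include <stdio.h>

static char safe_buf[4096];
static size_t safe_len;

/* Normal path: print directly. */
static int vprintk_default(const char *fmt, va_list args)
{
	return vprintf(fmt, args);
}

/* "Safe" path: append to a buffer instead of printing. */
static int vprintk_safe(const char *fmt, va_list args)
{
	int add = vsnprintf(safe_buf + safe_len,
			    sizeof(safe_buf) - safe_len, fmt, args);

	/* On overflow the message is dropped, akin to nmi_message_lost. */
	if (add > 0 && (size_t)add < sizeof(safe_buf) - safe_len)
		safe_len += add;
	return add;
}

/* Models the per-CPU printk_func variable (a single global here). */
static int (*printk_func)(const char *fmt, va_list args) = vprintk_default;

static int my_printk(const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = printk_func(fmt, args);	/* indirect call through the pointer */
	va_end(args);
	return ret;
}

/* Models printk_safe_nmi_enter()/exit(): just flip the pointer. */
static void safe_enter(void) { printk_func = vprintk_safe; }
static void safe_exit(void)  { printk_func = vprintk_default; }

/* Models the irq_work flush: emit the deferred text from a safe context. */
static void safe_flush(void)
{
	fwrite(safe_buf, 1, safe_len, stdout);
	safe_len = 0;
}

int main(void)
{
	my_printk("direct message\n");

	safe_enter();
	my_printk("deferred message\n");	/* lands in safe_buf */
	safe_exit();

	safe_flush();				/* printed only here */
	return 0;
}

[ Running the sketch prints "direct message" immediately and "deferred
  message" only once safe_flush() runs on the normal path. ]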
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 7dd14e8..8b056e1 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -644,11 +644,11 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
break;
case IPI_CPU_BACKTRACE:
- printk_nmi_enter();
+ printk_safe_nmi_enter();
irq_enter();
nmi_cpu_backtrace(regs);
irq_exit();
- printk_nmi_exit();
+ printk_safe_nmi_exit();
break;
default:
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index c683996..14840aa 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -61,7 +61,7 @@ extern void irq_exit(void);
#define nmi_enter() \
do { \
- printk_nmi_enter(); \
+ printk_safe_nmi_enter(); \
lockdep_off(); \
ftrace_nmi_enter(); \
BUG_ON(in_nmi()); \
@@ -78,7 +78,7 @@ extern void irq_exit(void);
preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
ftrace_nmi_exit(); \
lockdep_on(); \
- printk_nmi_exit(); \
+ printk_safe_nmi_exit(); \
} while (0)
#endif /* LINUX_HARDIRQ_H */
diff --git a/include/linux/printk.h b/include/linux/printk.h
index eac1af8..c9a3080 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -132,17 +132,17 @@ void early_printk(const char *s, ...) { }
#endif
#ifdef CONFIG_PRINTK_NMI
-extern void printk_nmi_init(void);
-extern void printk_nmi_enter(void);
-extern void printk_nmi_exit(void);
-extern void printk_nmi_flush(void);
-extern void printk_nmi_flush_on_panic(void);
+extern void printk_safe_init(void);
+extern void printk_safe_nmi_enter(void);
+extern void printk_safe_nmi_exit(void);
+extern void printk_safe_flush(void);
+extern void printk_safe_flush_on_panic(void);
#else
-static inline void printk_nmi_init(void) { }
-static inline void printk_nmi_enter(void) { }
-static inline void printk_nmi_exit(void) { }
-static inline void printk_nmi_flush(void) { }
-static inline void printk_nmi_flush_on_panic(void) { }
+static inline void printk_safe_init(void) { }
+static inline void printk_safe_nmi_enter(void) { }
+static inline void printk_safe_nmi_exit(void) { }
+static inline void printk_safe_flush(void) { }
+static inline void printk_safe_flush_on_panic(void) { }
#endif /* PRINTK_NMI */
#ifdef CONFIG_PRINTK
diff --git a/init/Kconfig b/init/Kconfig
index 172f80e..1f43175 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -875,17 +875,19 @@ config LOG_CPU_MAX_BUF_SHIFT
13 => 8 KB for each CPU
12 => 4 KB for each CPU
-config NMI_LOG_BUF_SHIFT
- int "Temporary per-CPU NMI log buffer size (12 => 4KB, 13 => 8KB)"
+config PRINTK_SAFE_LOG_BUF_SHIFT
+ int "Temporary per-CPU printk log buffer size (12 => 4KB, 13 => 8KB)"
range 10 21
default 13
- depends on PRINTK_NMI
+ depends on PRINTK
help
- Select the size of a per-CPU buffer where NMI messages are temporary
- stored. They are copied to the main log buffer in a safe context
- to avoid a deadlock. The value defines the size as a power of 2.
+ Select the size of an alternate printk per-CPU buffer where messages
+ printed from unsafe contexts are temporarily stored. One example would
+ be NMI messages, another one - printk recursion. The messages are
+ copied to the main log buffer in a safe context to avoid a deadlock.
+ The value defines the size as a power of 2.
- NMI messages are rare and limited. The largest one is when
+ Those messages are rare and limited. The largest one is when
a backtrace is printed. It usually fits into 4KB. Select
8KB if you want to be on the safe side.
diff --git a/init/main.c b/init/main.c
index cbed073..365cee2 100644
--- a/init/main.c
+++ b/init/main.c
@@ -577,7 +577,7 @@ asmlinkage __visible void __init start_kernel(void)
timekeeping_init();
time_init();
sched_clock_postinit();
- printk_nmi_init();
+ printk_safe_init();
perf_event_init();
profile_init();
call_function_init();
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 5616755..297508a1 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -914,7 +914,7 @@ void crash_kexec(struct pt_regs *regs)
old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
if (old_cpu == PANIC_CPU_INVALID) {
/* This is the 1st CPU which comes here, so go ahead. */
- printk_nmi_flush_on_panic();
+ printk_safe_flush_on_panic();
__crash_kexec(regs);
/*
diff --git a/kernel/panic.c b/kernel/panic.c
index e6480e2..67c11fd 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -188,7 +188,7 @@ void panic(const char *fmt, ...)
* Bypass the panic_cpu check and call __crash_kexec directly.
*/
if (!_crash_kexec_post_notifiers) {
- printk_nmi_flush_on_panic();
+ printk_safe_flush_on_panic();
__crash_kexec(NULL);
/*
@@ -213,7 +213,7 @@ void panic(const char *fmt, ...)
atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
/* Call flush even twice. It tries harder with a single online CPU */
- printk_nmi_flush_on_panic();
+ printk_safe_flush_on_panic();
kmsg_dump(KMSG_DUMP_PANIC);
/*
diff --git a/kernel/printk/Makefile b/kernel/printk/Makefile
index abb0042..6079281 100644
--- a/kernel/printk/Makefile
+++ b/kernel/printk/Makefile
@@ -1,3 +1,3 @@
obj-y = printk.o
-obj-$(CONFIG_PRINTK_NMI) += nmi.o
+obj-$(CONFIG_PRINTK_NMI) += printk_safe.o
obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
diff --git a/kernel/printk/nmi.c b/kernel/printk/printk_safe.c
similarity index 77%
rename from kernel/printk/nmi.c
rename to kernel/printk/printk_safe.c
index 16bab47..1f66163 100644
--- a/kernel/printk/nmi.c
+++ b/kernel/printk/printk_safe.c
@@ -1,5 +1,5 @@
/*
- * nmi.c - Safe printk in NMI context
+ * printk_safe.c - Safe printk in NMI context
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -39,18 +39,18 @@
* were handled or when IRQs are blocked.
*/
DEFINE_PER_CPU(printk_func_t, printk_func) = vprintk_default;
-static int printk_nmi_irq_ready;
+static int printk_safe_irq_ready;
atomic_t nmi_message_lost;
-#define NMI_LOG_BUF_LEN ((1 << CONFIG_NMI_LOG_BUF_SHIFT) - \
+#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) - \
sizeof(atomic_t) - sizeof(struct irq_work))
-struct nmi_seq_buf {
+struct printk_safe_seq_buf {
atomic_t len; /* length of written data */
struct irq_work work; /* IRQ work that flushes the buffer */
- unsigned char buffer[NMI_LOG_BUF_LEN];
+ unsigned char buffer[SAFE_LOG_BUF_LEN];
};
-static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);
+static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
/*
* Safe printk() for NMI context. It uses a per-CPU buffer to
@@ -58,9 +58,9 @@ static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);
* one writer running. But the buffer might get flushed from another
* CPU, so we need to be careful.
*/
-static int vprintk_nmi(const char *fmt, va_list args)
+static int vprintk_safe_nmi(const char *fmt, va_list args)
{
- struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
+ struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
int add = 0;
size_t len;
@@ -90,7 +90,7 @@ static int vprintk_nmi(const char *fmt, va_list args)
goto again;
/* Get flushed in a more safe context. */
- if (add && printk_nmi_irq_ready) {
+ if (add && printk_safe_irq_ready) {
/* Make sure that IRQ work is really initialized. */
smp_rmb();
irq_work_queue(&s->work);
@@ -99,7 +99,7 @@ static int vprintk_nmi(const char *fmt, va_list args)
return add;
}
-static void printk_nmi_flush_line(const char *text, int len)
+static void printk_safe_flush_line(const char *text, int len)
{
/*
* The buffers are flushed in NMI only on panic. The messages must
@@ -117,23 +117,24 @@ static void printk_nmi_flush_line(const char *text, int len)
* printk one line from the temporary buffer from @start index until
* and including the @end index.
*/
-static void printk_nmi_flush_seq_line(struct nmi_seq_buf *s,
+static void printk_safe_flush_seq_line(struct printk_safe_seq_buf *s,
int start, int end)
{
const char *buf = s->buffer + start;
- printk_nmi_flush_line(buf, (end - start) + 1);
+ printk_safe_flush_line(buf, (end - start) + 1);
}
/*
* Flush data from the associated per_CPU buffer. The function
* can be called either via IRQ work or independently.
*/
-static void __printk_nmi_flush(struct irq_work *work)
+static void __printk_safe_flush(struct irq_work *work)
{
static raw_spinlock_t read_lock =
__RAW_SPIN_LOCK_INITIALIZER(read_lock);
- struct nmi_seq_buf *s = container_of(work, struct nmi_seq_buf, work);
+ struct printk_safe_seq_buf *s = container_of(work,
+ struct printk_safe_seq_buf, work);
unsigned long flags;
size_t len, size;
int i, last_i;
@@ -157,9 +158,9 @@ static void __printk_nmi_flush(struct irq_work *work)
* @len must only increase.
*/
if (i && i >= len) {
- const char *msg = "printk_nmi_flush: internal error\n";
+ const char *msg = "printk_safe_flush: internal error\n";
- printk_nmi_flush_line(msg, strlen(msg));
+ printk_safe_flush_line(msg, strlen(msg));
}
if (!len)
@@ -174,14 +175,14 @@ static void __printk_nmi_flush(struct irq_work *work)
/* Print line by line. */
for (; i < size; i++) {
if (s->buffer[i] == '\n') {
- printk_nmi_flush_seq_line(s, last_i, i);
+ printk_safe_flush_seq_line(s, last_i, i);
last_i = i + 1;
}
}
/* Check if there was a partial line. */
if (last_i < size) {
- printk_nmi_flush_seq_line(s, last_i, size - 1);
- printk_nmi_flush_line("\n", strlen("\n"));
+ printk_safe_flush_seq_line(s, last_i, size - 1);
+ printk_safe_flush_line("\n", strlen("\n"));
}
/*
@@ -198,31 +199,31 @@ static void __printk_nmi_flush(struct irq_work *work)
}
/**
- * printk_nmi_flush - flush all per-cpu nmi buffers.
+ * printk_safe_flush - flush all per-cpu nmi buffers.
*
* The buffers are flushed automatically via IRQ work. This function
* is useful only when someone wants to be sure that all buffers have
* been flushed at some point.
*/
-void printk_nmi_flush(void)
+void printk_safe_flush(void)
{
int cpu;
for_each_possible_cpu(cpu)
- __printk_nmi_flush(&per_cpu(nmi_print_seq, cpu).work);
+ __printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
}
/**
- * printk_nmi_flush_on_panic - flush all per-cpu nmi buffers when the system
+ * printk_safe_flush_on_panic - flush all per-cpu nmi buffers when the system
* goes down.
*
- * Similar to printk_nmi_flush() but it can be called even in NMI context when
+ * Similar to printk_safe_flush() but it can be called even in NMI context when
* the system goes down. It does the best effort to get NMI messages into
* the main ring buffer.
*
* Note that it could try harder when there is only one CPU online.
*/
-void printk_nmi_flush_on_panic(void)
+void printk_safe_flush_on_panic(void)
{
/*
* Make sure that we could access the main ring buffer.
@@ -236,33 +237,33 @@ void printk_nmi_flush_on_panic(void)
raw_spin_lock_init(&logbuf_lock);
}
- printk_nmi_flush();
+ printk_safe_flush();
}
-void __init printk_nmi_init(void)
+void __init printk_safe_init(void)
{
int cpu;
for_each_possible_cpu(cpu) {
- struct nmi_seq_buf *s = &per_cpu(nmi_print_seq, cpu);
+ struct printk_safe_seq_buf *s = &per_cpu(nmi_print_seq, cpu);
- init_irq_work(&s->work, __printk_nmi_flush);
+ init_irq_work(&s->work, __printk_safe_flush);
}
/* Make sure that IRQ works are initialized before enabling. */
smp_wmb();
- printk_nmi_irq_ready = 1;
+ printk_safe_irq_ready = 1;
/* Flush pending messages that did not have scheduled IRQ works. */
- printk_nmi_flush();
+ printk_safe_flush();
}
-void printk_nmi_enter(void)
+void printk_safe_nmi_enter(void)
{
- this_cpu_write(printk_func, vprintk_nmi);
+ this_cpu_write(printk_func, vprintk_safe_nmi);
}
-void printk_nmi_exit(void)
+void printk_safe_nmi_exit(void)
{
this_cpu_write(printk_func, vprintk_default);
}
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 7555475..5f7999e 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -77,7 +77,7 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
* Force flush any remote buffers that might be stuck in IRQ context
* and therefore could not run their irq_work.
*/
- printk_nmi_flush();
+ printk_safe_flush();
clear_bit_unlock(0, &backtrace_flag);
put_cpu();
--
2.10.1.502.g6598894