[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20240722171939.3349410-12-john.ogness@linutronix.de>
Date: Mon, 22 Jul 2024 19:25:31 +0206
From: John Ogness <john.ogness@...utronix.de>
To: Petr Mladek <pmladek@...e.com>
Cc: Sergey Senozhatsky <senozhatsky@...omium.org>,
Steven Rostedt <rostedt@...dmis.org>,
Thomas Gleixner <tglx@...utronix.de>,
linux-kernel@...r.kernel.org
Subject: [PATCH printk v3 11/19] printk: nbcon: Rely on kthreads for normal operation
Once the kthread is running and available
(i.e. @printk_kthreads_running is set), the kthread becomes
responsible for flushing any pending messages which are added
in NBCON_PRIO_NORMAL context. Namely the legacy
console_flush_all() and device_release() no longer flush the
console. And nbcon_atomic_flush_pending() used by
nbcon_cpu_emergency_exit() no longer flushes messages added
after the emergency messages.
The console context is safe when used by the kthread only when
one of the following conditions is true:
1. Other caller acquires the console context with
NBCON_PRIO_NORMAL with preemption disabled. It will
release the context before rescheduling.
2. Other caller acquires the console context with
NBCON_PRIO_NORMAL under the device_lock.
3. The kthread is the only context which acquires the console
with NBCON_PRIO_NORMAL.
This is satisfied for all atomic printing call sites:
nbcon_legacy_emit_next_record() (#1)
nbcon_atomic_flush_pending_con() (#1)
nbcon_device_release() (#2)
It is even double guaranteed when @printk_kthreads_running
is set because then _only_ the kthread will print for
NBCON_PRIO_NORMAL. (#3)
Signed-off-by: John Ogness <john.ogness@...utronix.de>
---
kernel/printk/internal.h | 6 +++++-
kernel/printk/nbcon.c | 13 ++++++------
kernel/printk/printk.c | 46 +++++++++++++++++++++++++++++++++++++++-
3 files changed, 57 insertions(+), 8 deletions(-)
diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index bb02788acc7c..66321836c3fe 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -190,11 +190,13 @@ extern bool legacy_allow_panic_sync;
/**
* struct console_flush_type - Define how to flush the consoles
* @nbcon_atomic: Flush directly using nbcon_atomic() callback
+ * @nbcon_offload: Offload flush to printer thread
* @legacy_direct: Call the legacy loop in this context
* @legacy_offload: Offload the legacy loop into IRQ
*/
struct console_flush_type {
bool nbcon_atomic;
+ bool nbcon_offload;
bool legacy_direct;
bool legacy_offload;
};
@@ -220,7 +222,9 @@ static inline void printk_get_console_flush_type(struct console_flush_type *ft,
ft->legacy_direct = true;
}
- if (have_nbcon_console && !have_boot_console)
+ if (printk_kthreads_running)
+ ft->nbcon_offload = true;
+ else if (have_nbcon_console && !have_boot_console)
ft->nbcon_atomic = true;
break;
diff --git a/kernel/printk/nbcon.c b/kernel/printk/nbcon.c
index 233ab8f90fef..8cf9e9e8c6e4 100644
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
@@ -1511,10 +1511,10 @@ static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
/*
* If flushing was successful but more records are available, this
- * context must flush those remaining records because there is no
- * other context that will do it.
+ * context must flush those remaining records if the printer thread
+ * is not available to do it.
*/
- printk_get_console_flush_type(&ft, false);
+ printk_get_console_flush_type(&ft, true);
if (ft.nbcon_atomic &&
prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
stop_seq = prb_next_reserve_seq(prb);
@@ -1809,10 +1809,11 @@ void nbcon_device_release(struct console *con)
/*
* This context must flush any new records added while the console
- * was locked. The console_srcu_read_lock must be taken to ensure
- * the console is usable throughout flushing.
+ * was locked if the printer thread is not available to do it. The
+ * console_srcu_read_lock must be taken to ensure the console is
+ * usable throughout flushing.
*/
- printk_get_console_flush_type(&ft, false);
+ printk_get_console_flush_type(&ft, true);
cookie = console_srcu_read_lock();
if (ft.nbcon_atomic &&
console_is_usable(con, console_srcu_read_flags(con), true) &&
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 71e946a8c5fa..620c02b069bc 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2382,6 +2382,9 @@ asmlinkage int vprintk_emit(int facility, int level,
if (ft.nbcon_atomic)
nbcon_atomic_flush_pending();
+ if (ft.nbcon_offload)
+ nbcon_wake_kthreads();
+
if (!defer_legacy && ft.legacy_direct) {
/*
* The caller may be holding system-critical or
@@ -2680,6 +2683,7 @@ void suspend_console(void)
void resume_console(void)
{
+ struct console_flush_type ft;
struct console *con;
if (!console_suspend_enabled)
@@ -2697,6 +2701,10 @@ void resume_console(void)
*/
synchronize_srcu(&console_srcu);
+ printk_get_console_flush_type(&ft, true);
+ if (ft.nbcon_offload)
+ nbcon_wake_kthreads();
+
pr_flush(1000, true);
}
@@ -3007,6 +3015,7 @@ static inline void printk_kthreads_check_locked(void) { }
*/
static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
{
+ struct console_flush_type ft;
bool any_usable = false;
struct console *con;
bool any_progress;
@@ -3018,12 +3027,21 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
do {
any_progress = false;
+ printk_get_console_flush_type(&ft, true);
+
cookie = console_srcu_read_lock();
for_each_console_srcu(con) {
short flags = console_srcu_read_flags(con);
u64 printk_seq;
bool progress;
+ /*
+ * console_flush_all() is only for legacy consoles when
+ * the nbcon consoles have their printer threads.
+ */
+ if ((flags & CON_NBCON) && ft.nbcon_offload)
+ continue;
+
if (!console_is_usable(con, flags, !do_cond_resched))
continue;
any_usable = true;
@@ -3334,9 +3352,28 @@ EXPORT_SYMBOL(console_stop);
void console_start(struct console *console)
{
+ struct console_flush_type ft;
+ short flags;
+ int cookie;
+
console_list_lock();
console_srcu_write_flags(console, console->flags | CON_ENABLED);
console_list_unlock();
+
+ /*
+ * Ensure that all SRCU list walks have completed. The related
+ * printing context must be able to see it is enabled so that
+ * it is guaranteed to wake up and resume printing.
+ */
+ synchronize_srcu(&console_srcu);
+
+ printk_get_console_flush_type(&ft, true);
+ cookie = console_srcu_read_lock();
+ flags = console_srcu_read_flags(console);
+ if ((flags & CON_NBCON) && ft.nbcon_offload)
+ nbcon_kthread_wake(console);
+ console_srcu_read_unlock(cookie);
+
__pr_flush(console, 1000, true);
}
EXPORT_SYMBOL(console_start);
@@ -4102,8 +4139,10 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
* that they make forward progress, so only increment
* @diff for usable consoles.
*/
- if (!console_is_usable(c, flags, true))
+ if (!console_is_usable(c, flags, true) &&
+ !console_is_usable(c, flags, false)) {
continue;
+ }
if (flags & CON_NBCON) {
printk_seq = nbcon_seq_read(c);
@@ -4605,8 +4644,13 @@ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
*/
void console_try_replay_all(void)
{
+ struct console_flush_type ft;
+
+ printk_get_console_flush_type(&ft, true);
if (console_trylock()) {
__console_rewind_all();
+ if (ft.nbcon_offload)
+ nbcon_wake_kthreads();
/* Consoles are flushed as part of console_unlock(). */
console_unlock();
}
--
2.39.2
Powered by blists - more mailing lists