Message-ID: <aEmMdirOERyv8COG@pathway.suse.cz>
Date: Wed, 11 Jun 2025 16:02:30 +0200
From: Petr Mladek <pmladek@...e.com>
To: Thomas Weißschuh <thomas.weissschuh@...utronix.de>
Cc: Steven Rostedt <rostedt@...dmis.org>,
John Ogness <john.ogness@...utronix.de>,
Sergey Senozhatsky <senozhatsky@...omium.org>,
Kees Cook <kees@...nel.org>,
"Gustavo A. R. Silva" <gustavoars@...nel.org>,
John Ogness <jogness@...utronix.de>,
David Gow <davidgow@...gle.com>, linux-kernel@...r.kernel.org,
linux-hardening@...r.kernel.org
Subject: Re: [PATCH v2] printk: ringbuffer: Add KUnit test
On Tue 2025-05-06 13:58:48, Thomas Weißschuh wrote:
> The KUnit test validates the correct operation of the ringbuffer.
> A separate dedicated ringbuffer is used so that the global printk
> ringbuffer is not touched.
>
> Co-developed-by: John Ogness <john.ogness@...utronix.de>
> Signed-off-by: John Ogness <john.ogness@...utronix.de>
> Signed-off-by: Thomas Weißschuh <thomas.weissschuh@...utronix.de>
> --- a/kernel/printk/printk_ringbuffer.c
> +++ b/kernel/printk/printk_ringbuffer.c
> @@ -1685,6 +1686,7 @@ bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
> memset(r, 0, sizeof(*r));
> return false;
> }
> +EXPORT_SYMBOL_IF_KUNIT(prb_reserve);
>
> /* Commit the data (possibly finalizing it) and restore interrupts. */
> static void _prb_commit(struct prb_reserved_entry *e, unsigned long state_val)
[...]
I had to add:
@@ -2337,6 +2337,7 @@ void prb_init(struct printk_ringbuffer *rb,
infos[0].seq = -(u64)_DESCS_COUNT(descbits);
infos[_DESCS_COUNT(descbits) - 1].seq = 0;
}
+EXPORT_SYMBOL_IF_KUNIT(prb_init);
/**
* prb_record_text_space() - Query the full actual used ringbuffer space for
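(Side note: if I read include/kunit/visibility.h correctly,
EXPORT_SYMBOL_IF_KUNIT() exports the symbol only when CONFIG_KUNIT is
enabled and puts it into the EXPORTED_FOR_KUNIT_TESTING namespace.
So printk_ringbuffer.c needs #include <kunit/visibility.h> and the
test module needs something like

	MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");

to be able to call it.)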
> diff --git a/kernel/printk/printk_ringbuffer_kunit_test.c b/kernel/printk/printk_ringbuffer_kunit_test.c
> new file mode 100644
> index 0000000000000000000000000000000000000000..0d60b2273b710b9b7ecf41b37503beeb76703054
> --- /dev/null
> +++ b/kernel/printk/printk_ringbuffer_kunit_test.c
> @@ -0,0 +1,292 @@
[...]
> +struct prbtest_wakeup_timer {
> + struct timer_list timer;
> + struct task_struct *task;
> +};
> +
> +static void prbtest_wakeup_callback(struct timer_list *timer)
> +{
> + struct prbtest_wakeup_timer *wakeup = from_timer(wakeup, timer, timer);
Just for the record: the function from_timer() has been renamed
to timer_container_of() by commit 41cb08555c4164996
("treewide, timers: Rename from_timer() to timer_container_of()")
in v6.16-rc1.
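With the rename applied, the callback would look like this (the same
change as in the diff at the end of this mail):

static void prbtest_wakeup_callback(struct timer_list *timer)
{
	struct prbtest_wakeup_timer *wakeup = timer_container_of(wakeup, timer, timer);

	set_tsk_thread_flag(wakeup->task, TIF_NOTIFY_SIGNAL);
	wake_up_process(wakeup->task);
}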
> + set_tsk_thread_flag(wakeup->task, TIF_NOTIFY_SIGNAL);
> + wake_up_process(wakeup->task);
> +}
> +
> +static int prbtest_reader(struct prbtest_data *test_data, unsigned long timeout_ms)
> +{
> + struct prbtest_wakeup_timer wakeup;
> + char text_buf[MAX_PRB_RECORD_SIZE];
> + unsigned long count = 0;
> + struct printk_info info;
> + struct printk_record r;
> + u64 seq = 0;
> +
> + wakeup.task = current;
> + timer_setup_on_stack(&wakeup.timer, prbtest_wakeup_callback, 0);
> + mod_timer(&wakeup.timer, jiffies + msecs_to_jiffies(timeout_ms));
> +
> + prb_rec_init_rd(&r, &info, text_buf, sizeof(text_buf));
> +
> + kunit_info(test_data->test, "start reader\n");
> +
> + while (!wait_event_interruptible(test_data->new_record_wait,
> + prb_read_valid(test_data->ringbuffer, seq, &r))) {
> + /* check/track the sequence */
> + if (info.seq < seq)
> + KUNIT_FAIL(test_data->test, "BAD SEQ READ: request=%llu read=%llu\n",
> + seq, info.seq);
> +
> + if (!prbtest_check_data((struct prbtest_rbdata *)r.text_buf))
> + prbtest_fail_record(test_data->test,
> + (struct prbtest_rbdata *)r.text_buf, info.seq);
> +
> + if ((count++ & 0x3fff) == 0)
> + cond_resched();
> +
> + seq = info.seq + 1;
> + }
> +
> + timer_delete_sync(&wakeup.timer);
> + destroy_timer_on_stack(&wakeup.timer);
Also, this function has been renamed by commit
aad823aa3a7d675a8d0 ("treewide, timers: Rename
destroy_timer_on_stack() as timer_destroy_on_stack()")
in v6.16-rc1.
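I.e. the cleanup at the end of prbtest_reader() would become (again,
the same change as in the diff at the end of this mail):

	timer_delete_sync(&wakeup.timer);
	timer_destroy_on_stack(&wakeup.timer);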
> +
> + kunit_info(test_data->test, "end reader: read=%lu seq=%llu\n", count, info.seq);
> +
> + return 0;
> +}
[...]
> +static void test_readerwriter(struct kunit *test)
> +{
> + /* Equivalent to CONFIG_LOG_BUF_SHIFT=13 */
> + DEFINE_PRINTKRB(test_rb, 8, 5);
> +
> + struct prbtest_thread_data *thread_data;
> + struct prbtest_data *test_data;
> + struct task_struct *thread;
> + cpumask_t test_cpus;
> + int cpu, reader_cpu;
> +
> + cpus_read_lock();
> + /*
> + * Failure of KUNIT_ASSERT() kills the current task
> + * so it can not be called while the CPU hotplug lock is held.
> + * Instead use a snapshot of the online CPUs.
> + * If they change during test execution it is unfortunate but not a grave error.
> + */
> + cpumask_copy(&test_cpus, cpu_online_mask);
> + cpus_read_unlock();
> +
> + /* One CPU is for the reader, all others are writers */
> + reader_cpu = cpumask_first(&test_cpus);
> + if (cpumask_weight(&test_cpus) == 1)
> + kunit_warn(test, "more than one CPU is recommended");
> + else
> + cpumask_clear_cpu(reader_cpu, &test_cpus);
> +
I was curious why the reinit (below) was needed. We did not need this for
the default printk log buffer (printk_rb_static).
My understanding is that we need this because the unit test might be
run multiple times. It might be worth a comment right above the call.
Something like:
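	/* KUnit tests can be restarted multiple times. */
	prbtest_prb_reinit(&test_rb);

(This is also what the diff at the end of this mail adds.)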
> + prbtest_prb_reinit(&test_rb);
> +
> + test_data = kunit_kmalloc(test, sizeof(*test_data), GFP_KERNEL);
> + KUNIT_ASSERT_NOT_NULL(test, test_data);
> + test_data->test = test;
> + test_data->ringbuffer = &test_rb;
> + init_waitqueue_head(&test_data->new_record_wait);
> +
> + kunit_info(test, "running for %lu ms\n", runtime_ms);
> +
> + for_each_cpu(cpu, &test_cpus) {
> + thread_data = kunit_kmalloc(test, sizeof(*thread_data), GFP_KERNEL);
> + KUNIT_ASSERT_NOT_NULL(test, thread_data);
> + thread_data->test_data = test_data;
> + thread_data->num = cpu;
> +
> + thread = kthread_run_on_cpu(prbtest_writer, thread_data, cpu,
> + "prbtest writer %u");
> + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, thread);
> + prbtest_add_kthread_cleanup(test, thread);
> + }
> +
> + kunit_info(test, "starting test\n");
> +
> + set_cpus_allowed_ptr(current, cpumask_of(reader_cpu));
> + prbtest_reader(test_data, runtime_ms);
> +
> + kunit_info(test, "completed test\n");
> +}
Otherwise, it looks good to me.
I would push it with the timer API renames so that it works in
Linus' master and linux-next out of the box. So, with the following
changes:
Reviewed-by: Petr Mladek <pmladek@...e.com>
Tested-by: Petr Mladek <pmladek@...e.com>
diff --git a/kernel/printk/printk_ringbuffer.c b/kernel/printk/printk_ringbuffer.c
index 7d75cf2e15c6..bc811de18316 100644
--- a/kernel/printk/printk_ringbuffer.c
+++ b/kernel/printk/printk_ringbuffer.c
@@ -2337,6 +2337,7 @@ void prb_init(struct printk_ringbuffer *rb,
infos[0].seq = -(u64)_DESCS_COUNT(descbits);
infos[_DESCS_COUNT(descbits) - 1].seq = 0;
}
+EXPORT_SYMBOL_IF_KUNIT(prb_init);
/**
* prb_record_text_space() - Query the full actual used ringbuffer space for
diff --git a/kernel/printk/printk_ringbuffer_kunit_test.c b/kernel/printk/printk_ringbuffer_kunit_test.c
index 0d60b2273b71..6a24294b85f5 100644
--- a/kernel/printk/printk_ringbuffer_kunit_test.c
+++ b/kernel/printk/printk_ringbuffer_kunit_test.c
@@ -153,7 +153,7 @@ struct prbtest_wakeup_timer {
static void prbtest_wakeup_callback(struct timer_list *timer)
{
- struct prbtest_wakeup_timer *wakeup = from_timer(wakeup, timer, timer);
+ struct prbtest_wakeup_timer *wakeup = timer_container_of(wakeup, timer, timer);
set_tsk_thread_flag(wakeup->task, TIF_NOTIFY_SIGNAL);
wake_up_process(wakeup->task);
@@ -194,7 +194,7 @@ static int prbtest_reader(struct prbtest_data *test_data, unsigned long timeout_
}
timer_delete_sync(&wakeup.timer);
- destroy_timer_on_stack(&wakeup.timer);
+ timer_destroy_on_stack(&wakeup.timer);
kunit_info(test_data->test, "end reader: read=%lu seq=%llu\n", count, info.seq);
@@ -245,6 +245,7 @@ static void test_readerwriter(struct kunit *test)
else
cpumask_clear_cpu(reader_cpu, &test_cpus);
+ /* KUnit tests can be restarted multiple times. */
prbtest_prb_reinit(&test_rb);
test_data = kunit_kmalloc(test, sizeof(*test_data), GFP_KERNEL);
Best Regards,
Petr