Message-ID: <20251128012412.654828389@kernel.org>
Date: Thu, 27 Nov 2025 20:23:42 -0500
From: Steven Rostedt <rostedt@...nel.org>
To: linux-kernel@...r.kernel.org
Cc: Masami Hiramatsu <mhiramat@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Andrew Morton <akpm@...ux-foundation.org>
Subject: [for-next][PATCH 11/13] tracing: Add boot-time backup of persistent ring buffer
From: "Masami Hiramatsu (Google)" <mhiramat@...nel.org>
Currently, the persistent ring buffer instance needs to be read before it
is used for new tracing. This means we have to wait for user space to boot
and dump the persistent ring buffer first, and in that case we cannot
start tracing on it from the kernel cmdline.
To remove this limitation, add an option that allows creating a trace
instance as a backup of the persistent ring buffer at boot. If the user
specifies trace_instance=<BACKUP>=<PERSIST_RB>, then the <BACKUP> instance
is created as a copy of the <PERSIST_RB> instance.
For example, the kernel cmdline below records all syscall, scheduler and
interrupt events on the persistent ring buffer `boot_map`, but before the
tracing starts, it creates a `backup` instance from `boot_map`. Thus, the
`backup` instance holds the events from the previous boot.
'reserve_mem=12M:4M:trace trace_instance=boot_map@...ce,syscalls:*,sched:*,irq:* trace_instance=backup=boot_map'
As you can see, this just makes a copy of the entire reserved area and
creates a backup instance on it. So you can release (or shrink) the backup
instance after using it to save memory:
/sys/kernel/tracing/instances # free
total used free shared buff/cache available
Mem: 1999284 55704 1930520 10132 13060 1914628
Swap: 0 0 0
/sys/kernel/tracing/instances # rmdir backup/
/sys/kernel/tracing/instances # free
total used free shared buff/cache available
Mem: 1999284 40640 1945584 10132 13060 1929692
Swap: 0 0 0
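The drop in "used" (55704 kB - 40640 kB = 15064 kB, about 14.7 MB) roughly
corresponds to the 12 MB vmalloc'ed copy of the reserved area plus the
instance's per-CPU bookkeeping.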
Note: since there is no reason to make a copy of an empty buffer, this
backup only accepts a persistent ring buffer as the original instance.
Also, since the backup is based on vmalloc(), it does not support
user-space mmap().
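Because mmap() is not available on the backup instance, its contents are
read through the normal read-based tracefs files. A minimal sketch (a
hypothetical session; the output file name is arbitrary) of saving the
previous boot events before releasing the instance:
/sys/kernel/tracing/instances # cat backup/trace > /tmp/prev_boot.trace
/sys/kernel/tracing/instances # rmdir backup/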
Cc: Mathieu Desnoyers <mathieu.desnoyers@...icios.com>
Link: https://patch.msgid.link/176377150002.219692.9425536150438129267.stgit@devnote2
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@...nel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@...dmis.org>
---
kernel/trace/trace.c | 63 +++++++++++++++++++++++++++++++++++++++-----
kernel/trace/trace.h | 1 +
2 files changed, 58 insertions(+), 6 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 032bdedca5d9..73f8b79f1b0c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -9004,8 +9004,8 @@ static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
struct trace_iterator *iter = &info->iter;
int ret = 0;
- /* A memmap'ed buffer is not supported for user space mmap */
- if (iter->tr->flags & TRACE_ARRAY_FL_MEMMAP)
+ /* Memmap'ed and backup buffers are not supported for user space mmap */
+ if (iter->tr->flags & (TRACE_ARRAY_FL_MEMMAP | TRACE_ARRAY_FL_VMALLOC))
return -ENODEV;
ret = get_snapshot_map(iter->tr);
@@ -10520,6 +10520,8 @@ static int __remove_instance(struct trace_array *tr)
reserve_mem_release_by_name(tr->range_name);
kfree(tr->range_name);
}
+ if (tr->flags & TRACE_ARRAY_FL_VMALLOC)
+ vfree((void *)tr->range_addr_start);
for (i = 0; i < tr->nr_topts; i++) {
kfree(tr->topts[i].topts);
@@ -11325,6 +11327,42 @@ __init static void do_allocate_snapshot(const char *name)
static inline void do_allocate_snapshot(const char *name) { }
#endif
+__init static int backup_instance_area(const char *backup,
+ unsigned long *addr, phys_addr_t *size)
+{
+ struct trace_array *backup_tr;
+ void *allocated_vaddr = NULL;
+
+ backup_tr = trace_array_get_by_name(backup, NULL);
+ if (!backup_tr) {
+ pr_warn("Tracing: Instance %s is not found.\n", backup);
+ return -ENOENT;
+ }
+
+ if (!(backup_tr->flags & TRACE_ARRAY_FL_BOOT)) {
+ pr_warn("Tracing: Instance %s is not boot mapped.\n", backup);
+ trace_array_put(backup_tr);
+ return -EINVAL;
+ }
+
+ *size = backup_tr->range_addr_size;
+
+ allocated_vaddr = vzalloc(*size);
+ if (!allocated_vaddr) {
+ pr_warn("Tracing: Failed to allocate memory for copying instance %s (size 0x%lx)\n",
+ backup, (unsigned long)*size);
+ trace_array_put(backup_tr);
+ return -ENOMEM;
+ }
+
+ memcpy(allocated_vaddr,
+ (void *)backup_tr->range_addr_start, (size_t)*size);
+ *addr = (unsigned long)allocated_vaddr;
+
+ trace_array_put(backup_tr);
+ return 0;
+}
+
__init static void enable_instances(void)
{
struct trace_array *tr;
@@ -11347,11 +11385,15 @@ __init static void enable_instances(void)
char *flag_delim;
char *addr_delim;
char *rname __free(kfree) = NULL;
+ char *backup;
tok = strsep(&curr_str, ",");
- flag_delim = strchr(tok, '^');
- addr_delim = strchr(tok, '@');
+ name = strsep(&tok, "=");
+ backup = tok;
+
+ flag_delim = strchr(name, '^');
+ addr_delim = strchr(name, '@');
if (addr_delim)
*addr_delim++ = '\0';
@@ -11359,7 +11401,10 @@ __init static void enable_instances(void)
if (flag_delim)
*flag_delim++ = '\0';
- name = tok;
+ if (backup) {
+ if (backup_instance_area(backup, &addr, &size) < 0)
+ continue;
+ }
if (flag_delim) {
char *flag;
@@ -11455,7 +11500,13 @@ __init static void enable_instances(void)
tr->ref++;
}
- if (start) {
+ /*
+ * Backup buffers can be freed but need vfree().
+ */
+ if (backup)
+ tr->flags |= TRACE_ARRAY_FL_VMALLOC;
+
+ if (start || backup) {
tr->flags |= TRACE_ARRAY_FL_BOOT | TRACE_ARRAY_FL_LAST_BOOT;
tr->range_name = no_free_ptr(rname);
}
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index a3aa225ed50a..666f9a2c189d 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -454,6 +454,7 @@ enum {
TRACE_ARRAY_FL_LAST_BOOT = BIT(2),
TRACE_ARRAY_FL_MOD_INIT = BIT(3),
TRACE_ARRAY_FL_MEMMAP = BIT(4),
+ TRACE_ARRAY_FL_VMALLOC = BIT(5),
};
#ifdef CONFIG_MODULES
--
2.51.0