Message-ID: <20231213000452.88295-9-graf@amazon.com>
Date: Wed, 13 Dec 2023 00:04:45 +0000
From: Alexander Graf <graf@...zon.com>
To: <linux-kernel@...r.kernel.org>
CC: <linux-trace-kernel@...r.kernel.org>, <linux-mm@...ck.org>,
<devicetree@...r.kernel.org>,
<linux-arm-kernel@...ts.infradead.org>,
<kexec@...ts.infradead.org>, <linux-doc@...r.kernel.org>,
<x86@...nel.org>, Eric Biederman <ebiederm@...ssion.com>,
"H. Peter Anvin" <hpa@...or.com>,
Andy Lutomirski <luto@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
"Rob Herring" <robh+dt@...nel.org>,
Steven Rostedt <rostedt@...dmis.org>,
"Andrew Morton" <akpm@...ux-foundation.org>,
Mark Rutland <mark.rutland@....com>,
"Tom Lendacky" <thomas.lendacky@....com>,
Ashish Kalra <ashish.kalra@....com>,
James Gowans <jgowans@...zon.com>,
Stanislav Kinsburskii <skinsburskii@...ux.microsoft.com>,
<arnd@...db.de>, <pbonzini@...hat.com>,
<madvenka@...ux.microsoft.com>,
Anthony Yznaga <anthony.yznaga@...cle.com>,
Usama Arif <usama.arif@...edance.com>,
David Woodhouse <dwmw@...zon.co.uk>,
Benjamin Herrenschmidt <benh@...nel.crashing.org>
Subject: [PATCH 08/15] tracing: Introduce names for ring buffers

With KHO (Kexec HandOver), we want to preserve trace buffers across
kexec. To carry over their state between kernels, the kernel needs a
common handle for them that exists on both sides. As that handle, this
patch introduces names for ring buffers. In a follow-up patch, the
kernel can then use these names to recover the contents of specific
ring buffers.
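
For illustration only (not part of this patch), here is a minimal
sketch of how a ring buffer user would pass a name with the changed
allocation API. The "kho_demo" name, the buffer size and the demo_*
identifiers below are made up:

  #include <linux/errno.h>
  #include <linux/init.h>
  #include <linux/ring_buffer.h>

  static struct trace_buffer *demo_buffer;

  static int __init demo_buffer_init(void)
  {
  	/*
  	 * The name acts as the handle a later kernel could use to find
  	 * this buffer again after kexec; name and size are examples.
  	 */
  	demo_buffer = ring_buffer_alloc("kho_demo", 1 << 20, RB_FL_OVERWRITE);
  	return demo_buffer ? 0 : -ENOMEM;
  }
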
Signed-off-by: Alexander Graf <graf@...zon.com>
---
 include/linux/ring_buffer.h | 7 ++++---
 kernel/trace/ring_buffer.c  | 5 ++++-
 kernel/trace/trace.c        | 7 ++++---
 3 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 782e14f62201..f34538f97c75 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -85,17 +85,18 @@ void ring_buffer_discard_commit(struct trace_buffer *buffer,
  * size is in bytes for each per CPU buffer.
  */
 struct trace_buffer *
-__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);
+__ring_buffer_alloc(const char *name, unsigned long size, unsigned flags,
+		    struct lock_class_key *key);
 
 /*
  * Because the ring buffer is generic, if other users of the ring buffer get
  * traced by ftrace, it can produce lockdep warnings. We need to keep each
  * ring buffer's lock class separate.
  */
-#define ring_buffer_alloc(size, flags)			\
+#define ring_buffer_alloc(name, size, flags)		\
 ({							\
 	static struct lock_class_key __key;		\
-	__ring_buffer_alloc((size), (flags), &__key);	\
+	__ring_buffer_alloc((name), (size), (flags), &__key);	\
 })
 
 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 43cc47d7faaf..eaaf823ddedb 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -557,6 +557,7 @@ struct trace_buffer {
 
 	struct rb_irq_work		irq_work;
 	bool				time_stamp_abs;
+	const char			*name;
 };
 
 struct ring_buffer_iter {
@@ -1801,7 +1802,8 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
  * when the buffer wraps. If this flag is not set, the buffer will
  * drop data when the tail hits the head.
  */
-struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
+struct trace_buffer *__ring_buffer_alloc(const char *name,
+					 unsigned long size, unsigned flags,
 					 struct lock_class_key *key)
 {
 	struct trace_buffer *buffer;
@@ -1823,6 +1825,7 @@ struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 	buffer->flags = flags;
 	buffer->clock = trace_clock_local;
 	buffer->reader_lock_key = key;
+	buffer->name = name;
 
 	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
 	init_waitqueue_head(&buffer->irq_work.waiters);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9aebf904ff97..7700ca1be2a5 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -9384,7 +9384,8 @@ allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size
 
 	buf->tr = tr;
 
-	buf->buffer = ring_buffer_alloc(size, rb_flags);
+	buf->buffer = ring_buffer_alloc(tr->name ? tr->name : "global_trace",
+					size, rb_flags);
 	if (!buf->buffer)
 		return -ENOMEM;
 
@@ -9421,7 +9422,7 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
 		return ret;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	ret = allocate_trace_buffer(tr, &tr->max_buffer,
+	ret = allocate_trace_buffer(NULL, &tr->max_buffer,
 				    allocate_snapshot ? size : 1);
 	if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
 		free_trace_buffer(&tr->array_buffer);
@@ -10473,7 +10474,7 @@ __init static int tracer_alloc_buffers(void)
 		goto out_free_cpumask;
 	/* Used for event triggers */
 	ret = -ENOMEM;
-	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
+	temp_buffer = ring_buffer_alloc("temp_buffer", PAGE_SIZE, RB_FL_OVERWRITE);
 	if (!temp_buffer)
 		goto out_rm_hp_state;
 
--
2.40.1
Amazon Development Center Germany GmbH
Krausenstr. 38
10117 Berlin
Geschaeftsfuehrung: Christian Schlaeger, Jonathan Weiss
Eingetragen am Amtsgericht Charlottenburg unter HRB 149173 B
Sitz: Berlin
Ust-ID: DE 289 237 879