Message-Id: <20211213095002.62110-5-tz.stoyanov@gmail.com>
Date: Mon, 13 Dec 2021 11:50:01 +0200
From: "Tzvetomir Stoyanov (VMware)" <tz.stoyanov@...il.com>
To: rostedt@...dmis.org
Cc: linux-kernel@...r.kernel.org
Subject: [PATCH v4 4/5] [RFC] tracing: Set new size of the ring buffer sub page
There are two approaches to changing the size of the ring buffer
sub page:
1. Destroying all pages and allocating new pages with the new size.
2. Allocating new pages, copying the content of the old pages before
destroying them.
The first approach is simpler and is the one used in the proposed
implementation. Changing the ring buffer sub page size is not expected
to happen frequently; usually the size is set only once, before the
buffer is in use, while it is still empty.
Signed-off-by: Tzvetomir Stoyanov (VMware) <tz.stoyanov@...il.com>
---
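The new API can be exercised with a call like the following (a
hypothetical snippet, not part of this patch; tr is assumed to be a
struct trace_array as used in kernel/trace/trace.c):

	/* hypothetical: switch to 2-page (order 1) sub buffers */
	int ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, 1);

	if (ret)
		pr_err("failed to set sub buffer order: %d\n", ret);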
kernel/trace/ring_buffer.c | 81 ++++++++++++++++++++++++++++++++++----
1 file changed, 74 insertions(+), 7 deletions(-)
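A note on the structure of ring_buffer_subbuf_order_set() below: it is
an all-or-nothing swap of the per-CPU buffers. All replacement buffers
are allocated up front with the new sub page order, and the old buffers
are freed only after every allocation has succeeded, so a mid-way
allocation failure rolls back without touching the original buffers.
The cost is a transient doubling of the buffer memory, which is
acceptable since the sub page size is expected to change rarely and
while the buffer is unused. Below is a minimal, stand-alone user-space
sketch of the same pattern, for illustration only; struct buf, make_buf,
resize_all and friends are made-up names, not kernel API:

#include <stdlib.h>

struct buf { char *data; size_t size; };

static struct buf *make_buf(size_t size)
{
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		return NULL;
	b->data = malloc(size);
	if (!b->data) {
		free(b);
		return NULL;
	}
	b->size = size;
	return b;
}

static void free_buf(struct buf *b)
{
	if (!b)
		return;
	free(b->data);
	free(b);
}

/* Replace all n buffers with new_size ones; all or nothing. */
static int resize_all(struct buf **bufs, int n, size_t new_size)
{
	struct buf **fresh = calloc(n, sizeof(*fresh));
	int i;

	if (!fresh)
		return -1;

	/* Phase 1: allocate every new buffer before touching the old ones. */
	for (i = 0; i < n; i++) {
		fresh[i] = make_buf(new_size);
		if (!fresh[i])
			goto error;
	}

	/* Phase 2: only now drop the old buffers and swap in the new ones. */
	for (i = 0; i < n; i++) {
		free_buf(bufs[i]);
		bufs[i] = fresh[i];
	}
	free(fresh);
	return 0;

error:
	/* Roll back; the caller's buffers were never touched. */
	for (i = 0; i < n; i++)
		free_buf(fresh[i]);	/* free_buf(NULL) is a no-op */
	free(fresh);
	return -1;
}

int main(void)
{
	struct buf *bufs[4] = { NULL };
	int i;

	/* Start with 1 KiB buffers, then grow them all to 4 KiB. */
	if (resize_all(bufs, 4, 1024) || resize_all(bufs, 4, 4096))
		return 1;
	for (i = 0; i < 4; i++)
		free_buf(bufs[i]);
	return 0;
}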
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 4aa5361a8f4c..a40fcb1cb299 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -323,6 +323,7 @@ struct buffer_page {
unsigned read; /* index for next read */
local_t entries; /* entries on this page */
unsigned long real_end; /* real end of data */
+ unsigned order; /* order of the page */
struct buffer_data_page *page; /* Actual data page */
};
@@ -352,7 +353,7 @@ static void rb_init_page(struct buffer_data_page *bpage)
*/
static void free_buffer_page(struct buffer_page *bpage)
{
- free_page((unsigned long)bpage->page);
+ free_pages((unsigned long)bpage->page, bpage->order);
kfree(bpage);
}
@@ -1563,10 +1564,12 @@ static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
list_add(&bpage->list, pages);
- page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0);
+ page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags,
+ cpu_buffer->buffer->subbuf_order);
if (!page)
goto free_pages;
bpage->page = page_address(page);
+ bpage->order = cpu_buffer->buffer->subbuf_order;
rb_init_page(bpage->page);
if (user_thread && fatal_signal_pending(current))
@@ -1645,7 +1648,9 @@ rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
rb_check_bpage(cpu_buffer, bpage);
cpu_buffer->reader_page = bpage;
- page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+ page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL,
+ cpu_buffer->buffer->subbuf_order);
if (!page)
goto fail_free_reader;
bpage->page = page_address(page);
+ bpage->order = cpu_buffer->buffer->subbuf_order;
@@ -1725,6 +1729,7 @@ struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
goto fail_free_buffer;
/* Default buffer page size - one system page */
+ buffer->subbuf_order = 0;
buffer->subbuf_size = PAGE_SIZE - BUF_PAGE_HDR_SIZE;
/* Max payload is buffer page size - header (8bytes) */
@@ -5434,8 +5439,8 @@ void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
if (bpage)
goto out;
- page = alloc_pages_node(cpu_to_node(cpu),
- GFP_KERNEL | __GFP_NORETRY, 0);
+ page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_NORETRY,
+ cpu_buffer->buffer->subbuf_order);
if (!page)
return ERR_PTR(-ENOMEM);
@@ -5479,7 +5484,7 @@ void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data
local_irq_restore(flags);
out:
- free_page((unsigned long)bpage);
+ free_pages((unsigned long)bpage, buffer->subbuf_order);
}
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
@@ -5731,7 +5736,13 @@ EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_get);
*/
int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
{
+ struct ring_buffer_per_cpu **cpu_buffers;
+ int old_order, old_size;
+ int nr_pages;
int psize;
+ int bsize;
+ int err;
+ int cpu;
if (!buffer || order < 0)
return -EINVAL;
@@ -5743,12 +5754,67 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
if (psize <= BUF_PAGE_HDR_SIZE)
return -EINVAL;
+ bsize = sizeof(void *) * buffer->cpus;
+ cpu_buffers = kzalloc(bsize, GFP_KERNEL);
+ if (!cpu_buffers)
+ return -ENOMEM;
+
+ old_order = buffer->subbuf_order;
+ old_size = buffer->subbuf_size;
+
+ /* prevent another thread from changing buffer sizes */
+ mutex_lock(&buffer->mutex);
+ atomic_inc(&buffer->record_disabled);
+
+ /* Make sure all commits have finished */
+ synchronize_rcu();
+
buffer->subbuf_order = order;
buffer->subbuf_size = psize - BUF_PAGE_HDR_SIZE;
- /* Todo: reset the buffer with the new page size */
+ /* Make sure all new buffers are allocated before deleting the old ones */
+ for_each_buffer_cpu(buffer, cpu) {
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ continue;
+
+ nr_pages = buffer->buffers[cpu]->nr_pages;
+ cpu_buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
+ if (!cpu_buffers[cpu]) {
+ err = -ENOMEM;
+ goto error;
+ }
+ }
+
+ for_each_buffer_cpu(buffer, cpu) {
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ continue;
+
+ rb_free_cpu_buffer(buffer->buffers[cpu]);
+ buffer->buffers[cpu] = cpu_buffers[cpu];
+ }
+
+ atomic_dec(&buffer->record_disabled);
+ mutex_unlock(&buffer->mutex);
+
+ kfree(cpu_buffers);
return 0;
+
+error:
+ buffer->subbuf_order = old_order;
+ buffer->subbuf_size = old_size;
+
+ atomic_dec(&buffer->record_disabled);
+ mutex_unlock(&buffer->mutex);
+
+ for_each_buffer_cpu(buffer, cpu) {
+ if (!cpu_buffers[cpu])
+ continue;
+ rb_free_cpu_buffer(cpu_buffers[cpu]);
+ }
+ kfree(cpu_buffers);
+
+ return err;
}
EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_set);
--
2.31.1