Message-ID: <20231221125124.49ee8201@gandalf.local.home>
Date: Thu, 21 Dec 2023 12:51:24 -0500
From: Steven Rostedt <rostedt@...dmis.org>
To: Vincent Donnefort <vdonnefort@...gle.com>
Cc: mhiramat@...nel.org, linux-kernel@...r.kernel.org,
linux-trace-kernel@...r.kernel.org, kernel-team@...roid.com
Subject: Re: [PATCH v9 1/2] ring-buffer: Introducing ring-buffer mapping
functions
On Thu, 21 Dec 2023 17:35:22 +0000
Vincent Donnefort <vdonnefort@...gle.com> wrote:
> @@ -739,6 +747,22 @@ static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int f
>  	return (dirty * 100) > (full * nr_pages);
>  }
>  
> +static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
> +{
> +	if (unlikely(READ_ONCE(cpu_buffer->mapped))) {
> +		/* Ensure the meta_page is ready */
> +		smp_rmb();
> +		WRITE_ONCE(cpu_buffer->meta_page->entries,
> +			   local_read(&cpu_buffer->entries));
> +		WRITE_ONCE(cpu_buffer->meta_page->overrun,
> +			   local_read(&cpu_buffer->overrun));
> +		WRITE_ONCE(cpu_buffer->meta_page->subbufs_touched,
> +			   local_read(&cpu_buffer->pages_touched));
> +		WRITE_ONCE(cpu_buffer->meta_page->subbufs_lost,
> +			   local_read(&cpu_buffer->pages_lost));
> +	}
> +}
> +
>  /*
>   * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
>   *
> @@ -749,6 +773,18 @@ static void rb_wake_up_waiters(struct irq_work *work)
>  {
>  	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
>  
> +	if (rbwork->is_cpu_buffer) {
> +		struct ring_buffer_per_cpu *cpu_buffer;
> +
> +		cpu_buffer = container_of(rbwork, struct ring_buffer_per_cpu,
> +					  irq_work);
> +		/*
> +		 * If the waiter is a cpu_buffer, this might be due to a
> +		 * userspace mapping. Let's update the meta-page.
> +		 */
> +		rb_update_meta_page(cpu_buffer);
> +	}
> +
>  	wake_up_all(&rbwork->waiters);
>  	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
>  		rbwork->wakeup_full = false;
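For anyone following along: I read the smp_rmb() above as pairing with a
write barrier on the mapping path, where meta_page must be fully set up
before mapped becomes visible. A minimal sketch of that ordering (the
rb_setup_meta_page() helper and this exact shape are my assumption, not
from this patch):

static int rb_map_sketch(struct ring_buffer_per_cpu *cpu_buffer)
{
	int ret;

	/* Hypothetical helper: allocate and fill cpu_buffer->meta_page */
	ret = rb_setup_meta_page(cpu_buffer);
	if (ret)
		return ret;

	/*
	 * Publish meta_page before mapped. Pairs with the smp_rmb()
	 * in rb_update_meta_page().
	 */
	smp_wmb();
	WRITE_ONCE(cpu_buffer->mapped, 1);

	return 0;
}
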
I think this code would be cleaner if we did:
static void rb_update_meta_page(struct rb_irq_work *rbwork)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!rbwork->is_cpu_buffer)
		return;

	/*
	 * If the waiter is a cpu_buffer, this might be due to a
	 * userspace mapping. Let's update the meta-page.
	 */
	cpu_buffer = container_of(rbwork, struct ring_buffer_per_cpu,
				  irq_work);

	if (unlikely(READ_ONCE(cpu_buffer->mapped))) {
		// I don't think we need the "unlikely"

		/* Ensure the meta_page is ready */
		smp_rmb();
		WRITE_ONCE(cpu_buffer->meta_page->entries,
			   local_read(&cpu_buffer->entries));
		WRITE_ONCE(cpu_buffer->meta_page->overrun,
			   local_read(&cpu_buffer->overrun));
		WRITE_ONCE(cpu_buffer->meta_page->subbufs_touched,
			   local_read(&cpu_buffer->pages_touched));
		WRITE_ONCE(cpu_buffer->meta_page->subbufs_lost,
			   local_read(&cpu_buffer->pages_lost));
	}
}
/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

	rb_update_meta_page(rbwork);

	wake_up_all(&rbwork->waiters);
	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
		rbwork->wakeup_full = false;
		rbwork->full_waiters_pending = false;
		wake_up_all(&rbwork->full_waiters);
	}
}
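
That way rb_wake_up_waiters() stays flat, and the is_cpu_buffer check
and the container_of() live together in the one place that needs them.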
-- Steve