Message-ID: <20240507223402.206d6ddc@rorschach.local.home>
Date: Tue, 7 May 2024 22:34:02 -0400
From: Steven Rostedt <rostedt@...dmis.org>
To: Vincent Donnefort <vdonnefort@...gle.com>
Cc: mhiramat@...nel.org, linux-kernel@...r.kernel.org,
linux-trace-kernel@...r.kernel.org, mathieu.desnoyers@...icios.com,
kernel-team@...roid.com, rdunlap@...radead.org, rppt@...nel.org,
david@...hat.com, linux-mm@...ck.org
Subject: Re: [PATCH v22 2/5] ring-buffer: Introducing ring-buffer mapping functions

On Tue, 30 Apr 2024 12:13:51 +0100
Vincent Donnefort <vdonnefort@...gle.com> wrote:
> +#ifdef CONFIG_MMU
> +static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
> +                        struct vm_area_struct *vma)
> +{
> +        unsigned long nr_subbufs, nr_pages, vma_pages, pgoff = vma->vm_pgoff;
> +        unsigned int subbuf_pages, subbuf_order;
> +        struct page **pages;
> +        int p = 0, s = 0;
> +        int err;
> +
> +        /* Refuse MAP_PRIVATE or writable mappings */
> +        if (vma->vm_flags & VM_WRITE || vma->vm_flags & VM_EXEC ||
> +            !(vma->vm_flags & VM_MAYSHARE))
> +                return -EPERM;
> +
> +        /*
> +         * Make sure the mapping cannot become writable later. Also tell the VM
> +         * to not touch these pages (VM_DONTCOPY | VM_DONTEXPAND). Finally,
> +         * prevent migration, GUP and dump (VM_IO).
> +         */
> +        vm_flags_mod(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_IO, VM_MAYWRITE);
Do we really need the VM_IO?

When testing this in gdb, I would get:

  (gdb) p tmap->map->subbuf_size
  Cannot access memory at address 0x7ffff7fc2008

It appears that you can't ptrace IO memory. When I removed that flag,
gdb had no problem reading that memory.

I think we should drop that flag.
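
Untested, but I'd expect the flag update to then look something like:

        /*
         * Make sure the mapping cannot become writable later. Also tell
         * the VM to not touch these pages (VM_DONTCOPY | VM_DONTEXPAND).
         */
        vm_flags_mod(vma, VM_DONTCOPY | VM_DONTEXPAND, VM_MAYWRITE);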
Can you send a v23 with that removed, Shuah's update, and also the
change below:
> +
> +        lockdep_assert_held(&cpu_buffer->mapping_lock);
> +
> +        subbuf_order = cpu_buffer->buffer->subbuf_order;
> +        subbuf_pages = 1 << subbuf_order;
> +
> +        nr_subbufs = cpu_buffer->nr_pages + 1; /* + reader-subbuf */
> +        nr_pages = ((nr_subbufs) << subbuf_order) - pgoff + 1; /* + meta-page */
> +
> +        vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
> +        if (!vma_pages || vma_pages > nr_pages)
> +                return -EINVAL;
> +
> +        nr_pages = vma_pages;
> +
> +        pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
> +        if (!pages)
> +                return -ENOMEM;
> +
> +        if (!pgoff) {
> +                pages[p++] = virt_to_page(cpu_buffer->meta_page);
> +
> +                /*
> +                 * TODO: Align sub-buffers on their size, once
> +                 * vm_insert_pages() supports the zero-page.
> +                 */
> +        } else {
> +                /* Skip the meta-page */
> +                pgoff--;
> +
> +                if (pgoff % subbuf_pages) {
> +                        err = -EINVAL;
> +                        goto out;
> +                }
> +
> +                s += pgoff / subbuf_pages;
> +        }
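
Side note for anyone following along: the layout being set up here is the
meta-page at pgoff 0, followed by the sub-buffers at subbuf-aligned
offsets. A rough, untested user-space sketch of mapping it (assuming the
per-CPU trace_pipe_raw file that gets wired up later in this series):

        #include <fcntl.h>
        #include <sys/mman.h>
        #include <unistd.h>

        int main(void)
        {
                /* Path assumed from later patches in this series. */
                int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
                              O_RDONLY);
                if (fd < 0)
                        return 1;

                /*
                 * pgoff 0 maps the meta-page first. The mapping must be
                 * read-only and shared: PROT_WRITE or MAP_PRIVATE gets
                 * -EPERM from the checks at the top of __rb_map_vma().
                 */
                void *meta = mmap(NULL, sysconf(_SC_PAGE_SIZE), PROT_READ,
                                  MAP_SHARED, fd, 0);

                return meta == MAP_FAILED;
        }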
> +
> +        while (s < nr_subbufs && p < nr_pages) {
> +                struct page *page = virt_to_page(cpu_buffer->subbuf_ids[s]);
> +                int off = 0;
> +
> +                for (; off < (1 << (subbuf_order)); off++, page++) {
> +                        if (p >= nr_pages)
> +                                break;
> +
> +                        pages[p++] = page;
> +                }
> +                s++;
> +        }
The above can be made to:
        while (p < nr_pages) {
                struct page *page;
                int off = 0;

                if (WARN_ON_ONCE(s >= nr_subbufs))
                        break;

                page = virt_to_page(cpu_buffer->subbuf_ids[s]);

                for (; off < (1 << (subbuf_order)); off++, page++) {
                        if (p >= nr_pages)
                                break;

                        pages[p++] = page;
                }
                s++;
        }
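
With the WARN_ON_ONCE(), a disagreement between the nr_pages calculation
and nr_subbufs becomes a loud one-time warning instead of silently
mapping fewer pages than the user asked for.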
Thanks.
-- Steve
> +
> +        err = vm_insert_pages(vma, vma->vm_start, pages, &nr_pages);
> +
> +out:
> +        kfree(pages);
> +
> +        return err;
> +}
> +#else
> +static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
> +                        struct vm_area_struct *vma)
> +{
> +        return -EOPNOTSUPP;
> +}
> +#endif