Message-Id: <20190430113610.309054125@linuxfoundation.org>
Date: Tue, 30 Apr 2019 13:38:03 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Ingo Molnar <mingo@...hat.com>,
Masami Hiramatsu <mhiramat@...nel.org>,
Al Viro <viro@...iv.linux.org.uk>,
Jann Horn <jannh@...gle.com>,
"Steven Rostedt (VMware)" <rostedt@...dmis.org>
Subject: [PATCH 5.0 12/89] tracing: Fix buffer_ref pipe ops
From: Jann Horn <jannh@...gle.com>
commit b987222654f84f7b4ca95b3a55eca784cb30235b upstream.
This fixes multiple issues in buffer_pipe_buf_ops:
- The ->steal() handler must not return zero unless the pipe buffer has
the only reference to the page. But generic_pipe_buf_steal() assumes
that every reference to the pipe buffer is tracked by the page's refcount,
which isn't true for these buffers - buffer_pipe_buf_get(), which
duplicates a buffer, doesn't touch the page's refcount.
Fix it by using generic_pipe_buf_nosteal(), which refuses every
attempted theft. It should be easy to actually support ->steal, but the
only current users of pipe_buf_steal() are the virtio console and FUSE,
and they also only use it as an optimization. So it's probably not worth
the effort. (A user-space sketch after this list illustrates the point.)
- The ->get() and ->release() handlers can be invoked concurrently on pipe
buffers backed by the same struct buffer_ref. Make them safe against
concurrency by using refcount_t. (A second sketch after this list models
the pattern.)
- The pointers stored in ->private were only zeroed out when the last
reference to the buffer_ref was dropped. As far as I know, this
shouldn't be necessary anyway, but if we do it, let's always do it.
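For illustration, here is a minimal user-space model of the first point
(all names below - page_model, bref_model, and so on - are invented for
the sketch and are not kernel APIs): a steal check keyed off the page's
refcount concludes the page has a sole owner even though a second pipe
buffer still references it through the shared buffer_ref, because
duplicating the buffer never touched the page's refcount.

#include <stdio.h>

struct page_model {
	int refcount;		/* stands in for the struct page refcount */
};

struct bref_model {
	struct page_model *page;
	int refs;		/* stands in for buffer_ref's own counter */
};

/* Models the generic assumption: page refcount of 1 means the pipe
 * buffer is the sole owner, so stealing is allowed (return 0). */
static int steal_by_page_count(struct bref_model *ref)
{
	return ref->page->refcount == 1 ? 0 : 1;
}

/* Models a nosteal handler: always refuse (non-zero). */
static int nosteal(struct bref_model *ref)
{
	(void)ref;
	return 1;
}

int main(void)
{
	struct page_model page = { .refcount = 1 };
	struct bref_model ref = { .page = &page, .refs = 1 };

	/* Models buffer_pipe_buf_get(): a second pipe buffer now shares
	 * 'ref'; only the buffer_ref counter is bumped, the page's
	 * refcount is not touched. */
	ref.refs++;

	printf("steal_by_page_count() = %d (0 = \"ok to steal\" - wrong, the page is still shared)\n",
	       steal_by_page_count(&ref));
	printf("nosteal()             = %d (always refuses - safe)\n",
	       nosteal(&ref));
	return 0;
}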
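Similarly, for the second point, a user-space sketch of the release
pattern the patch switches to, using C11 atomics as a rough stand-in for
the kernel's refcount_t (again, every name here is invented for the
example): the atomic decrement guarantees that exactly one of two racing
releases sees the counter hit zero, so the page is freed exactly once.

#include <stdatomic.h>
#include <stdlib.h>

struct ref_model {
	atomic_int refcount;
	void *page;		/* placeholder for the ring-buffer page */
};

/* Models buffer_pipe_buf_get(): safe to call concurrently. */
static void ref_get(struct ref_model *ref)
{
	atomic_fetch_add_explicit(&ref->refcount, 1, memory_order_relaxed);
}

/* Models buffer_ref_release(): only the caller that drops the last
 * reference performs the free. */
static void ref_put(struct ref_model *ref)
{
	if (atomic_fetch_sub_explicit(&ref->refcount, 1,
				      memory_order_acq_rel) != 1)
		return;
	free(ref->page);	/* models ring_buffer_free_read_page() */
	free(ref);
}

int main(void)
{
	struct ref_model *ref = malloc(sizeof(*ref));

	atomic_init(&ref->refcount, 1);	/* models refcount_set(.., 1) */
	ref->page = malloc(64);

	ref_get(ref);	/* e.g. splice duplicated the pipe buffer */
	ref_put(ref);	/* counter 2 -> 1, nothing freed yet */
	ref_put(ref);	/* counter 1 -> 0, page and ref freed */
	return 0;
}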
Link: http://lkml.kernel.org/r/20190404215925.253531-1-jannh@google.com
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Masami Hiramatsu <mhiramat@...nel.org>
Cc: Al Viro <viro@...iv.linux.org.uk>
Cc: stable@...r.kernel.org
Fixes: 73a757e63114d ("ring-buffer: Return reader page back into existing ring buffer")
Signed-off-by: Jann Horn <jannh@...gle.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@...dmis.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
fs/splice.c | 4 ++--
include/linux/pipe_fs_i.h | 1 +
kernel/trace/trace.c | 28 ++++++++++++++--------------
3 files changed, 17 insertions(+), 16 deletions(-)
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -333,8 +333,8 @@ const struct pipe_buf_operations default
.get = generic_pipe_buf_get,
};
-static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
+int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
{
return 1;
}
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -181,6 +181,7 @@ void free_pipe_info(struct pipe_inode_in
void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *);
void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
void pipe_buf_mark_unmergeable(struct pipe_buffer *buf);
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6823,19 +6823,23 @@ struct buffer_ref {
struct ring_buffer *buffer;
void *page;
int cpu;
- int ref;
+ refcount_t refcount;
};
+static void buffer_ref_release(struct buffer_ref *ref)
+{
+ if (!refcount_dec_and_test(&ref->refcount))
+ return;
+ ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
+ kfree(ref);
+}
+
static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct buffer_ref *ref = (struct buffer_ref *)buf->private;
- if (--ref->ref)
- return;
-
- ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
- kfree(ref);
+ buffer_ref_release(ref);
buf->private = 0;
}
@@ -6844,7 +6848,7 @@ static void buffer_pipe_buf_get(struct p
{
struct buffer_ref *ref = (struct buffer_ref *)buf->private;
- ref->ref++;
+ refcount_inc(&ref->refcount);
}
/* Pipe buffer operations for a buffer. */
@@ -6852,7 +6856,7 @@ static const struct pipe_buf_operations
.can_merge = 0,
.confirm = generic_pipe_buf_confirm,
.release = buffer_pipe_buf_release,
- .steal = generic_pipe_buf_steal,
+ .steal = generic_pipe_buf_nosteal,
.get = buffer_pipe_buf_get,
};
@@ -6865,11 +6869,7 @@ static void buffer_spd_release(struct sp
struct buffer_ref *ref =
(struct buffer_ref *)spd->partial[i].private;
- if (--ref->ref)
- return;
-
- ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
- kfree(ref);
+ buffer_ref_release(ref);
spd->partial[i].private = 0;
}
@@ -6924,7 +6924,7 @@ tracing_buffers_splice_read(struct file
break;
}
- ref->ref = 1;
+ refcount_set(&ref->refcount, 1);
ref->buffer = iter->trace_buffer->buffer;
ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
if (IS_ERR(ref->page)) {