[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20201009124954.31830-2-willy@infradead.org>
Date: Fri, 9 Oct 2020 13:49:52 +0100
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
To: Jens Axboe <axboe@...nel.dk>
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
linux-fsdevel@...r.kernel.org, io-uring@...r.kernel.org,
linux-kernel@...r.kernel.org,
Pavel Begunkov <asml.silence@...il.com>
Subject: [PATCH 2/3] io_uring: Fix XArray usage in io_uring_add_task_file
The xas_store() wasn't paired with an xas_nomem() loop, so if it couldn't
allocate memory using GFP_NOWAIT, it would leak the reference to the file
descriptor. Also the node pointed to by the xas could be freed between
the call to xas_load() under the rcu_read_lock() and the acquisition of
the xa_lock.
It's easier to just use the normal xa_load/xa_store interface here.
Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
---
fs/io_uring.c | 21 +++++++++------------
1 file changed, 9 insertions(+), 12 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 2978cc78538a..bcef6210bf67 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8586,27 +8586,24 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
*/
static int io_uring_add_task_file(struct file *file)
{
- if (unlikely(!current->io_uring)) {
+ struct io_uring_task *cur_uring = current->io_uring;
+
+ if (unlikely(!cur_uring)) {
int ret;
ret = io_uring_alloc_task_context(current);
if (unlikely(ret))
return ret;
}
- if (current->io_uring->last != file) {
- XA_STATE(xas, &current->io_uring->xa, (unsigned long) file);
- void *old;
+ if (cur_uring->last != file) {
+ void *old = xa_load(&cur_uring->xa, (unsigned long)file);
- rcu_read_lock();
- old = xas_load(&xas);
- if (old != file) {
+ if (!old) {
get_file(file);
- xas_lock(&xas);
- xas_store(&xas, file);
- xas_unlock(&xas);
+ xa_store(&cur_uring->xa, (unsigned long)file, file,
+ GFP_KERNEL);
}
- rcu_read_unlock();
- current->io_uring->last = file;
+ cur_uring->last = file;
}
return 0;
--
2.28.0
Powered by blists - more mailing lists