Message-Id: <20201031113500.202920716@linuxfoundation.org>
Date: Sat, 31 Oct 2020 12:35:45 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Pavel Begunkov <asml.silence@...il.com>,
Jens Axboe <axboe@...nel.dk>
Subject: [PATCH 5.9 03/74] io_uring: allow timeout/poll/files killing to take task into account
From: Jens Axboe <axboe@...nel.dk>
commit 07d3ca52b0056f25eef61b1c896d089f8d365468 upstream.
We currently cancel these requests (timeouts, poll and files requests) when
the ring exits, and we cancel all of them. This is in preparation for
killing only the ones associated with a given task.
Reviewed-by: Pavel Begunkov <asml.silence@...il.com>
Signed-off-by: Jens Axboe <axboe@...nel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
fs/io_uring.c | 33 ++++++++++++++++++++++++---------
1 file changed, 24 insertions(+), 9 deletions(-)
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1226,13 +1226,26 @@ static void io_kill_timeout(struct io_ki
 	}
 }
 
-static void io_kill_timeouts(struct io_ring_ctx *ctx)
+static bool io_task_match(struct io_kiocb *req, struct task_struct *tsk)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+
+	if (!tsk || req->task == tsk)
+		return true;
+	if ((ctx->flags & IORING_SETUP_SQPOLL) && req->task == ctx->sqo_thread)
+		return true;
+	return false;
+}
+
+static void io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk)
 {
 	struct io_kiocb *req, *tmp;
 
 	spin_lock_irq(&ctx->completion_lock);
-	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list)
-		io_kill_timeout(req);
+	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
+		if (io_task_match(req, tsk))
+			io_kill_timeout(req);
+	}
 	spin_unlock_irq(&ctx->completion_lock);
 }
 
@@ -5017,7 +5030,7 @@ static bool io_poll_remove_one(struct io
 	return do_complete;
 }
 
-static void io_poll_remove_all(struct io_ring_ctx *ctx)
+static void io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk)
 {
 	struct hlist_node *tmp;
 	struct io_kiocb *req;
@@ -5028,8 +5041,10 @@ static void io_poll_remove_all(struct io
 		struct hlist_head *list;
 
 		list = &ctx->cancel_hash[i];
-		hlist_for_each_entry_safe(req, tmp, list, hash_node)
-			posted += io_poll_remove_one(req);
+		hlist_for_each_entry_safe(req, tmp, list, hash_node) {
+			if (io_task_match(req, tsk))
+				posted += io_poll_remove_one(req);
+		}
 	}
 	spin_unlock_irq(&ctx->completion_lock);
 
@@ -7989,8 +8004,8 @@ static void io_ring_ctx_wait_and_kill(st
 	percpu_ref_kill(&ctx->refs);
 	mutex_unlock(&ctx->uring_lock);
 
-	io_kill_timeouts(ctx);
-	io_poll_remove_all(ctx);
+	io_kill_timeouts(ctx, NULL);
+	io_poll_remove_all(ctx, NULL);
 
 	if (ctx->io_wq)
 		io_wq_cancel_all(ctx->io_wq);
@@ -8221,7 +8236,7 @@ static bool io_cancel_task_cb(struct io_
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 	struct task_struct *task = data;
 
-	return req->task == task;
+	return io_task_match(req, task);
 }
 
 static int io_uring_flush(struct file *file, void *data)
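
For readers tracing the new matching rule, below is a minimal userspace
sketch of the predicate that io_task_match() introduces. The struct and
flag names here are simplified stand-ins for illustration, not the
kernel's: a NULL task matches every request (the existing kill-everything
behaviour at ring exit), while a non-NULL task matches requests it
submitted, plus requests submitted by the ring's SQ poll thread when the
ring was created with IORING_SETUP_SQPOLL.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures used in the patch. */
struct task { int pid; };

#define SETUP_SQPOLL 0x1	/* models IORING_SETUP_SQPOLL */

struct ring_ctx {
	unsigned int flags;
	struct task *sqo_thread;	/* SQ poll thread, if SQPOLL is set */
};

struct kiocb {
	struct ring_ctx *ctx;
	struct task *task;	/* task that submitted the request */
};

/*
 * Mirrors the matching rule of io_task_match() in the patch:
 * - tsk == NULL matches every request (the "kill all" case);
 * - otherwise, match requests submitted by tsk, or by the ring's
 *   SQ poll thread when the ring was created with SQPOLL.
 */
static bool task_match(struct kiocb *req, struct task *tsk)
{
	struct ring_ctx *ctx = req->ctx;

	if (!tsk || req->task == tsk)
		return true;
	if ((ctx->flags & SETUP_SQPOLL) && req->task == ctx->sqo_thread)
		return true;
	return false;
}

int main(void)
{
	struct task a = { .pid = 1 }, b = { .pid = 2 }, sq = { .pid = 3 };
	struct ring_ctx plain = { .flags = 0 };
	struct ring_ctx sqpoll = { .flags = SETUP_SQPOLL, .sqo_thread = &sq };
	struct kiocb r1 = { .ctx = &plain, .task = &a };
	struct kiocb r2 = { .ctx = &sqpoll, .task = &sq };

	printf("%d\n", task_match(&r1, NULL));	/* 1: NULL matches all */
	printf("%d\n", task_match(&r1, &b));	/* 0: different task */
	printf("%d\n", task_match(&r2, &b));	/* 1: SQ poll thread submitted it */
	return 0;
}

The NULL case is why the two call sites in io_ring_ctx_wait_and_kill()
simply pass NULL: the ring-exit path keeps its old cancel-everything
semantics, and task-targeted cancellation comes in a later patch.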