Message-Id: <20201031113459.847920459@linuxfoundation.org>
Date: Sat, 31 Oct 2020 12:35:39 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Pavel Begunkov <asml.silence@...il.com>,
Jens Axboe <axboe@...nel.dk>
Subject: [PATCH 5.8 07/70] io_uring: return cancelation status from poll/timeout/files handlers

From: Jens Axboe <axboe@...nel.dk>

commit 76e1b6427fd8246376a97e3227049d49188dfb9c upstream.

Return whether we found and canceled requests or not. This is in
preparation for using this information; no functional changes in this
patch.

Reviewed-by: Pavel Begunkov <asml.silence@...il.com>
Signed-off-by: Jens Axboe <axboe@...nel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
fs/io_uring.c | 30 ++++++++++++++++++++++++------
1 file changed, 24 insertions(+), 6 deletions(-)
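
Note: the new return values are not consumed anywhere in this patch; the
hunks below only make the information available for later changes. As a
rough illustration only (not part of this patch or of any specific
follow-up; the io_cancel_until_idle() helper and its retry loop are
hypothetical), a caller could combine the results like this:

	static void io_cancel_until_idle(struct io_ring_ctx *ctx,
					 struct task_struct *tsk,
					 struct files_struct *files)
	{
		bool found;

		do {
			/* each helper reports whether it canceled anything */
			found = io_kill_timeouts(ctx, tsk);
			found |= io_poll_remove_all(ctx, tsk);
			if (files)
				found |= io_uring_cancel_files(ctx, files);
			/* retry while any pass still found matching requests */
		} while (found);
	}

Returning bool rather than a count keeps such callers simple: all they
need to know is whether another cancelation pass is worth attempting.
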
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1143,15 +1143,23 @@ static bool io_task_match(struct io_kioc
 	return false;
 }
 
-static void io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk)
+/*
+ * Returns true if we found and killed one or more timeouts
+ */
+static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk)
 {
 	struct io_kiocb *req, *tmp;
+	int canceled = 0;
 
 	spin_lock_irq(&ctx->completion_lock);
-	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
-		if (io_task_match(req, tsk))
+	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list) {
+		if (io_task_match(req, tsk)) {
 			io_kill_timeout(req);
+			canceled++;
+		}
+	}
 	spin_unlock_irq(&ctx->completion_lock);
+	return canceled != 0;
 }
 
 static void __io_queue_deferred(struct io_ring_ctx *ctx)
@@ -4650,7 +4658,10 @@ static bool io_poll_remove_one(struct io
 	return do_complete;
 }
 
-static void io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk)
+/*
+ * Returns true if we found and killed one or more poll requests
+ */
+static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk)
 {
 	struct hlist_node *tmp;
 	struct io_kiocb *req;
@@ -4670,6 +4681,8 @@ static void io_poll_remove_all(struct io
 
 	if (posted)
 		io_cqring_ev_posted(ctx);
+
+	return posted != 0;
 }
 
 static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
@@ -7744,11 +7757,14 @@ static void io_cancel_defer_files(struct
 	}
 }
 
-static void io_uring_cancel_files(struct io_ring_ctx *ctx,
+/*
+ * Returns true if we found and killed one or more files pinning requests
+ */
+static bool io_uring_cancel_files(struct io_ring_ctx *ctx,
 				  struct files_struct *files)
 {
 	if (list_empty_careful(&ctx->inflight_list))
-		return;
+		return false;
 
 	io_cancel_defer_files(ctx, files);
 	/* cancel all at once, should be faster than doing it one by one*/
@@ -7811,6 +7827,8 @@ static void io_uring_cancel_files(struct
 		schedule();
 		finish_wait(&ctx->inflight_wait, &wait);
 	}
+
+	return true;
 }
 
 static bool io_cancel_task_cb(struct io_wq_work *work, void *data)