Message-Id: <20220412062931.969405079@linuxfoundation.org>
Date: Tue, 12 Apr 2022 08:30:32 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Jens Axboe <axboe@...nel.dk>
Subject: [PATCH 5.10 141/171] io_uring: fix race between timeout flush and removal
From: Jens Axboe <axboe@...nel.dk>
commit e677edbcabee849bfdd43f1602bccbecf736a646 upstream.
io_flush_timeouts() assumes the timeout isn't in the process of triggering
or being removed/canceled, so it unconditionally removes it from the
timeout list and attempts to cancel it.

Instead, leave it on the list and let the normal timeout cancellation take
care of it.
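
For illustration only, a minimal userspace sketch of the <linux/list.h>
pattern involved; the list helpers below are hand-rolled stand-ins and the
timeout_req/kill_timeout names are made up for the sketch, this is not the
io_uring code itself. The point is that list_for_each_entry_safe() caches
the next entry before the loop body runs, so the body (io_kill_timeout()
in the real code) is free to unlink the current entry while the walk
continues.

/*
 * Hand-rolled userspace sketch of the list_for_each_entry_safe() pattern
 * the patch switches to.  Stand-ins for <linux/list.h>, not io_uring code.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name)    { &(name), &(name) }

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
}

static void list_del_init(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
        INIT_LIST_HEAD(entry);
}

#define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Safe variant: caches the next entry before the loop body runs, so the
 * body (or whatever it calls) may unlink the current entry. */
#define list_for_each_entry_safe(pos, n, head, member)                    \
        for (pos = list_entry((head)->next, typeof(*pos), member),        \
             n = list_entry(pos->member.next, typeof(*pos), member);      \
             &pos->member != (head);                                      \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))

struct timeout_req {
        int seq;
        struct list_head list;
};

/* Stand-in for io_kill_timeout(): it unlinks the entry itself, which is
 * why the flush loop no longer calls list_del_init() first. */
static void kill_timeout(struct timeout_req *req)
{
        printf("killing timeout seq=%d\n", req->seq);
        list_del_init(&req->list);
}

int main(void)
{
        struct timeout_req reqs[3] = { { .seq = 1 }, { .seq = 2 }, { .seq = 3 } };
        struct list_head timeouts = LIST_HEAD_INIT(timeouts);
        struct timeout_req *req, *tmp;
        int i;

        for (i = 0; i < 3; i++)
                list_add_tail(&reqs[i].list, &timeouts);

        /* Walk like the new io_flush_timeouts(): leave each entry on the
         * list and let the kill helper take it off. */
        list_for_each_entry_safe(req, tmp, &timeouts, list)
                kill_timeout(req);

        return 0;
}

Built with a C99/GNU compiler (typeof is a GNU extension), this prints
each timeout as it is killed while the list shrinks underneath the
iterator.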
Cc: stable@...r.kernel.org # 5.5+
Signed-off-by: Jens Axboe <axboe@...nel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
fs/io_uring.c | 15 +++++++--------
1 file changed, 7 insertions(+), 8 deletions(-)
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1556,6 +1556,7 @@ static void __io_queue_deferred(struct i
 
 static void io_flush_timeouts(struct io_ring_ctx *ctx)
 {
+        struct io_kiocb *req, *tmp;
         u32 seq;
 
         if (list_empty(&ctx->timeout_list))
@@ -1563,10 +1564,8 @@ static void io_flush_timeouts(struct io_
 
         seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
 
-        do {
+        list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
                 u32 events_needed, events_got;
-                struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
-                                                struct io_kiocb, timeout.list);
 
                 if (io_is_timeout_noseq(req))
                         break;
@@ -1583,9 +1582,8 @@ static void io_flush_timeouts(struct io_
                 if (events_got < events_needed)
                         break;
 
-                list_del_init(&req->timeout.list);
                 io_kill_timeout(req, 0);
-        } while (!list_empty(&ctx->timeout_list));
+        }
 
         ctx->cq_last_tm_flush = seq;
 }
@@ -5639,6 +5637,7 @@ static int io_timeout_prep(struct io_kio
         else
                 data->mode = HRTIMER_MODE_REL;
 
+        INIT_LIST_HEAD(&req->timeout.list);
         hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
         return 0;
 }
@@ -6282,12 +6281,12 @@ static enum hrtimer_restart io_link_time
         if (!list_empty(&req->link_list)) {
                 prev = list_entry(req->link_list.prev, struct io_kiocb,
                                   link_list);
-                if (refcount_inc_not_zero(&prev->refs))
-                        list_del_init(&req->link_list);
-                else
+                list_del_init(&req->link_list);
+                if (!refcount_inc_not_zero(&prev->refs))
                         prev = NULL;
         }
 
+        list_del(&req->timeout.list);
         spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
         if (prev) {
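
For illustration of the last two hunks, a similarly hand-rolled sketch
(again not the kernel code, and without the kernel's list poisoning) of
why the INIT_LIST_HEAD() added in io_timeout_prep() makes the
unconditional list_del(&req->timeout.list) safe even for a timeout that
was never put on ctx->timeout_list: deleting a self-pointing node only
rewrites its own pointers, whereas an uninitialized node would be followed
through garbage.

/*
 * Sketch: list_del() on an initialized-but-never-enqueued node is safe.
 * Hand-rolled stand-ins for <linux/list.h>, not the kernel code.
 */
#include <assert.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_del(struct list_head *entry)
{
        entry->next->prev = entry->prev;
        entry->prev->next = entry->next;
}

int main(void)
{
        struct list_head node;

        INIT_LIST_HEAD(&node);  /* what the io_timeout_prep() hunk adds */
        list_del(&node);        /* safe: only the node's own pointers are touched */
        assert(node.next == &node && node.prev == &node);
        return 0;
}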