Message-Id: <8ff46482b028a4ca69a41193f0ce951dfccc9da6.1586899625.git.asml.silence@gmail.com>
Date:   Wed, 15 Apr 2020 00:39:51 +0300
From:   Pavel Begunkov <asml.silence@...il.com>
To:     Jens Axboe <axboe@...nel.dk>, io-uring@...r.kernel.org,
        linux-kernel@...r.kernel.org
Cc:     Hrvoje Zeba <zeba.hrvoje@...il.com>
Subject: [PATCH 4/4] io_uring: fix timeout's seq catching old requests

Because the sequence logic uses u32 for both req->sequence and sqe->off,
req->sequence + sqe->off can alias a previously submitted and still
in-flight request, triggering the timeout on it instead of waiting for
completion of requests submitted after the timeout.

Use u64 for sequences while keeping sqe->off as u32, so the issue can
only occur with more than 2^64 - 2^32 in-flight requests, which simply
won't fit in memory.
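
To illustrate the wrap-around, here is a minimal standalone C sketch (not
part of the patch; the request/offset values are made up) showing how a
u32 timeout target can alias the sequence of an old in-flight request,
and how widening the sequence to u64 avoids the collision:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* An old request submitted long ago and still in flight. */
	uint32_t old_req_seq = 5;

	/* A timeout submitted much later, close to the u32 wrap point. */
	uint32_t timeout_seq = UINT32_MAX - 10;
	uint32_t off = 16;	/* sqe->off: wait for 16 more completions */

	/* The u32 target wraps around and aliases the old request. */
	uint32_t target32 = timeout_seq + off;		/* == 5 */
	printf("u32 target %u aliases old seq %u: %s\n",
	       target32, old_req_seq,
	       target32 == old_req_seq ? "yes" : "no");

	/* With a u64 sequence the same values no longer collide. */
	uint64_t target64 = (uint64_t)timeout_seq + off;
	printf("u64 target %llu aliases old seq %u: %s\n",
	       (unsigned long long)target64, old_req_seq,
	       target64 == (uint64_t)old_req_seq ? "yes" : "no");

	return 0;
}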

note 1: __req_need_defer() can't be changed the same way, because it's
used without synchronisation for draining, and reading ctx->cached_cq_tail
on a 32-bit arch wouldn't be atomic.

note 2: the io_timeout() overflow magic is left in u32.

Reported-by: Hrvoje Zeba <zeba.hrvoje@...il.com>
Signed-off-by: Pavel Begunkov <asml.silence@...il.com>
---
 fs/io_uring.c | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8ee7b4f72b8f..1961562edf77 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -251,6 +251,7 @@ struct io_ring_ctx {
 
 		wait_queue_head_t	inflight_wait;
 		struct io_uring_sqe	*sq_sqes;
+		u64			sq_submitted;
 	} ____cacheline_aligned_in_smp;
 
 	struct io_rings	*rings;
@@ -302,6 +303,7 @@ struct io_ring_ctx {
 		struct wait_queue_head	cq_wait;
 		struct fasync_struct	*cq_fasync;
 		struct eventfd_ctx	*cq_ev_fd;
+		u64			cq_total;
 	} ____cacheline_aligned_in_smp;
 
 	struct {
@@ -624,7 +626,7 @@ struct io_kiocb {
 	unsigned long		fsize;
 	u64			user_data;
 	u32			result;
-	u32			sequence;
+	u64			sequence;
 
 	struct list_head	link_list;
 
@@ -957,8 +959,8 @@ static inline bool __req_need_defer(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
-	return req->sequence != ctx->cached_cq_tail
-				+ atomic_read(&ctx->cached_cq_overflow);
+	return (u32)req->sequence != ctx->cached_cq_tail
+			+ atomic_read(&ctx->cached_cq_overflow);
 }
 
 static inline bool req_need_defer(struct io_kiocb *req)
@@ -990,7 +992,7 @@ static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
 	if (req) {
 		if (req->flags & REQ_F_TIMEOUT_NOSEQ)
 			return NULL;
-		if (!__req_need_defer(req)) {
+		if (req->sequence == ctx->cq_total) {
 			list_del_init(&req->list);
 			return req;
 		}
@@ -1141,6 +1143,7 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
 	if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
 		return NULL;
 
+	ctx->cq_total++;
 	ctx->cached_cq_tail++;
 	return &rings->cqes[tail & ctx->cq_mask];
 }
@@ -1204,6 +1207,7 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 		} else {
 			WRITE_ONCE(ctx->rings->cq_overflow,
 				atomic_inc_return(&ctx->cached_cq_overflow));
+			ctx->cq_total++;
 		}
 	}
 
@@ -1244,6 +1248,7 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
 	} else if (ctx->cq_overflow_flushed) {
 		WRITE_ONCE(ctx->rings->cq_overflow,
 				atomic_inc_return(&ctx->cached_cq_overflow));
+		ctx->cq_total++;
 	} else {
 		if (list_empty(&ctx->cq_overflow_list)) {
 			set_bit(0, &ctx->sq_check_overflow);
@@ -4672,7 +4677,7 @@ static int io_timeout(struct io_kiocb *req)
 	struct list_head *entry;
 	unsigned span = 0;
 	u32 count = req->timeout.count;
-	u32 seq = req->sequence;
+	u32 seq = (u32)req->sequence;
 
 	data = &req->io->timeout;
 
@@ -5730,8 +5735,10 @@ static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
 	 *    though the application is the one updating it.
 	 */
 	head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
-	if (likely(head < ctx->sq_entries))
+	if (likely(head < ctx->sq_entries)) {
+		ctx->sq_submitted++;
 		return &ctx->sq_sqes[head];
+	}
 
 	/* drop invalid entries */
 	ctx->cached_sq_dropped++;
@@ -5760,7 +5767,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	 * it can be used to mark the position of the first IO in the
 	 * link list.
 	 */
-	req->sequence = ctx->cached_sq_head - ctx->cached_sq_dropped;
+	req->sequence = ctx->sq_submitted - 1;
 	req->opcode = READ_ONCE(sqe->opcode);
 	req->user_data = READ_ONCE(sqe->user_data);
 	req->io = NULL;
-- 
2.24.0
