Message-Id: <20221019083327.264626413@linuxfoundation.org>
Date: Wed, 19 Oct 2022 10:35:33 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org, stable@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Stefan Metzmacher <metze@...ba.org>,
Pavel Begunkov <asml.silence@...il.com>,
Jens Axboe <axboe@...nel.dk>
Subject: [PATCH 6.0 846/862] io_uring/net: don't skip notifs for failed requests
From: Pavel Begunkov <asml.silence@...il.com>
[ upstream commit 6ae91ac9a6aa7d6005c3c6d0f4d263fbab9f377f ]
We currently only add a notification CQE when the send succeeded, i.e.
cqe.res >= 0. However, it'd be more robust to do buffer notifications
for failed requests as well, in case drivers decide to do something funky.

Always return a buffer notification after initial prep; don't hide it.
This behaviour is better aligned with the documentation, and the patch
also helps userspace to respect it.
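
For illustration only (not part of this patch): after this change,
userspace that issues a zero-copy send should expect two CQEs per
request, the send result (flagged IORING_CQE_F_MORE even when res < 0)
followed by the buffer notification (flagged IORING_CQE_F_NOTIF). A
minimal sketch using liburing >= 2.2 on a connected sockfd; the helper
name is invented here and error handling is omitted:

#include <liburing.h>

static int send_zc_and_reap(struct io_uring *ring, int sockfd,
			    const void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int res, more;

	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
	io_uring_submit(ring);

	/* First CQE: the send result. With this patch IORING_CQE_F_MORE
	 * is set even on failure, so the notification below must be
	 * reaped on the error path as well. */
	io_uring_wait_cqe(ring, &cqe);
	res = cqe->res;
	more = cqe->flags & IORING_CQE_F_MORE;
	io_uring_cqe_seen(ring, cqe);

	if (more) {
		/* Second CQE: the buffer notification (IORING_CQE_F_NOTIF);
		 * only after it arrives may `buf` be reused or freed. */
		io_uring_wait_cqe(ring, &cqe);
		io_uring_cqe_seen(ring, cqe);
	}
	return res;
}

Before this patch, a failed send posted no notification CQE at all, so
a loop like the above could not rely on IORING_CQE_F_MORE consistently.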
Cc: stable@...r.kernel.org # 6.0
Suggested-by: Stefan Metzmacher <metze@...ba.org>
Signed-off-by: Pavel Begunkov <asml.silence@...il.com>
Link: https://lore.kernel.org/r/9c8bead87b2b980fcec441b8faef52188b4a6588.1664292100.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@...nel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
io_uring/net.c | 22 ++++++----------------
1 file changed, 6 insertions(+), 16 deletions(-)
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -879,7 +879,6 @@ void io_send_zc_cleanup(struct io_kiocb *req)
 {
 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
 
-	zc->notif->flags |= REQ_F_CQE_SKIP;
 	io_notif_flush(zc->notif);
 	zc->notif = NULL;
 }
@@ -996,7 +995,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 	struct msghdr msg;
 	struct iovec iov;
 	struct socket *sock;
-	unsigned msg_flags, cflags;
+	unsigned msg_flags;
 	int ret, min_ret = 0;
 
 	sock = sock_from_file(req->file);
@@ -1064,8 +1063,6 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 			req->flags |= REQ_F_PARTIAL_IO;
 			return io_setup_async_addr(req, addr, issue_flags);
 		}
-		if (ret < 0 && !zc->done_io)
-			zc->notif->flags |= REQ_F_CQE_SKIP;
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
 		req_set_fail(req);
@@ -1078,8 +1075,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 
 	io_notif_flush(zc->notif);
 	req->flags &= ~REQ_F_NEED_CLEANUP;
-	cflags = ret >= 0 ? IORING_CQE_F_MORE : 0;
-	io_req_set_res(req, ret, cflags);
+	io_req_set_res(req, ret, IORING_CQE_F_MORE);
 	return IOU_OK;
 }
 
@@ -1096,17 +1092,11 @@ void io_sendrecv_fail(struct io_kiocb *req)
 void io_send_zc_fail(struct io_kiocb *req)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	int res = req->cqe.res;
 
-	if (req->flags & REQ_F_PARTIAL_IO) {
-		if (req->flags & REQ_F_NEED_CLEANUP) {
-			io_notif_flush(sr->notif);
-			sr->notif = NULL;
-			req->flags &= ~REQ_F_NEED_CLEANUP;
-		}
-		res = sr->done_io;
-	}
-	io_req_set_res(req, res, req->cqe.flags);
+	if (req->flags & REQ_F_PARTIAL_IO)
+		req->cqe.res = sr->done_io;
+	if (req->flags & REQ_F_NEED_CLEANUP)
+		req->cqe.flags |= IORING_CQE_F_MORE;
 }
 
 int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)