Message-Id: <11a42896513c9349fad154c201e69e03ec52bf8c.1579901866.git.asml.silence@gmail.com>
Date: Sat, 25 Jan 2020 00:40:29 +0300
From: Pavel Begunkov <asml.silence@...il.com>
To: Jens Axboe <axboe@...nel.dk>, io-uring@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH 6/8] io_uring: move *link into io_submit_state

It's more convenient to have the link head in the submission state than
to pass it around as a pointer, so move it there.

Signed-off-by: Pavel Begunkov <asml.silence@...il.com>
---
 fs/io_uring.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)
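
As a quick illustration of the resulting flow: the link head is reset in
io_submit_start(), grown by io_submit_sqe() through state->link, and any
unterminated chain is flushed by io_submit_end(). Below is a minimal,
self-contained userspace sketch of that pattern. All names in it
(submit_state, submit_one, queue_link_head, is_link) are hypothetical
stand-ins, and the real io_uring link handling is more involved than
this model; it is only meant to show the shape of the change.

#include <stdio.h>

/* Hypothetical stand-ins for io_kiocb and io_submit_state. */
struct request {
	int id;
	int is_link;		/* rough analogue of IOSQE_IO_LINK */
	struct request *next;	/* chain of linked requests */
};

struct submit_state {
	struct request *link;	/* head of the chain being built */
};

static void queue_link_head(struct request *head)
{
	for (struct request *req = head; req; req = req->next)
		printf("queued req %d\n", req->id);
}

/* analogue of io_submit_start(): reset per-batch state */
static void submit_start(struct submit_state *state)
{
	state->link = NULL;
}

/* analogue of io_submit_sqe(): chain via state->link, no **link argument */
static void submit_one(struct submit_state *state, struct request *req)
{
	if (state->link) {
		struct request *tail = state->link;

		while (tail->next)
			tail = tail->next;
		tail->next = req;
		if (!req->is_link) {
			/* last request of the chain, enqueue it */
			queue_link_head(state->link);
			state->link = NULL;
		}
	} else if (req->is_link) {
		state->link = req;	/* start a new chain */
	} else {
		queue_link_head(req);	/* plain request, queue directly */
	}
}

/* analogue of io_submit_end(): flush an unterminated chain */
static void submit_end(struct submit_state *state)
{
	if (state->link)
		queue_link_head(state->link);
}

int main(void)
{
	struct request a = { .id = 1, .is_link = 1 };
	struct request b = { .id = 2 };	/* terminates the chain */
	struct request c = { .id = 3 };	/* standalone request */
	struct submit_state state;

	submit_start(&state);
	submit_one(&state, &a);
	submit_one(&state, &b);
	submit_one(&state, &c);
	submit_end(&state);	/* nothing left over in this example */
	return 0;
}
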
diff --git a/fs/io_uring.c b/fs/io_uring.c
index c0e72390d272..f022453e3839 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -221,6 +221,7 @@ struct io_submit_state {
 	struct file		*ring_file;
 	int			ring_fd;
 	struct mm_struct	*mm;
+	struct io_kiocb		*link;
 };
 
 struct io_ring_ctx {
@@ -4664,10 +4665,10 @@ static inline void io_queue_link_head(struct io_kiocb *req)
 #define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK|	\
 				IOSQE_IO_HARDLINK | IOSQE_ASYNC)
 
-static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-			  struct io_kiocb **link)
+static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_ring_ctx *ctx = req->ctx;
+	struct io_submit_state *state = &ctx->submit_state;
 	unsigned int sqe_flags;
 	int ret;
 
@@ -4697,8 +4698,8 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	 * submitted sync once the chain is complete. If none of those
 	 * conditions are true (normal request), then just queue it.
 	 */
-	if (*link) {
-		struct io_kiocb *head = *link;
+	if (state->link) {
+		struct io_kiocb *head = state->link;
 
 		/*
 		 * Taking sequential execution of a link, draining both sides
@@ -4728,7 +4729,7 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		/* last request of a link, enqueue the link */
 		if (!(sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK))) {
 			io_queue_link_head(head);
-			*link = NULL;
+			state->link = NULL;
 		}
 	} else {
 		if (unlikely(ctx->drain_next)) {
@@ -4741,7 +4742,7 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			ret = io_req_defer_prep(req, sqe);
 			if (ret)
 				req->flags |= REQ_F_FAIL_LINK;
-			*link = req;
+			state->link = req;
 		} else {
 			io_queue_sqe(req, sqe);
 		}
@@ -4761,6 +4762,8 @@ static void io_submit_end(struct io_ring_ctx *ctx)
 	if (state->free_reqs)
 		kmem_cache_free_bulk(req_cachep, state->free_reqs,
					&state->reqs[state->cur_req]);
+	if (state->link)
+		io_queue_link_head(state->link);
 }
 
 /*
@@ -4777,6 +4780,7 @@ static void io_submit_start(struct io_ring_ctx *ctx, unsigned int max_ios,
 	state->ring_file = ring_file;
 	state->ring_fd = ring_fd;
+	state->link = NULL;
 }
 
 static void io_commit_sqring(struct io_ring_ctx *ctx)
@@ -4839,7 +4843,6 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			  struct file *ring_file, int ring_fd, bool async)
 {
 	struct blk_plug plug;
-	struct io_kiocb *link = NULL;
 	int i, submitted = 0;
 	bool mm_fault = false;
 
@@ -4897,14 +4900,12 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		req->needs_fixed_file = async;
 		trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
						true, async);
-		if (!io_submit_sqe(req, sqe, &link))
+		if (!io_submit_sqe(req, sqe))
 			break;
 	}
 
 	if (submitted != nr)
 		percpu_ref_put_many(&ctx->refs, nr - submitted);
-	if (link)
-		io_queue_link_head(link);
 	io_submit_end(ctx);
 
 	if (nr > IO_PLUG_THRESHOLD)
--
2.24.0