Message-Id: <f694f6b81e555c15ee0e75ffbe9509bbfa8d4f27.1589713554.git.asml.silence@gmail.com>
Date: Sun, 17 May 2020 14:13:40 +0300
From: Pavel Begunkov <asml.silence@...il.com>
To: Jens Axboe <axboe@...nel.dk>, io-uring@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH 1/3] io_uring: remove req->needs_fixed_file
A submission is "async" iff it's done by the SQPOLL thread. Instead of
passing the @async flag into io_submit_sqes(), deduce it from ctx->flags.
Signed-off-by: Pavel Begunkov <asml.silence@...il.com>
---
fs/io_uring.c | 21 ++++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
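Not part of the patch, just a minimal standalone sketch of the idea for
readers: struct io_ring_ctx is reduced to a stand-in holding only the
flags, and submit()/main() are hypothetical callers. It shows how the
submit path can derive the "async" property from the context flags via a
helper instead of threading a bool parameter through.

	/* Sketch only: simplified stand-ins for the kernel types. */
	#include <stdbool.h>
	#include <stdio.h>

	#define IORING_SETUP_SQPOLL	(1U << 1)	/* mirrors the uapi flag */

	struct io_ring_ctx {
		unsigned int flags;
	};

	/* Same shape as the helper added by this patch. */
	static inline bool io_async_submit(struct io_ring_ctx *ctx)
	{
		return ctx->flags & IORING_SETUP_SQPOLL;
	}

	/* Hypothetical caller: no bool parameter, it asks the ctx instead. */
	static void submit(struct io_ring_ctx *ctx)
	{
		printf("async submit: %d\n", io_async_submit(ctx));
	}

	int main(void)
	{
		struct io_ring_ctx sqpoll_ctx = { .flags = IORING_SETUP_SQPOLL };
		struct io_ring_ctx plain_ctx  = { .flags = 0 };

		submit(&sqpoll_ctx);	/* prints 1 */
		submit(&plain_ctx);	/* prints 0 */
		return 0;
	}
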
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3d0a08560689..739aae7070c1 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -626,7 +626,6 @@ struct io_kiocb {
struct io_async_ctx *io;
int cflags;
- bool needs_fixed_file;
u8 opcode;
struct io_ring_ctx *ctx;
@@ -891,6 +890,11 @@ EXPORT_SYMBOL(io_uring_get_socket);
static void io_file_put_work(struct work_struct *work);
+static inline bool io_async_submit(struct io_ring_ctx *ctx)
+{
+ return ctx->flags & IORING_SETUP_SQPOLL;
+}
+
static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
@@ -5487,7 +5491,7 @@ static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
bool fixed;
fixed = (req->flags & REQ_F_FIXED_FILE) != 0;
- if (unlikely(!fixed && req->needs_fixed_file))
+ if (unlikely(!fixed && io_async_submit(req->ctx)))
return -EBADF;
return io_file_get(state, req, fd, &req->file, fixed);
@@ -5866,7 +5870,7 @@ static inline void io_consume_sqe(struct io_ring_ctx *ctx)
static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
const struct io_uring_sqe *sqe,
- struct io_submit_state *state, bool async)
+ struct io_submit_state *state)
{
unsigned int sqe_flags;
int id;
@@ -5887,7 +5891,6 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
refcount_set(&req->refs, 2);
req->task = NULL;
req->result = 0;
- req->needs_fixed_file = async;
INIT_IO_WORK(&req->work, io_wq_submit_work);
if (unlikely(req->opcode >= IORING_OP_LAST))
@@ -5928,7 +5931,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
}
static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
- struct file *ring_file, int ring_fd, bool async)
+ struct file *ring_file, int ring_fd)
{
struct io_submit_state state, *statep = NULL;
struct io_kiocb *link = NULL;
@@ -5972,7 +5975,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
break;
}
- err = io_init_req(ctx, req, sqe, statep, async);
+ err = io_init_req(ctx, req, sqe, statep);
io_consume_sqe(ctx);
/* will complete beyond this point, count as submitted */
submitted++;
@@ -5985,7 +5988,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
}
trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
- true, async);
+ true, io_async_submit(ctx));
err = io_submit_sqe(req, sqe, &link);
if (err)
goto fail_req;
@@ -6124,7 +6127,7 @@ static int io_sq_thread(void *data)
}
mutex_lock(&ctx->uring_lock);
- ret = io_submit_sqes(ctx, to_submit, NULL, -1, true);
+ ret = io_submit_sqes(ctx, to_submit, NULL, -1);
mutex_unlock(&ctx->uring_lock);
timeout = jiffies + ctx->sq_thread_idle;
}
@@ -7635,7 +7638,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
submitted = to_submit;
} else if (to_submit) {
mutex_lock(&ctx->uring_lock);
- submitted = io_submit_sqes(ctx, to_submit, f.file, fd, false);
+ submitted = io_submit_sqes(ctx, to_submit, f.file, fd);
mutex_unlock(&ctx->uring_lock);
if (submitted != to_submit)
--
2.24.0