Message-Id: <d4b705af71f076a9cf845aa3fc06d6f9866f84f8.1579981749.git.asml.silence@gmail.com>
Date: Sat, 25 Jan 2020 22:53:40 +0300
From: Pavel Begunkov <asml.silence@...il.com>
To: Jens Axboe <axboe@...nel.dk>, io-uring@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v2 3/8] io_uring: place io_submit_state into ctx
io_submit_state is used only during submission and only while holding
ctx->uring_lock, so at most one instance is in use at a time. Move it
into struct io_ring_ctx, so it:

- doesn't consume on-stack memory
- persists across io_uring_enter calls
- is available without being passed down the call stack

The last point is especially useful for letting opcode handlers manage
their resources themselves, as splice would. It's also a base for other
hackish optimisations in the future.

Signed-off-by: Pavel Begunkov <asml.silence@...il.com>
---
fs/io_uring.c | 75 +++++++++++++++++++++++++++------------------------
1 file changed, 40 insertions(+), 35 deletions(-)
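
Note for readers less familiar with the submission path: the ownership
rule this patch relies on can be modelled in a few lines of plain C.
The sketch below is illustrative only and not part of the patch; all
names are hypothetical, and a userspace pthread mutex stands in for
ctx->uring_lock. It shows the pattern the patch adopts: a single
lock-protected state instance embedded in the long-lived context
replaces a fresh on-stack copy per call, and helpers deeper in the
call chain reach it through the context pointer alone.

	/* Standalone illustration, not kernel code. */
	#include <pthread.h>
	#include <stdio.h>

	struct submit_state {
		unsigned int free_reqs;
		unsigned int ios_left;
	};

	struct ring_ctx {
		pthread_mutex_t uring_lock;
		/* protected by uring_lock */
		struct submit_state submit_state;
	};

	static void submit_start(struct ring_ctx *ctx, unsigned int max_ios)
	{
		/* caller holds ctx->uring_lock, so this is the only user */
		ctx->submit_state.ios_left = max_ios;
	}

	static void handle_op(struct ring_ctx *ctx)
	{
		/* no state pointer threaded through the call stack */
		ctx->submit_state.ios_left--;
	}

	int main(void)
	{
		struct ring_ctx ctx = {
			.uring_lock = PTHREAD_MUTEX_INITIALIZER,
		};

		pthread_mutex_lock(&ctx.uring_lock);
		submit_start(&ctx, 8);
		handle_op(&ctx);
		pthread_mutex_unlock(&ctx.uring_lock);

		printf("ios_left=%u\n", ctx.submit_state.ios_left);
		return 0;
	}

Since only one submitter can hold the lock, embedding the state in the
context is safe despite the context being shared.
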
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f4e7575b511d..8b159e21a35f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -197,6 +197,27 @@ struct fixed_file_data {
struct completion done;
};
+#define IO_PLUG_THRESHOLD 2
+#define IO_IOPOLL_BATCH 8
+
+struct io_submit_state {
+ /*
+ * io_kiocb alloc cache
+ */
+ void *reqs[IO_IOPOLL_BATCH];
+ unsigned int free_reqs;
+ unsigned int cur_req;
+
+ /*
+ * File reference cache
+ */
+ struct file *file;
+ unsigned int fd;
+ unsigned int has_refs;
+ unsigned int used_refs;
+ unsigned int ios_left;
+};
+
struct io_ring_ctx {
struct {
struct percpu_ref refs;
@@ -308,6 +329,9 @@ struct io_ring_ctx {
spinlock_t inflight_lock;
struct list_head inflight_list;
} ____cacheline_aligned_in_smp;
+
+ /* protected by uring_lock */
+ struct io_submit_state submit_state;
};
/*
@@ -573,27 +597,6 @@ struct io_kiocb {
struct io_wq_work work;
};
-#define IO_PLUG_THRESHOLD 2
-#define IO_IOPOLL_BATCH 8
-
-struct io_submit_state {
- /*
- * io_kiocb alloc cache
- */
- void *reqs[IO_IOPOLL_BATCH];
- unsigned int free_reqs;
- unsigned int cur_req;
-
- /*
- * File reference cache
- */
- struct file *file;
- unsigned int fd;
- unsigned int has_refs;
- unsigned int used_refs;
- unsigned int ios_left;
-};
-
struct io_op_def {
/* needs req->io allocated for deferral/async */
unsigned async_ctx : 1;
@@ -1118,11 +1121,11 @@ static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
return NULL;
}
-static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
- struct io_submit_state *state)
+static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx)
{
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
struct io_kiocb *req;
+ struct io_submit_state *state = &ctx->submit_state;
if (!state->free_reqs) {
size_t sz;
@@ -4418,10 +4421,10 @@ static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
return table->files[index & IORING_FILE_TABLE_MASK];
}
-static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
+static int io_req_set_file(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_ring_ctx *ctx = req->ctx;
+ struct io_submit_state *state = &ctx->submit_state;
unsigned flags;
int fd;
@@ -4658,7 +4661,7 @@ static inline void io_queue_link_head(struct io_kiocb *req)
IOSQE_IO_HARDLINK | IOSQE_ASYNC)
static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- struct io_submit_state *state, struct io_kiocb **link)
+ struct io_kiocb **link)
{
struct io_ring_ctx *ctx = req->ctx;
unsigned int sqe_flags;
@@ -4675,7 +4678,7 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
req->flags |= sqe_flags & (IOSQE_IO_DRAIN|IOSQE_IO_HARDLINK|
IOSQE_ASYNC);
- ret = io_req_set_file(state, req, sqe);
+ ret = io_req_set_file(req, sqe);
if (unlikely(ret)) {
err_req:
io_cqring_add_event(req, ret);
@@ -4746,8 +4749,10 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
/*
* Batched submission is done, ensure local IO is flushed out.
*/
-static void io_submit_state_end(struct io_submit_state *state)
+static void io_submit_end(struct io_ring_ctx *ctx)
{
+ struct io_submit_state *state = &ctx->submit_state;
+
io_file_put(state);
if (state->free_reqs)
kmem_cache_free_bulk(req_cachep, state->free_reqs,
@@ -4757,9 +4762,10 @@ static void io_submit_state_end(struct io_submit_state *state)
/*
* Start submission side cache.
*/
-static void io_submit_state_start(struct io_submit_state *state,
- unsigned int max_ios)
+static void io_submit_start(struct io_ring_ctx *ctx, unsigned int max_ios)
{
+ struct io_submit_state *state = &ctx->submit_state;
+
state->free_reqs = 0;
state->file = NULL;
state->ios_left = max_ios;
@@ -4826,7 +4832,6 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
struct mm_struct **mm, bool async)
{
struct blk_plug plug;
- struct io_submit_state state;
struct io_kiocb *link = NULL;
int i, submitted = 0;
bool mm_fault = false;
@@ -4844,7 +4849,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
if (!percpu_ref_tryget_many(&ctx->refs, nr))
return -EAGAIN;
- io_submit_state_start(&state, nr);
+ io_submit_start(ctx, nr);
if (nr > IO_PLUG_THRESHOLD)
blk_start_plug(&plug);
@@ -4855,7 +4860,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
const struct io_uring_sqe *sqe;
struct io_kiocb *req;
- req = io_get_req(ctx, &state);
+ req = io_get_req(ctx);
if (unlikely(!req)) {
if (!submitted)
submitted = -EAGAIN;
@@ -4888,7 +4893,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
req->needs_fixed_file = async;
trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
true, async);
- if (!io_submit_sqe(req, sqe, &state, &link))
+ if (!io_submit_sqe(req, sqe, &link))
break;
}
@@ -4900,7 +4905,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
if (link)
io_queue_link_head(link);
- io_submit_state_end(&state);
+ io_submit_end(ctx);
if (nr > IO_PLUG_THRESHOLD)
blk_finish_plug(&plug);
--
2.24.0