Message-Id: <229f00564ac243cee0bfef8372a8581efedc0da2.1576944502.git.asml.silence@gmail.com>
Date: Sat, 21 Dec 2019 19:15:08 +0300
From: Pavel Begunkov <asml.silence@...il.com>
To: Jens Axboe <axboe@...nel.dk>, io-uring@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: Tejun Heo <tj@...nel.org>, Dennis Zhou <dennis@...nel.org>,
Christoph Lameter <cl@...ux.com>
Subject: [PATCH v2 2/3] io_uring: batch getting pcpu references
percpu_ref_tryget() has its own overhead. Instead of getting a reference
for each request, grab a bunch once per io_submit_sqes().

A basic benchmark, submitting and waiting for 128 non-linked nops,
showed a ~5% performance gain (7044 KIOPS vs 7423 KIOPS).
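
For reference, a minimal sketch of the batched-reference pattern this
patch applies (not part of the patch itself; submit_batch() and
submit_one() are made-up names here, only percpu_ref_tryget_many() and
percpu_ref_put_many() are the real helpers used in the diff below):

/*
 * Illustrative only: take nr references in one go, hand one off to each
 * request that is actually submitted, and return the leftovers in bulk.
 */
#include <linux/percpu-refcount.h>

static int submit_one(void);	/* hypothetical per-request work, 0 on success */

static int submit_batch(struct percpu_ref *refs, unsigned int nr)
{
	unsigned int extra_refs;
	int submitted = 0;

	/* one tryget for the whole batch instead of one per request */
	if (!percpu_ref_tryget_many(refs, nr))
		return -EAGAIN;
	extra_refs = nr;

	while (submitted < nr) {
		if (submit_one())
			break;
		--extra_refs;		/* this request now owns one reference */
		submitted++;
	}

	/* put back the references we grabbed but never handed out */
	if (extra_refs)
		percpu_ref_put_many(refs, extra_refs);

	return submitted;
}
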
Signed-off-by: Pavel Begunkov <asml.silence@...il.com>
---
fs/io_uring.c | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 513f1922ce6a..5392134f042f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1045,9 +1045,6 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
struct io_kiocb *req;
- if (!percpu_ref_tryget(&ctx->refs))
- return NULL;
-
if (!state) {
req = kmem_cache_alloc(req_cachep, gfp);
if (unlikely(!req))
@@ -4391,6 +4388,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
struct io_submit_state state, *statep = NULL;
struct io_kiocb *link = NULL;
int i, submitted = 0;
+ unsigned int extra_refs;
bool mm_fault = false;
/* if we have a backlog and couldn't flush it all, return BUSY */
@@ -4400,6 +4398,10 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
return -EBUSY;
}
+ if (!percpu_ref_tryget_many(&ctx->refs, nr))
+ return -EAGAIN;
+ extra_refs = nr;
+
if (nr > IO_PLUG_THRESHOLD) {
io_submit_state_start(&state, nr);
statep = &state;
@@ -4415,6 +4417,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
submitted = -EAGAIN;
break;
}
+ --extra_refs;
if (!io_get_sqring(ctx, req, &sqe)) {
__io_free_req(req);
break;
@@ -4451,6 +4454,8 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
io_queue_link_head(link);
if (statep)
io_submit_state_end(&state);
+ if (extra_refs)
+ percpu_ref_put_many(&ctx->refs, extra_refs);
/* Commit SQ ring head once we've consumed and submitted all SQEs */
io_commit_sqring(ctx);
--
2.24.0