Message-Id: <ca4a431ae9c57c06b026bdd01e7bca90488fbcf9.1680187408.git.asml.silence@gmail.com>
Date: Thu, 30 Mar 2023 15:53:26 +0100
From: Pavel Begunkov <asml.silence@gmail.com>
To: io-uring@vger.kernel.org
Cc: Jens Axboe <axboe@kernel.dk>, asml.silence@gmail.com,
	linux-kernel@vger.kernel.org
Subject: [PATCH 08/11] io_uring/rsrc: optimise io_rsrc_put allocation

Every io_rsrc_node keeps a list of items to put, and all entries are
kmalloc()'ed. However, it's quite common to queue up only one entry per
node, so let's add an inline entry there to avoid extra allocations.
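
As an aside for readers, this is the classic "inline first element"
pattern: embed one entry in the owning structure and fall back to the
heap only when a second entry shows up. Below is a minimal userspace
sketch of the idea (illustrative only; node, entry, node_add and
node_drain are made-up names, not the io_uring types):

/*
 * Illustrative sketch, not kernel code: the first queued entry lives
 * inside the node itself; later entries fall back to the heap. This
 * mirrors the inline_items/item_list split in the patch below.
 */
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int value;
	struct entry *next;	/* used only by heap-allocated entries */
};

struct node {
	struct entry inline_item;	/* slot for the common one-entry case */
	int inline_items;		/* 0 or 1: is inline_item occupied? */
	struct entry *overflow;		/* heap-allocated extras, if any */
};

static int node_add(struct node *n, int value)
{
	struct entry *e;

	if (!n->inline_items) {
		/* first entry: use the embedded slot, no allocation */
		e = &n->inline_item;
		n->inline_items = 1;
	} else {
		/* second and later entries: allocate and chain */
		e = malloc(sizeof(*e));
		if (!e)
			return -1;
		e->next = n->overflow;
		n->overflow = e;
	}
	e->value = value;
	return 0;
}

static void node_drain(struct node *n)
{
	struct entry *e, *tmp;

	if (n->inline_items) {
		printf("put %d (inline)\n", n->inline_item.value);
		n->inline_items = 0;
	}
	for (e = n->overflow; e; e = tmp) {
		tmp = e->next;
		printf("put %d (heap)\n", e->value);
		free(e);
	}
	n->overflow = NULL;
}

int main(void)
{
	struct node n = { .inline_items = 0, .overflow = NULL };

	node_add(&n, 1);	/* hits the inline slot */
	node_add(&n, 2);	/* falls back to malloc() */
	node_drain(&n);
	return 0;
}

The payoff is that the common single-entry case needs no allocator
round-trip at all; only the second and later entries queued on the
same node pay for an allocation.
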
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
io_uring/rsrc.c | 51 ++++++++++++++++++++++++++++++++-----------------
io_uring/rsrc.h | 2 ++
 2 files changed, 36 insertions(+), 17 deletions(-)

diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 10006fb169d2..95e71300bb35 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -140,26 +140,34 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slo
*slot = NULL;
}

+static void io_rsrc_put_work_one(struct io_rsrc_data *rsrc_data,
+ struct io_rsrc_put *prsrc)
+{
+ struct io_ring_ctx *ctx = rsrc_data->ctx;
+
+ if (prsrc->tag) {
+ if (ctx->flags & IORING_SETUP_IOPOLL) {
+ mutex_lock(&ctx->uring_lock);
+ io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
+ mutex_unlock(&ctx->uring_lock);
+ } else {
+ io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
+ }
+ }
+ rsrc_data->do_put(ctx, prsrc);
+}
+
static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
{
struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
- struct io_ring_ctx *ctx = rsrc_data->ctx;
struct io_rsrc_put *prsrc, *tmp;

+ if (ref_node->inline_items)
+ io_rsrc_put_work_one(rsrc_data, &ref_node->item);
+
list_for_each_entry_safe(prsrc, tmp, &ref_node->item_list, list) {
list_del(&prsrc->list);
-
- if (prsrc->tag) {
- if (ctx->flags & IORING_SETUP_IOPOLL) {
- mutex_lock(&ctx->uring_lock);
- io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
- mutex_unlock(&ctx->uring_lock);
- } else {
- io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
- }
- }
-
- rsrc_data->do_put(ctx, prsrc);
+ io_rsrc_put_work_one(rsrc_data, prsrc);
kfree(prsrc);
}
@@ -251,6 +259,7 @@ static struct io_rsrc_node *io_rsrc_node_alloc(void)
INIT_LIST_HEAD(&ref_node->node);
INIT_LIST_HEAD(&ref_node->item_list);
ref_node->done = false;
+ ref_node->inline_items = 0;
return ref_node;
}
@@ -729,15 +738,23 @@ int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
{
u64 *tag_slot = io_get_tag_slot(data, idx);
struct io_rsrc_put *prsrc;
+ bool inline_item = true;

- prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
- if (!prsrc)
- return -ENOMEM;
+ if (!node->inline_items) {
+ prsrc = &node->item;
+ node->inline_items++;
+ } else {
+ prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
+ if (!prsrc)
+ return -ENOMEM;
+ inline_item = false;
+ }
prsrc->tag = *tag_slot;
*tag_slot = 0;
prsrc->rsrc = rsrc;
- list_add(&prsrc->list, &node->item_list);
+ if (!inline_item)
+ list_add(&prsrc->list, &node->item_list);
return 0;
}
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index c68846de031f..17293ab90f64 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -49,6 +49,8 @@ struct io_rsrc_node {
* came from the same table and so are of the same type.
*/
struct list_head item_list;
+ struct io_rsrc_put item;
+ int inline_items;
};

struct io_mapped_ubuf {
--
2.39.1