Message-ID: <20250217022511.1150145-2-csander@purestorage.com>
Date: Sun, 16 Feb 2025 19:25:05 -0700
From: Caleb Sander Mateos <csander@...estorage.com>
To: Jens Axboe <axboe@...nel.dk>,
	Pavel Begunkov <asml.silence@...il.com>
Cc: Caleb Sander Mateos <csander@...estorage.com>,
	io-uring@...r.kernel.org,
	linux-kernel@...r.kernel.org
Subject: [PATCH v2 2/2] io_uring: pass struct io_tw_state by value

8e5b3b89ecaf ("io_uring: remove struct io_tw_state::locked") removed the
only field of io_tw_state but kept it as a task work callback argument
to "forc[e] users not to invoke them carelessly out of a wrong context".
Passing the struct io_tw_state * argument adds a few instructions to all
callers that can't inline the functions and see that the argument is
unused.

So pass struct io_tw_state by value instead. Since it's a 0-sized value,
it can be passed without any instructions needed to initialize it.
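
For illustration, a minimal userspace sketch of the same zero-sized
token idiom follows. It leans on the GNU C extension that gives an
empty struct size 0, and every name in it (token_t, do_work, run) is
hypothetical rather than taken from this patch:

/*
 * Minimal userspace sketch of the zero-sized token idiom (assumption:
 * GNU C, where an empty struct has size 0). token_t, do_work, and run
 * are hypothetical names, not io_uring APIs.
 */
#include <stdio.h>

struct token {
};
typedef struct token token_t;

/* Requiring a token proves the caller set up the right context. */
static void do_work(int v, token_t tok)
{
	(void)tok;	/* zero-sized: nothing is passed at runtime */
	printf("work: %d\n", v);
}

/* Only "core" code is supposed to instantiate the token. */
static void run(void)
{
	struct token tok = {};

	do_work(42, tok);	/* by value: no pointer is materialized */
}

int main(void)
{
	run();
	return 0;
}

Under the SysV x86-64 ABI as implemented by gcc, the tok argument
should occupy no registers or stack slots, so do_work(42, tok) compiles
to the same call sequence as a single-argument call, while passing
&tok would force a pointer into an argument register.
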
Signed-off-by: Caleb Sander Mateos <csander@...estorage.com>
---
 include/linux/io_uring_types.h |  2 +-
 io_uring/io_uring.c            | 14 +++++++-------
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index ea4694ee9d19..123e69368730 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -442,11 +442,11 @@ struct io_ring_ctx {
  * ONLY core io_uring.c should instantiate this struct.
  */
 struct io_tw_state {
 };
 /* Alias to use in code that doesn't instantiate struct io_tw_state */
-typedef struct io_tw_state *io_tw_token_t;
+typedef struct io_tw_state io_tw_token_t;
 
 enum {
 	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
 	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
 	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index b44ff8871725..b688953d1de8 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -253,11 +253,11 @@ static __cold void io_fallback_req_func(struct work_struct *work)
 	struct io_tw_state ts = {};
 
 	percpu_ref_get(&ctx->refs);
 	mutex_lock(&ctx->uring_lock);
 	llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
-		req->io_task_work.func(req, &ts);
+		req->io_task_work.func(req, ts);
 	io_submit_flush_completions(ctx);
 	mutex_unlock(&ctx->uring_lock);
 	percpu_ref_put(&ctx->refs);
 }
 
@@ -1050,28 +1050,28 @@ struct llist_node *io_handle_tw_list(struct llist_node *node,
 		struct llist_node *next = node->next;
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
 						    io_task_work.node);
 
 		if (req->ctx != ctx) {
-			ctx_flush_and_put(ctx, &ts);
+			ctx_flush_and_put(ctx, ts);
 			ctx = req->ctx;
 			mutex_lock(&ctx->uring_lock);
 			percpu_ref_get(&ctx->refs);
 		}
 		INDIRECT_CALL_2(req->io_task_work.func,
 				io_poll_task_func, io_req_rw_complete,
-				req, &ts);
+				req, ts);
 		node = next;
 		(*count)++;
 		if (unlikely(need_resched())) {
-			ctx_flush_and_put(ctx, &ts);
+			ctx_flush_and_put(ctx, ts);
 			ctx = NULL;
 			cond_resched();
 		}
 	} while (node && *count < max_entries);
 
-	ctx_flush_and_put(ctx, &ts);
+	ctx_flush_and_put(ctx, ts);
 	return node;
 }
 
 static __cold void __io_fallback_tw(struct llist_node *node, bool sync)
 {
@@ -1339,22 +1339,22 @@ static inline int io_run_local_work_locked(struct io_ring_ctx *ctx,
 {
 	struct io_tw_state ts = {};
 
 	if (!io_local_work_pending(ctx))
 		return 0;
-	return __io_run_local_work(ctx, &ts, min_events,
+	return __io_run_local_work(ctx, ts, min_events,
 				   max(IO_LOCAL_TW_DEFAULT_MAX, min_events));
 }
 
 static int io_run_local_work(struct io_ring_ctx *ctx, int min_events,
 			     int max_events)
 {
 	struct io_tw_state ts = {};
 	int ret;
 
 	mutex_lock(&ctx->uring_lock);
-	ret = __io_run_local_work(ctx, &ts, min_events, max_events);
+	ret = __io_run_local_work(ctx, ts, min_events, max_events);
 	mutex_unlock(&ctx->uring_lock);
 	return ret;
 }
 
 static void io_req_task_cancel(struct io_kiocb *req, io_tw_token_t tw)
--
2.45.2