Message-ID: <20260105210543.3471082-3-csander@purestorage.com>
Date: Mon, 5 Jan 2026 14:05:41 -0700
From: Caleb Sander Mateos <csander@...estorage.com>
To: Jens Axboe <axboe@...nel.dk>
Cc: Joanne Koong <joannelkoong@...il.com>,
io-uring@...r.kernel.org,
linux-kernel@...r.kernel.org,
Caleb Sander Mateos <csander@...estorage.com>
Subject: [PATCH v7 2/3] io_uring/msg_ring: drop unnecessary submitter_task checks
__io_msg_ring_data() checks that target_ctx doesn't have
IORING_SETUP_R_DISABLED set before calling io_msg_data_remote(), which
calls io_msg_remote_post(). So submitter_task can't be modified
concurrently with the read in io_msg_remote_post(). Additionally,
submitter_task must exist: io_msg_data_remote() is only called when
io_msg_need_remote() returns true, i.e. task_complete is set, which
requires IORING_SETUP_DEFER_TASKRUN, which in turn requires
IORING_SETUP_SINGLE_ISSUER. And submitter_task is assigned in
io_uring_create() or io_register_enable_rings() before any
IORING_SETUP_SINGLE_ISSUER io_ring_ctx is enabled.
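
For context, the ordering argument here is the usual publish-then-release /
acquire-then-read pattern. Below is a minimal standalone illustration using
C11 atomics and pthreads; it is only a sketch, the names (ctx_flags,
submitter_task, R_DISABLED, enabler(), sender()) are made up for
illustration and this is not the io_uring code itself:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define R_DISABLED 1u

struct task { const char *comm; };

/* Plain (non-atomic) pointer, standing in for ctx->submitter_task */
static struct task *submitter_task;
/* Flags word, standing in for ctx->flags; starts out "disabled" */
static atomic_uint ctx_flags = R_DISABLED;

/* Writer side: publish the pointer with a plain store, then clear the
 * disabled bit with release semantics. */
static void *enabler(void *arg)
{
	static struct task me = { .comm = "submitter" };

	(void)arg;
	submitter_task = &me;
	atomic_fetch_and_explicit(&ctx_flags, ~R_DISABLED,
				  memory_order_release);
	return NULL;
}

/* Reader side: acquire-load the flags first; if the disabled bit is
 * clear, the later plain load of submitter_task is ordered after the
 * publication above and must observe a non-NULL pointer. */
static void *sender(void *arg)
{
	(void)arg;
	if (atomic_load_explicit(&ctx_flags, memory_order_acquire) & R_DISABLED) {
		puts("still disabled, bail out");
		return NULL;
	}
	printf("submitter_task->comm = %s\n", submitter_task->comm);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, enabler, NULL);
	pthread_create(&b, NULL, sender, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
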
Similarly, io_msg_send_fd() checks IORING_SETUP_R_DISABLED and
io_msg_need_remote() before calling io_msg_fd_remote(), so
submitter_task can't be modified concurrently with the read in
io_msg_fd_remote() and must be non-NULL.
io_register_enable_rings() can't run concurrently because it's called
from io_uring_register() -> __io_uring_register() with uring_lock held.
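
Illustrative only: a tiny standalone pthreads sketch of why the plain
store on the registration side is safe once all writers are serialized
by one mutex. Names (uring_lock, submitter_task, enable_rings()) are
made up for illustration; this is not the io_uring code:

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for ctx->uring_lock and ctx->submitter_task */
static pthread_mutex_t uring_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *submitter_task;

/* Every caller takes uring_lock around the whole operation, so the
 * check-then-assign below can never race with another writer: a plain
 * store suffices, no WRITE_ONCE()-style annotation is needed. */
static void *enable_rings(void *name)
{
	pthread_mutex_lock(&uring_lock);
	if (!submitter_task)
		submitter_task = name;	/* assigned at most once */
	pthread_mutex_unlock(&uring_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, enable_rings, (void *)"task A");
	pthread_create(&b, NULL, enable_rings, (void *)"task B");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("submitter_task = %s\n", submitter_task);
	return 0;
}
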
Thus, replace the READ_ONCE() and WRITE_ONCE() of submitter_task with
plain loads and stores, and remove the NULL checks of submitter_task in
io_msg_remote_post() and io_msg_fd_remote().
Signed-off-by: Caleb Sander Mateos <csander@...estorage.com>
---
io_uring/io_uring.c | 7 +------
io_uring/msg_ring.c | 18 +++++-------------
io_uring/register.c | 2 +-
3 files changed, 7 insertions(+), 20 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index ec27fafcb213..b31d88295297 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -3663,17 +3663,12 @@ static __cold int io_uring_create(struct io_ctx_config *config)
ret = -EFAULT;
goto err;
}
if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
- && !(ctx->flags & IORING_SETUP_R_DISABLED)) {
- /*
- * Unlike io_register_enable_rings(), don't need WRITE_ONCE()
- * since ctx isn't yet accessible from other tasks
- */
+ && !(ctx->flags & IORING_SETUP_R_DISABLED))
ctx->submitter_task = get_task_struct(current);
- }
file = io_uring_get_file(ctx);
if (IS_ERR(file)) {
ret = PTR_ERR(file);
goto err;
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index 87b4d306cf1b..57ad0085869a 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -78,26 +78,21 @@ static void io_msg_tw_complete(struct io_tw_req tw_req, io_tw_token_t tw)
io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags);
kfree_rcu(req, rcu_head);
percpu_ref_put(&ctx->refs);
}
-static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
+static void io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
int res, u32 cflags, u64 user_data)
{
- if (!READ_ONCE(ctx->submitter_task)) {
- kfree_rcu(req, rcu_head);
- return -EOWNERDEAD;
- }
req->opcode = IORING_OP_NOP;
req->cqe.user_data = user_data;
io_req_set_res(req, res, cflags);
percpu_ref_get(&ctx->refs);
req->ctx = ctx;
req->tctx = NULL;
req->io_task_work.func = io_msg_tw_complete;
io_req_task_work_add_remote(req, IOU_F_TWQ_LAZY_WAKE);
- return 0;
}
static int io_msg_data_remote(struct io_ring_ctx *target_ctx,
struct io_msg *msg)
{
@@ -109,12 +104,12 @@ static int io_msg_data_remote(struct io_ring_ctx *target_ctx,
return -ENOMEM;
if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
flags = msg->cqe_flags;
- return io_msg_remote_post(target_ctx, target, msg->len, flags,
- msg->user_data);
+ io_msg_remote_post(target_ctx, target, msg->len, flags, msg->user_data);
+ return 0;
}
static int __io_msg_ring_data(struct io_ring_ctx *target_ctx,
struct io_msg *msg, unsigned int issue_flags)
{
@@ -125,11 +120,11 @@ static int __io_msg_ring_data(struct io_ring_ctx *target_ctx,
return -EINVAL;
if (!(msg->flags & IORING_MSG_RING_FLAGS_PASS) && msg->dst_fd)
return -EINVAL;
/*
* Keep IORING_SETUP_R_DISABLED check before submitter_task load
- * in io_msg_data_remote() -> io_msg_remote_post()
+ * in io_msg_data_remote() -> io_req_task_work_add_remote()
*/
if (smp_load_acquire(&target_ctx->flags) & IORING_SETUP_R_DISABLED)
return -EBADFD;
if (io_msg_need_remote(target_ctx))
@@ -225,14 +220,11 @@ static void io_msg_tw_fd_complete(struct callback_head *head)
static int io_msg_fd_remote(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->file->private_data;
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
- struct task_struct *task = READ_ONCE(ctx->submitter_task);
-
- if (unlikely(!task))
- return -EOWNERDEAD;
+ struct task_struct *task = ctx->submitter_task;
init_task_work(&msg->tw, io_msg_tw_fd_complete);
if (task_work_add(task, &msg->tw, TWA_SIGNAL))
return -EOWNERDEAD;
diff --git a/io_uring/register.c b/io_uring/register.c
index 12318c276068..8104728af294 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -179,11 +179,11 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
{
if (!(ctx->flags & IORING_SETUP_R_DISABLED))
return -EBADFD;
if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task) {
- WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
+ ctx->submitter_task = get_task_struct(current);
/*
* Lazy activation attempts would fail if it was polled before
* submitter_task is set.
*/
if (wq_has_sleeper(&ctx->poll_wq))
--
2.45.2