Message-ID: <CAJnrk1ZGCKdM_jK9QEbd25ikEQT7sCviaoqA6Rv_m1JjOTuEOw@mail.gmail.com>
Date: Mon, 15 Dec 2025 19:31:39 +0800
From: Joanne Koong <joannelkoong@...il.com>
To: Caleb Sander Mateos <csander@...estorage.com>
Cc: Jens Axboe <axboe@...nel.dk>, io-uring@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v4 1/5] io_uring: use release-acquire ordering for IORING_SETUP_R_DISABLED

On Wed, Dec 3, 2025 at 12:41 AM Caleb Sander Mateos
<csander@...estorage.com> wrote:
>
> io_uring_enter() and io_msg_ring() read ctx->flags and
> ctx->submitter_task without holding the ctx's uring_lock. This means
> they may race with the assignment to ctx->submitter_task and the
> clearing of IORING_SETUP_R_DISABLED from ctx->flags in
> io_register_enable_rings(). Ensure the correct ordering of the
> ctx->flags and ctx->submitter_task memory accesses by storing to
> ctx->flags using release ordering and loading it using acquire ordering.
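
Side note for anyone following along: this is the classic
message-passing (MP) pattern. A litmus test in the style of
tools/memory-model/litmus-tests (names adapted: flags stands in for
ctx->flags, task for ctx->submitter_task, and flags == 1 means
"enabled") captures the outcome the release/acquire pair rules out:

C enable-rings-release-acquire

{}

P0(int *flags, int *task)
{
	WRITE_ONCE(*task, 1);		/* publish submitter_task */
	smp_store_release(flags, 1);	/* enable: R_DISABLED cleared */
}

P1(int *flags, int *task)
{
	int r0;
	int r1;

	r0 = smp_load_acquire(flags);	/* flags check in io_uring_enter() */
	r1 = READ_ONCE(*task);		/* later unlocked submitter_task read */
}

exists (1:r0=1 /\ 1:r1=0)

With plain accesses, the "exists" outcome (ring seen enabled, stale
task) is allowed; with the release/acquire pair it can never happen.
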
>
> Signed-off-by: Caleb Sander Mateos <csander@...estorage.com>
> Fixes: 7e84e1c7566a ("io_uring: allow disabling rings during the creation")

This LGTM. But should the Fixes tag be commit 7cae596bc31f ("io_uring:
register single issuer task at creation")? AFAICT, that's the commit
that introduces the ctx->submitter_task assignment in
io_register_enable_rings() that causes the memory reordering issue
with the unlocked read in io_uring_add_tctx_node(). I don't see this
issue in 7e84e1c7566a.
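
For what it's worth, here is a minimal userspace analogue of that
pairing, as a sketch only: C11 atomics stand in for the kernel's
smp_store_release()/smp_load_acquire(), and all names are illustrative
rather than lifted from the kernel sources.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define R_DISABLED 1u

static atomic_uint flags = R_DISABLED;
static void *submitter_task;	/* plain access, published by the release */

/* io_register_enable_rings() side: in the kernel this runs under a
 * lock, so a plain read-modify plus a release store is sufficient. */
static void *enable_rings(void *task)
{
	submitter_task = task;
	/* Release orders the submitter_task store before the flag clear. */
	atomic_store_explicit(&flags, 0, memory_order_release);
	return NULL;
}

/* Unlocked reader side, like the io_uring_enter() path: the acquire
 * pairs with the release above, so once R_DISABLED is seen clear, the
 * submitter_task store is guaranteed to be visible as well. */
static void *reader(void *unused)
{
	(void)unused;
	if (!(atomic_load_explicit(&flags, memory_order_acquire) & R_DISABLED))
		printf("submitter_task = %p\n", submitter_task);
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;
	int task;

	pthread_create(&t0, NULL, enable_rings, &task);
	pthread_create(&t1, NULL, reader, NULL);
	pthread_join(t0, NULL);
	pthread_join(t1, NULL);
	return 0;
}

Without the release/acquire pair the reader could observe the flag
clear yet still read a stale NULL submitter_task; with it, that
outcome is ruled out.
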
Reviewed-by: Joanne Koong <joannelkoong@...il.com>

Thanks,
Joanne
> ---
> io_uring/io_uring.c | 2 +-
> io_uring/msg_ring.c | 4 ++--
> io_uring/register.c | 2 +-
> 3 files changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
> index 1e58fc1d5667..e32eb63e3cf2 100644
> --- a/io_uring/io_uring.c
> +++ b/io_uring/io_uring.c
> @@ -3244,11 +3244,11 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
> goto out;
> }
>
> ctx = file->private_data;
> ret = -EBADFD;
> - if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
> + if (unlikely(smp_load_acquire(&ctx->flags) & IORING_SETUP_R_DISABLED))
> goto out;
>
> /*
> * For SQ polling, the thread will do all submissions and completions.
> * Just return the requested submit count, and wake the thread if
> diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
> index 7063ea7964e7..c48588e06bfb 100644
> --- a/io_uring/msg_ring.c
> +++ b/io_uring/msg_ring.c
> @@ -123,11 +123,11 @@ static int __io_msg_ring_data(struct io_ring_ctx *target_ctx,
>
> if (msg->src_fd || msg->flags & ~IORING_MSG_RING_FLAGS_PASS)
> return -EINVAL;
> if (!(msg->flags & IORING_MSG_RING_FLAGS_PASS) && msg->dst_fd)
> return -EINVAL;
> - if (target_ctx->flags & IORING_SETUP_R_DISABLED)
> + if (smp_load_acquire(&target_ctx->flags) & IORING_SETUP_R_DISABLED)
> return -EBADFD;
>
> if (io_msg_need_remote(target_ctx))
> return io_msg_data_remote(target_ctx, msg);
>
> @@ -243,11 +243,11 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
>
> if (msg->len)
> return -EINVAL;
> if (target_ctx == ctx)
> return -EINVAL;
> - if (target_ctx->flags & IORING_SETUP_R_DISABLED)
> + if (smp_load_acquire(&target_ctx->flags) & IORING_SETUP_R_DISABLED)
> return -EBADFD;
> if (!msg->src_file) {
> int ret = io_msg_grab_file(req, issue_flags);
> if (unlikely(ret))
> return ret;
> diff --git a/io_uring/register.c b/io_uring/register.c
> index 62d39b3ff317..9e473c244041 100644
> --- a/io_uring/register.c
> +++ b/io_uring/register.c
> @@ -191,11 +191,11 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
> }
>
> if (ctx->restrictions.registered)
> ctx->restricted = 1;
>
> - ctx->flags &= ~IORING_SETUP_R_DISABLED;
> + smp_store_release(&ctx->flags, ctx->flags & ~IORING_SETUP_R_DISABLED);
> if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
> wake_up(&ctx->sq_data->wait);
> return 0;
> }
>
> --
> 2.45.2
>