Message-ID: <a8a832b9-bfa6-4c1e-bdcc-a89467add5d1@kernel.dk>
Date: Mon, 29 Dec 2025 17:23:49 -0700
From: Jens Axboe <axboe@...nel.dk>
To: Alexandre Negrel <alexandre@...rel.dev>, io-uring@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH] io_uring: make overflowing cqe subject to OOM

On 12/29/25 1:19 PM, Alexandre Negrel wrote:
> diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
> index 6cb24cdf8e68..5ff1a13fed1c 100644
> --- a/io_uring/io_uring.c
> +++ b/io_uring/io_uring.c
> @@ -545,31 +545,12 @@ void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
> io_eventfd_signal(ctx, true);
> }
>
> -static inline void __io_cq_lock(struct io_ring_ctx *ctx)
> -{
> - if (!ctx->lockless_cq)
> - spin_lock(&ctx->completion_lock);
> -}
> -
> static inline void io_cq_lock(struct io_ring_ctx *ctx)
> __acquires(ctx->completion_lock)
> {
> spin_lock(&ctx->completion_lock);
> }
>
> -static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
> -{
> - io_commit_cqring(ctx);
> - if (!ctx->task_complete) {
> - if (!ctx->lockless_cq)
> - spin_unlock(&ctx->completion_lock);
> - /* IOPOLL rings only need to wake up if it's also SQPOLL */
> - if (!ctx->syscall_iopoll)
> - io_cqring_wake(ctx);
> - }
> - io_commit_cqring_flush(ctx);
> -}
> -
> static void io_cq_unlock_post(struct io_ring_ctx *ctx)
> __releases(ctx->completion_lock)
> {
> @@ -1513,7 +1494,6 @@ void __io_submit_flush_completions(struct io_ring_ctx *ctx)
> struct io_submit_state *state = &ctx->submit_state;
> struct io_wq_work_node *node;
>
> - __io_cq_lock(ctx);
> __wq_list_for_each(node, &state->compl_reqs) {
> struct io_kiocb *req = container_of(node, struct io_kiocb,
> comp_list);
> @@ -1525,13 +1505,17 @@ void __io_submit_flush_completions(struct io_ring_ctx *ctx)
> */
> if (!(req->flags & (REQ_F_CQE_SKIP | REQ_F_REISSUE)) &&
> unlikely(!io_fill_cqe_req(ctx, req))) {
> - if (ctx->lockless_cq)
> - io_cqe_overflow(ctx, &req->cqe, &req->big_cqe);
> - else
> - io_cqe_overflow_locked(ctx, &req->cqe, &req->big_cqe);
> + io_cqe_overflow(ctx, &req->cqe, &req->big_cqe);
> }
> }
> - __io_cq_unlock_post(ctx);
> +
> + io_commit_cqring(ctx);
> + if (!ctx->task_complete) {
> + /* IOPOLL rings only need to wake up if it's also SQPOLL */
> + if (!ctx->syscall_iopoll)
> + io_cqring_wake(ctx);
> + }
> + io_commit_cqring_flush(ctx);
>
> if (!wq_list_empty(&state->compl_reqs)) {
> io_free_batch_list(ctx, state->compl_reqs.first);

You seem to just remove the lock around posting CQEs, and hence it can
then use GFP_KERNEL? That's very broken... I'm assuming the issue here
is that memcg will look at __GFP_HIGH somehow and allow the allocation
to proceed? Surely that should not stop OOM, just defer it?
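
For reference, the only real difference between the two flags (roughly,
going from memory of include/linux/gfp_types.h, so double check the
exact definitions) is the __GFP_HIGH bit:

	/* sketch, not copied verbatim from gfp_types.h */
	#define GFP_ATOMIC	(__GFP_HIGH | __GFP_KSWAPD_RECLAIM)
	#define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM | __GFP_NOWARN)

Neither can block, but __GFP_HIGH is presumably what lets memcg force
the charge through rather than fail it or push it towards OOM.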

In any case, the below should then do the same. Can you test?

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 6cb24cdf8e68..709943fedaf4 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -864,7 +864,7 @@ static __cold bool io_cqe_overflow_locked(struct io_ring_ctx *ctx,
{
struct io_overflow_cqe *ocqe;
- ocqe = io_alloc_ocqe(ctx, cqe, big_cqe, GFP_ATOMIC);
+ ocqe = io_alloc_ocqe(ctx, cqe, big_cqe, GFP_NOWAIT);
return io_cqring_add_overflow(ctx, ocqe);
}
--
Jens Axboe