[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <4b8eb795-239f-4f46-af4f-7a05056ab516@kernel.dk>
Date: Wed, 27 Aug 2025 15:45:28 -0600
From: Jens Axboe <axboe@...nel.dk>
To: Qingyue Zhang <chunzhennn@...com>
Cc: io-uring@...r.kernel.org, linux-kernel@...r.kernel.org,
Suoxing Zhang <aftern00n@...com>
Subject: Re: [PATCH 2/2] io_uring/kbuf: fix infinite loop in
io_kbuf_inc_commit()
On 8/27/25 8:30 AM, Jens Axboe wrote:
> On 8/27/25 5:44 AM, Qingyue Zhang wrote:
>> In io_kbuf_inc_commit(), buf points to a user-mapped memory region,
>> which means buf->len might be changed between importing and committing.
>> Add a check to avoid an infinite loop when the sum of buf->len is less
>> than len.
>>
>> Co-developed-by: Suoxing Zhang <aftern00n@...com>
>> Signed-off-by: Suoxing Zhang <aftern00n@...com>
>> Signed-off-by: Qingyue Zhang <chunzhennn@...com>
>> ---
>> io_uring/kbuf.c | 9 +++++++--
>> 1 file changed, 7 insertions(+), 2 deletions(-)
>>
>> diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
>> index 81a13338dfab..80ffe6755598 100644
>> --- a/io_uring/kbuf.c
>> +++ b/io_uring/kbuf.c
>> @@ -34,11 +34,12 @@ struct io_provide_buf {
>>
>> static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len)
>> {
>> + struct io_uring_buf *buf, *buf_start;
>> +
>> + buf_start = buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
>> while (len) {
>> - struct io_uring_buf *buf;
>> u32 this_len;
>>
>> - buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
>> this_len = min_t(u32, len, buf->len);
>> buf->len -= this_len;
>> if (buf->len) {
>> @@ -47,6 +48,10 @@ static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len)
>> }
>> bl->head++;
>> len -= this_len;
>> +
>> + buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
>> + if (unlikely(buf == buf_start))
>> + break;
>> }
>> return true;
>> }
>
> Maybe I'm dense, but I don't follow this one. 'len' is passed in, and
> the only thing that should cause things to loop more than it should
> would be if we do:
>
> len -= this_len;
>
> and this_len > len;
>
> Yes, buf->len is user mapped, perhaps we just need to do:
>
> diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
> index f2d2cc319faa..569f4d957051 100644
> --- a/io_uring/kbuf.c
> +++ b/io_uring/kbuf.c
> @@ -36,15 +36,18 @@ static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len)
> {
> while (len) {
> struct io_uring_buf *buf;
> - u32 this_len;
> + u32 buf_len, this_len;
>
> buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
> - this_len = min_t(int, len, buf->len);
> - buf->len -= this_len;
> - if (buf->len) {
> + buf_len = READ_ONCE(buf->len);
> + this_len = min_t(int, len, buf_len);
> + buf_len -= this_len;
> + if (buf_len) {
> buf->addr += this_len;
> + buf->len = buf_len;
> return false;
> }
> + buf->len = 0;
> bl->head++;
> len -= this_len;
> }
>
> so that we operate on a local variable, and just set buf->len
> appropriate for each buffer.
I took a closer look and there's another spot where we should be
using READ_ONCE() to get the buffer length. How about something like
the below rather than the loop work-around?
commit 7f472373b2855087ae2df9dc6a923f3016a1ed21
Author: Jens Axboe <axboe@...nel.dk>
Date: Wed Aug 27 15:27:30 2025 -0600
io_uring/kbuf: always use READ_ONCE() to read ring provided buffer lengths
Since the buffers are mapped from userspace, it is prudent to use
READ_ONCE() to read the value into a local variable, and use that for
any other actions taken. Having a stable read of the buffer length
avoids worrying about it changing after checking, or being read multiple
times.
Fixes: c7fb19428d67 ("io_uring: add support for ring mapped supplied buffers")
Link: https://lore.kernel.org/io-uring/tencent_000C02641F6250C856D0C26228DE29A3D30A@qq.com/
Reported-by: Qingyue Zhang <chunzhennn@...com>
Signed-off-by: Jens Axboe <axboe@...nel.dk>
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 81a13338dfab..394037d3f2f6 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -36,15 +36,18 @@ static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len)
{
while (len) {
struct io_uring_buf *buf;
- u32 this_len;
+ u32 buf_len, this_len;
buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
- this_len = min_t(u32, len, buf->len);
- buf->len -= this_len;
- if (buf->len) {
+ buf_len = READ_ONCE(buf->len);
+ this_len = min_t(u32, len, buf_len);
+ buf_len -= this_len;
+ if (buf_len) {
buf->addr += this_len;
+ buf->len = buf_len;
return false;
}
+ buf->len = 0;
bl->head++;
len -= this_len;
}
@@ -159,6 +162,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
__u16 tail, head = bl->head;
struct io_uring_buf *buf;
void __user *ret;
+ u32 buf_len;
tail = smp_load_acquire(&br->tail);
if (unlikely(tail == head))
@@ -168,8 +172,9 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
req->flags |= REQ_F_BL_EMPTY;
buf = io_ring_head_to_buf(br, head, bl->mask);
- if (*len == 0 || *len > buf->len)
- *len = buf->len;
+ buf_len = READ_ONCE(buf->len);
+ if (*len == 0 || *len > buf_len)
+ *len = buf_len;
req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
req->buf_list = bl;
req->buf_index = buf->bid;
@@ -265,7 +270,7 @@ static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
req->buf_index = buf->bid;
do {
- u32 len = buf->len;
+ u32 len = READ_ONCE(buf->len);
/* truncate end piece, if needed, for non partial buffers */
if (len > arg->max_len) {
--
Jens Axboe
Powered by blists - more mailing lists