[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <f1522c5d-febf-4e51-b534-c0ffa719d555@veygax.dev>
Date: Thu, 18 Dec 2025 00:37:47 +0000
From: veygax <veyga@...gax.dev>
To: Ming Lei <ming.lei@...hat.com>
Cc: Jens Axboe <axboe@...nel.dk>, "io-uring@...r.kernel.org" <io-uring@...r.kernel.org>, Caleb Sander Mateos <csander@...estorage.com>, "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH] io_uring/rsrc: fix slab-out-of-bounds in io_buffer_register_bvec
On 18/12/2025 00:32, Ming Lei wrote:
> Can you share the test case so that we can understand why page isn't merged
> to last bvec? Maybe there is chance to improve block layer(bio add page
> related code)
Sure, this is how I triggered it:
#include <kunit/test.h>
#include <linux/io_uring.h>
#include <linux/io_uring_types.h>
#include <linux/io_uring/cmd.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/bvec.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "io_uring.h"
#include "rsrc.h"
/*
 * No-op release callback handed to io_buffer_register_bvec(); the test
 * frees the backing pages itself, so nothing to do here.
 */
static void dummy_release(void *priv)
{
}
/*
 * Reproducer for a slab-out-of-bounds write in io_buffer_register_bvec().
 *
 * Builds a request whose bio carries more bvec entries (40) than the
 * IO_CACHED_BVECS_SEGS-sized (32) cached imu allocation can hold, while
 * keeping all pages physically contiguous so the segment count used for
 * the allocation sizing looks like a single segment.  The per-bvec copy
 * loop in io_buffer_register_bvec() then writes past the end of the
 * allocation.
 *
 * NOTE(review): relies on io_uring-internal helpers (io_rsrc_cache_init,
 * io_rsrc_data_alloc, io_buffer_register_bvec, ...) being reachable from
 * this KUnit module — confirm the test is built into io_uring itself.
 */
static void io_buffer_register_bvec_overflow_test(struct kunit *test)
{
	struct io_ring_ctx *ctx;
	struct io_uring_cmd *cmd;
	struct io_kiocb *req;
	struct request *rq;
	struct bio *bio;
	struct page *page;
	int i, ret;
	/*
	 * IO_CACHED_BVECS_SEGS is 32.
	 * We want more than 32 bvecs to trigger overflow if allocation uses 32.
	 */
	int num_bvecs = 40;

	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ctx);

	/* Initialize caches so io_alloc_imu() works and knows the size. */
	if (io_rsrc_cache_init(ctx))
		kunit_skip(test, "failed to init rsrc cache");

	/* Initialize buf_table so the buffer-index check passes. */
	ret = io_rsrc_data_alloc(&ctx->buf_table, 1);
	KUNIT_ASSERT_EQ(test, ret, 0);

	req = kunit_kzalloc(test, sizeof(*req), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, req);
	req->ctx = ctx;
	/* The cmd's pdu storage lives inside the kiocb. */
	cmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	/*
	 * Bare request struct: only rq->bio is consulted by the code under
	 * test, so a full blk-mq request setup is not needed.
	 */
	rq = kunit_kzalloc(test, sizeof(*rq), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, rq);

	/* Allocate bio with enough bvec slots for all 40 entries. */
	bio = bio_kmalloc(num_bvecs, GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bio);
	bio_init(bio, NULL, bio_inline_vecs(bio), num_bvecs, REQ_OP_WRITE);
	rq->bio = bio;

	/* One order-6 compound allocation = 64 physically contiguous pages. */
	page = alloc_pages(GFP_KERNEL | __GFP_COMP | __GFP_ZERO, 6);
	KUNIT_ASSERT_NOT_NULL(test, page);

	/*
	 * Add pages to the bio manually, bypassing bio_add_page() so the
	 * contiguous pages are NOT merged into one bvec.  Physically
	 * contiguous pages trick blk_rq_nr_phys_segments() into reporting a
	 * single segment, while the 40 separate bvec entries make the copy
	 * loop in io_buffer_register_bvec() write out of bounds.
	 */
	for (i = 0; i < num_bvecs; i++) {
		struct bio_vec *bv = &bio->bi_io_vec[i];

		bv->bv_page = page + i;
		bv->bv_len = PAGE_SIZE;
		bv->bv_offset = 0;
		bio->bi_vcnt++;
		bio->bi_iter.bi_size += PAGE_SIZE;
	}

	/* Trigger: on an unfixed kernel KASAN fires inside this call. */
	ret = io_buffer_register_bvec(cmd, rq, dummy_release, 0, 0);

	/*
	 * Cleanup — only reached when the kernel survives the call (i.e. the
	 * fix is in place).  NOTE(review): ret is deliberately not asserted;
	 * the reproducer only exercises the overflow path.
	 */
	__free_pages(page, 6);
	kfree(bio);
	io_rsrc_data_free(ctx, &ctx->buf_table);
	io_rsrc_cache_free(ctx);
}
/* KUnit case table: currently just the bvec-registration overflow test. */
static struct kunit_case io_uring_rsrc_test_cases[] = {
	KUNIT_CASE(io_buffer_register_bvec_overflow_test),
	{}	/* sentinel terminator required by KUnit */
};
/* Suite descriptor tying the case table to a name shown in test output. */
static struct kunit_suite io_uring_rsrc_test_suite = {
	.name = "io_uring_rsrc_test",
	.test_cases = io_uring_rsrc_test_cases,
};
/* Register the suite with the KUnit framework at module init. */
kunit_test_suite(io_uring_rsrc_test_suite);

MODULE_LICENSE("GPL");
--
- Evan Lambert / veygax
Powered by blists - more mailing lists