Message-ID: <CALWNXx-gGbAFCNywLZZUNmenTHQGFuapvNe-7irRGnRFJuNUcA@mail.gmail.com>
Date: Sat, 11 Oct 2025 09:36:40 +0800
From: fengnan chang <fengnanchang@...il.com>
To: Fengnan Chang <changfengnan@...edance.com>
Cc: axboe@...nel.dk, viro@...iv.linux.org.uk, brauner@...nel.org, jack@...e.cz,
asml.silence@...il.com, willy@...radead.org, djwong@...nel.org,
hch@...radead.org, ritesh.list@...il.com, linux-fsdevel@...r.kernel.org,
io-uring@...r.kernel.org, linux-xfs@...r.kernel.org,
linux-ext4@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH] block: enable per-cpu bio cache by default
The attached CSV files are the results of the fio tests on ext4/xfs
with libaio/sync/io_uring on null_blk and nvme; a short, hypothetical
sketch of what the change means for bio_alloc_bioset() callers follows
below the quoted patch.
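
For context, each combination was exercised with an invocation along
these lines (the job parameters shown here are illustrative
assumptions, not the exact ones used for the attached results):

fio --name=randread --filename=/mnt/testfile --direct=1 --rw=randread \
    --bs=4k --iodepth=128 --ioengine=io_uring --numjobs=1 \
    --time_based --runtime=30

with --ioengine switched between io_uring, libaio and sync, and the
test file placed on ext4 or xfs backed by null_blk or the NVMe device.
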
On Sat, Oct 11, 2025 at 9:33 AM Fengnan Chang
<changfengnan@...edance.com> wrote:
>
> The per-cpu bio cache was only used for io_uring on raw block devices.
> Since commit 12e4e8c7ab59 ("io_uring/rw: enable bio caches for IRQ
> rw"), bio_put() is safe in both task and irq context, and
> bio_alloc_bioset() is safe in task context and is never called from irq
> context, so we can enable the per-cpu bio cache by default.
>
> Benchmarked with t/io_uring and ext4+nvme:
> taskset -c 6 /root/fio/t/io_uring -p0 -d128 -b4096 -s1 -c1 -F1 -B1 -R1
> -X1 -n1 -P1 /mnt/testfile
> base IOPS is 562K, patch IOPS is 574K. The CPU usage of bio_alloc_bioset
> decreased from 1.42% to 1.22%.
>
> The worst case is allocating a bio on CPU A but freeing it on CPU B;
> still using t/io_uring and ext4+nvme:
> base IOPS is 648K, patch IOPS is 647K.
>
> Also ran fio tests on ext4/xfs with libaio/sync/io_uring on null_blk and
> nvme; no obvious performance regression.
>
> Signed-off-by: Fengnan Chang <changfengnan@...edance.com>
> ---
> block/bio.c | 26 ++++++++++++--------------
> block/blk-map.c | 4 ++++
> block/fops.c | 4 ----
> include/linux/fs.h | 3 ---
> io_uring/rw.c | 1 -
> 5 files changed, 16 insertions(+), 22 deletions(-)
>
> diff --git a/block/bio.c b/block/bio.c
> index 3b371a5da159..16b20c10cab7 100644
> --- a/block/bio.c
> +++ b/block/bio.c
> @@ -513,20 +513,18 @@ struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
> if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
> return NULL;
>
> - if (opf & REQ_ALLOC_CACHE) {
> - if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
> - bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
> - gfp_mask, bs);
> - if (bio)
> - return bio;
> - /*
> - * No cached bio available, bio returned below marked with
> - * REQ_ALLOC_CACHE to particpate in per-cpu alloc cache.
> - */
> - } else {
> - opf &= ~REQ_ALLOC_CACHE;
> - }
> - }
> + opf |= REQ_ALLOC_CACHE;
> + if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
> + bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
> + gfp_mask, bs);
> + if (bio)
> + return bio;
> + /*
> + * No cached bio available, bio returned below marked with
> + * REQ_ALLOC_CACHE to participate in per-cpu alloc cache.
> + */
> + } else
> + opf &= ~REQ_ALLOC_CACHE;
>
> /*
> * submit_bio_noacct() converts recursion to iteration; this means if
> diff --git a/block/blk-map.c b/block/blk-map.c
> index 23e5d5ebe59e..570a7ca6edd1 100644
> --- a/block/blk-map.c
> +++ b/block/blk-map.c
> @@ -255,6 +255,10 @@ static struct bio *blk_rq_map_bio_alloc(struct request *rq,
> {
> struct bio *bio;
>
> +	/*
> +	 * Even though REQ_ALLOC_CACHE is enabled by default, we still need
> +	 * this to mark that the bio was allocated by bio_alloc_bioset().
> +	 */
> if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
> bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
> &fs_bio_set);
> diff --git a/block/fops.c b/block/fops.c
> index ddbc69c0922b..090562a91b4c 100644
> --- a/block/fops.c
> +++ b/block/fops.c
> @@ -177,8 +177,6 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
> loff_t pos = iocb->ki_pos;
> int ret = 0;
>
> - if (iocb->ki_flags & IOCB_ALLOC_CACHE)
> - opf |= REQ_ALLOC_CACHE;
> bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
> &blkdev_dio_pool);
> dio = container_of(bio, struct blkdev_dio, bio);
> @@ -326,8 +324,6 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
> loff_t pos = iocb->ki_pos;
> int ret = 0;
>
> - if (iocb->ki_flags & IOCB_ALLOC_CACHE)
> - opf |= REQ_ALLOC_CACHE;
> bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
> &blkdev_dio_pool);
> dio = container_of(bio, struct blkdev_dio, bio);
> diff --git a/include/linux/fs.h b/include/linux/fs.h
> index 601d036a6c78..18ec41732186 100644
> --- a/include/linux/fs.h
> +++ b/include/linux/fs.h
> @@ -365,8 +365,6 @@ struct readahead_control;
> /* iocb->ki_waitq is valid */
> #define IOCB_WAITQ (1 << 19)
> #define IOCB_NOIO (1 << 20)
> -/* can use bio alloc cache */
> -#define IOCB_ALLOC_CACHE (1 << 21)
> /*
> * IOCB_DIO_CALLER_COMP can be set by the iocb owner, to indicate that the
> * iocb completion can be passed back to the owner for execution from a safe
> @@ -399,7 +397,6 @@ struct readahead_control;
> { IOCB_WRITE, "WRITE" }, \
> { IOCB_WAITQ, "WAITQ" }, \
> { IOCB_NOIO, "NOIO" }, \
> - { IOCB_ALLOC_CACHE, "ALLOC_CACHE" }, \
> { IOCB_DIO_CALLER_COMP, "CALLER_COMP" }, \
> { IOCB_AIO_RW, "AIO_RW" }, \
> { IOCB_HAS_METADATA, "AIO_HAS_METADATA" }
> diff --git a/io_uring/rw.c b/io_uring/rw.c
> index af5a54b5db12..fa7655ab9097 100644
> --- a/io_uring/rw.c
> +++ b/io_uring/rw.c
> @@ -856,7 +856,6 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
> ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type);
> if (unlikely(ret))
> return ret;
> - kiocb->ki_flags |= IOCB_ALLOC_CACHE;
>
> /*
> * If the file is marked O_NONBLOCK, still allow retry for it if it
> --
> 2.39.5 (Apple Git-154)
>
>
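
To make the caller-visible effect concrete, here is a minimal,
hypothetical sketch (not part of the patch; the function name and its
use of fs_bio_set are assumptions for illustration). With the change,
bio_alloc_bioset() sets REQ_ALLOC_CACHE itself and only clears it when
the per-cpu cache cannot be used, so a task-context caller gets the
cache without any opt-in flag:

#include <linux/bio.h>
#include <linux/blk_types.h>

/*
 * Hypothetical caller after this patch: no REQ_ALLOC_CACHE (and no
 * IOCB_ALLOC_CACHE) is set here.  bio_alloc_bioset() tries the per-cpu
 * cache whenever the bio_set has one and nr_vecs <= BIO_INLINE_VECS,
 * and falls back to the mempool path otherwise.
 */
static struct bio *example_dio_bio_alloc(struct block_device *bdev,
					 unsigned short nr_vecs,
					 blk_opf_t opf)
{
	return bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, &fs_bio_set);
}

Whether the cache is actually hit still depends on the bio_set having
been created with a per-cpu cache (BIOSET_PERCPU_CACHE), as the
bs->cache check in the quoted hunk shows.
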
Attachment: nullblk_fs_results.csv (text/csv, 13501 bytes)
Attachment: fio_results.csv (text/csv, 12529 bytes)