Message-ID: <20251121015851.3672073-19-ming.lei@redhat.com>
Date: Fri, 21 Nov 2025 09:58:40 +0800
From: Ming Lei <ming.lei@...hat.com>
To: Jens Axboe <axboe@...nel.dk>,
linux-block@...r.kernel.org
Cc: Caleb Sander Mateos <csander@...estorage.com>,
Uday Shankar <ushankar@...estorage.com>,
Stefani Seibold <stefani@...bold.net>,
Andrew Morton <akpm@...ux-foundation.org>,
linux-kernel@...r.kernel.org,
Ming Lei <ming.lei@...hat.com>
Subject: [PATCH V4 18/27] ublk: implement batch request completion via blk_mq_end_request_batch()
Reduce overhead when completing multiple requests in batch I/O mode by
accumulating them in an io_comp_batch structure and completing them
together via blk_mq_end_request_batch(). This amortizes per-request
completion cost and improves performance for high-IOPS workloads.
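
For context, the io_comp_batch contract this builds on is small; the
following is a simplified view based on include/linux/blkdev.h (exact
field types may differ between kernel versions):

  struct io_comp_batch {
          struct rq_list req_list;        /* requests accumulated so far */
          bool need_ts;                   /* a request wants a completion timestamp */
          void (*complete)(struct io_comp_batch *);       /* flush callback */
  };

  #define DEFINE_IO_COMP_BATCH(name)      struct io_comp_batch name = { }

blk_mq_add_to_batch() appends a request to req_list and records the
flush callback, returning false when the request cannot be batched (in
which case the caller must end it individually), while
blk_mq_end_request_batch() completes everything on req_list in one pass.
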
The implementation adds an io_comp_batch pointer to struct
ublk_batch_io_data and points it at an on-stack batch
(DEFINE_IO_COMP_BATCH()) in ublk_handle_batch_commit_cmd(). For batch
I/O, ublk_batch_commit_io() passes data->iob down to
__ublk_complete_rq(), which calls blk_mq_add_to_batch() to accumulate
the request instead of ending it immediately. After all batch I/Os have
been processed, ublk_handle_batch_commit_cmd() invokes the batch
completion callback to complete the accumulated requests efficiently.
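
Condensed into a generic sketch (the demo_* names below are
illustrative only, not part of this patch), the fill-and-flush pattern
looks like:

  static void demo_flush_batch(struct io_comp_batch *iob)
  {
          blk_mq_end_request_batch(iob);
  }

  static void demo_complete_one(struct request *req, struct io_comp_batch *iob)
  {
          /* Queue onto the batch if possible, otherwise end it directly. */
          if (!blk_mq_add_to_batch(req, iob, false, demo_flush_batch))
                  __blk_mq_end_request(req, BLK_STS_OK);
  }

  static void demo_complete_many(struct request **reqs, int nr)
  {
          DEFINE_IO_COMP_BATCH(iob);
          int i;

          for (i = 0; i < nr; i++)
                  demo_complete_one(reqs[i], &iob);

          /* One flush ends every request accumulated above. */
          if (iob.complete)
                  iob.complete(&iob);
  }
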
So far this only covers direct completion. For deferred completion
(zero copy, auto buffer registration), ublk_io_release() is usually
delayed into the code path that frees the buffer-consuming io_uring
request, so batching rarely takes effect there, and it is also hard to
pass the per-task 'struct io_comp_batch' into that path.
Signed-off-by: Ming Lei <ming.lei@...hat.com>
---
drivers/block/ublk_drv.c | 30 ++++++++++++++++++++++--------
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 90cd1863bc83..a5606c7111a4 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -130,6 +130,7 @@ struct ublk_batch_io_data {
struct io_uring_cmd *cmd;
struct ublk_batch_io header;
unsigned int issue_flags;
+ struct io_comp_batch *iob;
};
/*
@@ -642,7 +643,12 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
#endif
static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
- bool need_map);
+ bool need_map, struct io_comp_batch *iob);
+
+static void ublk_complete_batch(struct io_comp_batch *iob)
+{
+ blk_mq_end_request_batch(iob);
+}
static dev_t ublk_chr_devt;
static const struct class ublk_chr_class = {
@@ -912,7 +918,7 @@ static inline void ublk_put_req_ref(struct ublk_io *io, struct request *req)
return;
/* ublk_need_map_io() and ublk_need_req_ref() are mutually exclusive */
- __ublk_complete_rq(req, io, false);
+ __ublk_complete_rq(req, io, false, NULL);
}
static inline bool ublk_sub_req_ref(struct ublk_io *io)
@@ -1251,7 +1257,7 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
/* todo: handle partial completion */
static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
- bool need_map)
+ bool need_map, struct io_comp_batch *iob)
{
unsigned int unmapped_bytes;
blk_status_t res = BLK_STS_OK;
@@ -1288,8 +1294,11 @@ static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
if (blk_update_request(req, BLK_STS_OK, io->res))
blk_mq_requeue_request(req, true);
- else if (likely(!blk_should_fake_timeout(req->q)))
+ else if (likely(!blk_should_fake_timeout(req->q))) {
+ if (blk_mq_add_to_batch(req, iob, false, ublk_complete_batch))
+ return;
__blk_mq_end_request(req, BLK_STS_OK);
+ }
return;
exit:
@@ -2249,7 +2258,7 @@ static void __ublk_fail_req(struct ublk_device *ub, struct ublk_io *io,
blk_mq_requeue_request(req, false);
else {
io->res = -EIO;
- __ublk_complete_rq(req, io, ublk_dev_need_map_io(ub));
+ __ublk_complete_rq(req, io, ublk_dev_need_map_io(ub), NULL);
}
}
@@ -2986,7 +2995,7 @@ static int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
if (req_op(req) == REQ_OP_ZONE_APPEND)
req->__sector = addr;
if (compl)
- __ublk_complete_rq(req, io, ublk_dev_need_map_io(ub));
+ __ublk_complete_rq(req, io, ublk_dev_need_map_io(ub), NULL);
if (ret)
goto out;
@@ -3321,11 +3330,11 @@ static int ublk_batch_commit_io(struct ublk_queue *ubq,
if (req_op(req) == REQ_OP_ZONE_APPEND)
req->__sector = ublk_batch_zone_lba(uc, elem);
if (compl)
- __ublk_complete_rq(req, io, ublk_dev_need_map_io(data->ub));
+ __ublk_complete_rq(req, io, ublk_dev_need_map_io(data->ub), data->iob);
return 0;
}
-static int ublk_handle_batch_commit_cmd(const struct ublk_batch_io_data *data)
+static int ublk_handle_batch_commit_cmd(struct ublk_batch_io_data *data)
{
const struct ublk_batch_io *uc = &data->header;
struct io_uring_cmd *cmd = data->cmd;
@@ -3334,10 +3343,15 @@ static int ublk_handle_batch_commit_cmd(const struct ublk_batch_io_data *data)
.total = uc->nr_elem * uc->elem_bytes,
.elem_bytes = uc->elem_bytes,
};
+ DEFINE_IO_COMP_BATCH(iob);
int ret;
+ data->iob = &iob;
ret = ublk_walk_cmd_buf(&iter, data, ublk_batch_commit_io);
+ if (iob.complete)
+ iob.complete(&iob);
+
return iter.done == 0 ? ret : iter.done;
}
--
2.47.0