[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251121015851.3672073-12-ming.lei@redhat.com>
Date: Fri, 21 Nov 2025 09:58:33 +0800
From: Ming Lei <ming.lei@...hat.com>
To: Jens Axboe <axboe@...nel.dk>,
linux-block@...r.kernel.org
Cc: Caleb Sander Mateos <csander@...estorage.com>,
Uday Shankar <ushankar@...estorage.com>,
Stefani Seibold <stefani@...bold.net>,
Andrew Morton <akpm@...ux-foundation.org>,
linux-kernel@...r.kernel.org,
Ming Lei <ming.lei@...hat.com>
Subject: [PATCH V4 11/27] ublk: handle UBLK_U_IO_COMMIT_IO_CMDS
Handle UBLK_U_IO_COMMIT_IO_CMDS by walking the uring_cmd fixed buffer:
- read each element into one temp buffer, batch style
- parse and apply each element to commit its io result
Signed-off-by: Ming Lei <ming.lei@...hat.com>
---
drivers/block/ublk_drv.c | 117 ++++++++++++++++++++++++++++++++--
include/uapi/linux/ublk_cmd.h | 8 +++
2 files changed, 121 insertions(+), 4 deletions(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 66c77daae955..ea992366af5b 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -2098,9 +2098,9 @@ static inline int ublk_set_auto_buf_reg(struct ublk_io *io, struct io_uring_cmd
return 0;
}
-static int ublk_handle_auto_buf_reg(struct ublk_io *io,
- struct io_uring_cmd *cmd,
- u16 *buf_idx)
+static void __ublk_handle_auto_buf_reg(struct ublk_io *io,
+ struct io_uring_cmd *cmd,
+ u16 *buf_idx)
{
if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG) {
io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG;
@@ -2118,7 +2118,13 @@ static int ublk_handle_auto_buf_reg(struct ublk_io *io,
if (io->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd))
*buf_idx = io->buf.auto_reg.index;
}
+}
+static int ublk_handle_auto_buf_reg(struct ublk_io *io,
+ struct io_uring_cmd *cmd,
+ u16 *buf_idx)
+{
+ __ublk_handle_auto_buf_reg(io, cmd, buf_idx);
return ublk_set_auto_buf_reg(io, cmd);
}
@@ -2553,6 +2559,17 @@ static inline __u64 ublk_batch_buf_addr(const struct ublk_batch_io *uc,
return 0;
}
+static inline __u64 ublk_batch_zone_lba(const struct ublk_batch_io *uc,
+ const struct ublk_elem_header *elem)
+{
+ const void *buf = (const void *)elem;
+
+ if (uc->flags & UBLK_BATCH_F_HAS_ZONE_LBA)
+ return *(__u64 *)(buf + sizeof(*elem) +
+ 8 * !!(uc->flags & UBLK_BATCH_F_HAS_BUF_ADDR));
+ return -1;
+}
+
static struct ublk_auto_buf_reg
ublk_batch_auto_buf_reg(const struct ublk_batch_io *uc,
const struct ublk_elem_header *elem)
@@ -2708,6 +2725,98 @@ static int ublk_handle_batch_prep_cmd(const struct ublk_batch_io_data *data)
return ret;
}
+static int ublk_batch_commit_io_check(const struct ublk_queue *ubq,
+ struct ublk_io *io,
+ union ublk_io_buf *buf)
+{
+ struct request *req = io->req;
+
+ if (!req)
+ return -EINVAL;
+
+ if (io->flags & UBLK_IO_FLAG_ACTIVE)
+ return -EBUSY;
+
+ if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
+ return -EINVAL;
+
+ if (ublk_need_map_io(ubq)) {
+ /*
+ * COMMIT_AND_FETCH_REQ has to provide IO buffer if
+ * NEED GET DATA is not enabled or it is Read IO.
+ */
+ if (!buf->addr && (!ublk_need_get_data(ubq) ||
+ req_op(req) == REQ_OP_READ))
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ublk_batch_commit_io(struct ublk_queue *ubq,
+ const struct ublk_batch_io_data *data,
+ const struct ublk_elem_header *elem)
+{
+ struct ublk_io *io = &ubq->ios[elem->tag];
+ const struct ublk_batch_io *uc = &data->header;
+ u16 buf_idx = UBLK_INVALID_BUF_IDX;
+ union ublk_io_buf buf = { 0 };
+ struct request *req = NULL;
+ bool auto_reg = false;
+ bool compl = false;
+ int ret;
+
+ if (ublk_dev_support_auto_buf_reg(data->ub)) {
+ buf.auto_reg = ublk_batch_auto_buf_reg(uc, elem);
+ auto_reg = true;
+ } else if (ublk_dev_need_map_io(data->ub))
+ buf.addr = ublk_batch_buf_addr(uc, elem);
+
+ ublk_io_lock(io);
+ ret = ublk_batch_commit_io_check(ubq, io, &buf);
+ if (!ret) {
+ io->res = elem->result;
+ io->buf = buf;
+ req = ublk_fill_io_cmd(io, data->cmd);
+
+ if (auto_reg)
+ __ublk_handle_auto_buf_reg(io, data->cmd, &buf_idx);
+ compl = ublk_need_complete_req(data->ub, io);
+ }
+ ublk_io_unlock(io);
+
+ if (unlikely(ret)) {
+ pr_warn("%s: dev %u queue %u io %u: commit failure %d\n",
+ __func__, data->ub->dev_info.dev_id, ubq->q_id,
+ elem->tag, ret);
+ return ret;
+ }
+
+ /* can't touch 'ublk_io' any more */
+ if (buf_idx != UBLK_INVALID_BUF_IDX)
+ io_buffer_unregister_bvec(data->cmd, buf_idx, data->issue_flags);
+ if (req_op(req) == REQ_OP_ZONE_APPEND)
+ req->__sector = ublk_batch_zone_lba(uc, elem);
+ if (compl)
+ __ublk_complete_rq(req, io, ublk_dev_need_map_io(data->ub));
+ return 0;
+}
+
+static int ublk_handle_batch_commit_cmd(const struct ublk_batch_io_data *data)
+{
+ const struct ublk_batch_io *uc = &data->header;
+ struct io_uring_cmd *cmd = data->cmd;
+ struct ublk_batch_io_iter iter = {
+ .uaddr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr)),
+ .total = uc->nr_elem * uc->elem_bytes,
+ .elem_bytes = uc->elem_bytes,
+ };
+ int ret;
+
+ ret = ublk_walk_cmd_buf(&iter, data, ublk_batch_commit_io);
+
+ return iter.done == 0 ? ret : iter.done;
+}
+
static int ublk_check_batch_cmd_flags(const struct ublk_batch_io *uc)
{
unsigned elem_bytes = sizeof(struct ublk_elem_header);
@@ -2783,7 +2892,7 @@ static int ublk_ch_batch_io_uring_cmd(struct io_uring_cmd *cmd,
ret = ublk_check_batch_cmd(&data);
if (ret)
goto out;
- ret = -EOPNOTSUPP;
+ ret = ublk_handle_batch_commit_cmd(&data);
break;
default:
ret = -EOPNOTSUPP;
diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
index c96c299057c3..295ec8f34173 100644
--- a/include/uapi/linux/ublk_cmd.h
+++ b/include/uapi/linux/ublk_cmd.h
@@ -109,6 +109,14 @@
*/
#define UBLK_U_IO_PREP_IO_CMDS \
_IOWR('u', 0x25, struct ublk_batch_io)
+/*
+ * If a failure code is returned, nothing in the command buffer has been
+ * handled. Otherwise, the return value is the number of bytes in the
+ * command buffer that were actually handled; the number of committed IOs
+ * can be derived from it using `elem_bytes`. IOs in the remaining bytes
+ * are not committed, so userspace has to check the return value to deal
+ * with partial commits correctly.
+ */
#define UBLK_U_IO_COMMIT_IO_CMDS \
_IOWR('u', 0x26, struct ublk_batch_io)
--
2.47.0
Powered by blists - more mailing lists