[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251121015851.3672073-10-ming.lei@redhat.com>
Date: Fri, 21 Nov 2025 09:58:31 +0800
From: Ming Lei <ming.lei@...hat.com>
To: Jens Axboe <axboe@...nel.dk>,
linux-block@...r.kernel.org
Cc: Caleb Sander Mateos <csander@...estorage.com>,
Uday Shankar <ushankar@...estorage.com>,
Stefani Seibold <stefani@...bold.net>,
Andrew Morton <akpm@...ux-foundation.org>,
linux-kernel@...r.kernel.org,
Ming Lei <ming.lei@...hat.com>
Subject: [PATCH V4 09/27] ublk: add new batch command UBLK_U_IO_PREP_IO_CMDS & UBLK_U_IO_COMMIT_IO_CMDS
Add new command UBLK_U_IO_PREP_IO_CMDS, which is the batch version of
UBLK_IO_FETCH_REQ.
Add new command UBLK_U_IO_COMMIT_IO_CMDS, which is for committing io command
results only; it is likewise a batch-style command.
The new command header type is `struct ublk_batch_io`.
This patch doesn't actually implement these commands yet, just validates the
SQE fields.
Signed-off-by: Ming Lei <ming.lei@...hat.com>
---
drivers/block/ublk_drv.c | 85 ++++++++++++++++++++++++++++++++++-
include/uapi/linux/ublk_cmd.h | 49 ++++++++++++++++++++
2 files changed, 133 insertions(+), 1 deletion(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index c62b2f2057fe..21890947ceec 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -85,6 +85,11 @@
UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED | \
UBLK_PARAM_TYPE_DMA_ALIGN | UBLK_PARAM_TYPE_SEGMENT)
+#define UBLK_BATCH_F_ALL \
+ (UBLK_BATCH_F_HAS_ZONE_LBA | \
+ UBLK_BATCH_F_HAS_BUF_ADDR | \
+ UBLK_BATCH_F_AUTO_BUF_REG_FALLBACK)
+
struct ublk_uring_cmd_pdu {
/*
* Store requests in same batch temporarily for queuing them to
@@ -108,6 +113,12 @@ struct ublk_uring_cmd_pdu {
u16 tag;
};
+struct ublk_batch_io_data {
+ struct ublk_device *ub;
+ struct io_uring_cmd *cmd;
+ struct ublk_batch_io header;
+};
+
/*
* io command is active: sqe cmd is received, and its cqe isn't done
*
@@ -2520,10 +2531,82 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
return ublk_ch_uring_cmd_local(cmd, issue_flags);
}
+static int ublk_check_batch_cmd_flags(const struct ublk_batch_io *uc)
+{
+ unsigned elem_bytes = sizeof(struct ublk_elem_header);
+
+ if (uc->flags & ~UBLK_BATCH_F_ALL)
+ return -EINVAL;
+
+	/* UBLK_BATCH_F_AUTO_BUF_REG_FALLBACK is mutually exclusive with buffer address */
+ if ((uc->flags & UBLK_BATCH_F_AUTO_BUF_REG_FALLBACK) &&
+ (uc->flags & UBLK_BATCH_F_HAS_BUF_ADDR))
+ return -EINVAL;
+
+ elem_bytes += (uc->flags & UBLK_BATCH_F_HAS_ZONE_LBA ? sizeof(u64) : 0) +
+ (uc->flags & UBLK_BATCH_F_HAS_BUF_ADDR ? sizeof(u64) : 0);
+ if (uc->elem_bytes != elem_bytes)
+ return -EINVAL;
+ return 0;
+}
+
+static int ublk_check_batch_cmd(const struct ublk_batch_io_data *data)
+{
+
+ const struct ublk_batch_io *uc = &data->header;
+
+ if (uc->nr_elem > data->ub->dev_info.queue_depth)
+ return -E2BIG;
+
+ if ((uc->flags & UBLK_BATCH_F_HAS_ZONE_LBA) &&
+ !ublk_dev_is_zoned(data->ub))
+ return -EINVAL;
+
+ if ((uc->flags & UBLK_BATCH_F_HAS_BUF_ADDR) &&
+ !ublk_dev_need_map_io(data->ub))
+ return -EINVAL;
+
+ if ((uc->flags & UBLK_BATCH_F_AUTO_BUF_REG_FALLBACK) &&
+ !ublk_dev_support_auto_buf_reg(data->ub))
+ return -EINVAL;
+
+ return ublk_check_batch_cmd_flags(uc);
+}
+
static int ublk_ch_batch_io_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
- return -EOPNOTSUPP;
+ const struct ublk_batch_io *uc = io_uring_sqe_cmd(cmd->sqe);
+ struct ublk_device *ub = cmd->file->private_data;
+ struct ublk_batch_io_data data = {
+ .ub = ub,
+ .cmd = cmd,
+ .header = (struct ublk_batch_io) {
+ .q_id = READ_ONCE(uc->q_id),
+ .flags = READ_ONCE(uc->flags),
+ .nr_elem = READ_ONCE(uc->nr_elem),
+ .elem_bytes = READ_ONCE(uc->elem_bytes),
+ },
+ };
+ u32 cmd_op = cmd->cmd_op;
+ int ret = -EINVAL;
+
+ if (data.header.q_id >= ub->dev_info.nr_hw_queues)
+ goto out;
+
+ switch (cmd_op) {
+ case UBLK_U_IO_PREP_IO_CMDS:
+ case UBLK_U_IO_COMMIT_IO_CMDS:
+ ret = ublk_check_batch_cmd(&data);
+ if (ret)
+ goto out;
+ ret = -EOPNOTSUPP;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+out:
+ return ret;
}
static inline bool ublk_check_ubuf_dir(const struct request *req,
diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
index ec77dabba45b..2ce5a496b622 100644
--- a/include/uapi/linux/ublk_cmd.h
+++ b/include/uapi/linux/ublk_cmd.h
@@ -102,6 +102,10 @@
_IOWR('u', 0x23, struct ublksrv_io_cmd)
#define UBLK_U_IO_UNREGISTER_IO_BUF \
_IOWR('u', 0x24, struct ublksrv_io_cmd)
+#define UBLK_U_IO_PREP_IO_CMDS \
+ _IOWR('u', 0x25, struct ublk_batch_io)
+#define UBLK_U_IO_COMMIT_IO_CMDS \
+ _IOWR('u', 0x26, struct ublk_batch_io)
/* only ABORT means that no re-fetch */
#define UBLK_IO_RES_OK 0
@@ -525,6 +529,51 @@ struct ublksrv_io_cmd {
};
};
+struct ublk_elem_header {
+ __u16 tag; /* IO tag */
+
+ /*
+ * Buffer index for incoming io command, only valid iff
+ * UBLK_F_AUTO_BUF_REG is set
+ */
+ __u16 buf_index;
+ __s32 result; /* I/O completion result (commit only) */
+};
+
+/*
+ * uring_cmd buffer structure for batch commands
+ *
+ * The buffer includes multiple elements, whose number is specified by
+ * `nr_elem`. Each element buffer is organized in the following order:
+ *
+ * struct ublk_elem_buffer {
+ * // Mandatory fields (8 bytes)
+ * struct ublk_elem_header header;
+ *
+ * // Optional fields (8 bytes each, included based on flags)
+ *
+ * // Buffer address (if UBLK_BATCH_F_HAS_BUF_ADDR) for copying data
+ * // between ublk request and ublk server buffer
+ * __u64 buf_addr;
+ *
+ * // returned Zone append LBA (if UBLK_BATCH_F_HAS_ZONE_LBA)
+ * __u64 zone_lba;
+ * }
+ *
+ * Used for `UBLK_U_IO_PREP_IO_CMDS` and `UBLK_U_IO_COMMIT_IO_CMDS`
+ */
+struct ublk_batch_io {
+ __u16 q_id;
+#define UBLK_BATCH_F_HAS_ZONE_LBA (1 << 0)
+#define UBLK_BATCH_F_HAS_BUF_ADDR (1 << 1)
+#define UBLK_BATCH_F_AUTO_BUF_REG_FALLBACK (1 << 2)
+ __u16 flags;
+ __u16 nr_elem;
+ __u8 elem_bytes;
+ __u8 reserved;
+ __u64 reserved2;
+};
+
struct ublk_param_basic {
#define UBLK_ATTR_READ_ONLY (1 << 0)
#define UBLK_ATTR_ROTATIONAL (1 << 1)
--
2.47.0
Powered by blists - more mailing lists