Message-ID: <20251217053455.281509-17-csander@purestorage.com>
Date: Tue, 16 Dec 2025 22:34:50 -0700
From: Caleb Sander Mateos <csander@...estorage.com>
To: Ming Lei <ming.lei@...hat.com>,
Jens Axboe <axboe@...nel.dk>,
Shuah Khan <shuah@...nel.org>
Cc: linux-block@...r.kernel.org,
linux-kselftest@...r.kernel.org,
linux-kernel@...r.kernel.org,
Stanley Zhang <stazhang@...estorage.com>,
Uday Shankar <ushankar@...estorage.com>,
Caleb Sander Mateos <csander@...estorage.com>
Subject: [PATCH 16/20] selftests: ublk: implement integrity user copy in kublk

If integrity data is enabled for kublk, allocate an integrity buffer for
each I/O. Extend ublk_user_copy() to copy the integrity data between the
ublk request and the integrity buffer if the ublksrv_io_desc indicates
that the request has integrity data.

Signed-off-by: Caleb Sander Mateos <csander@...estorage.com>
---
tools/testing/selftests/ublk/kublk.c | 40 +++++++++++++++++++++++----
tools/testing/selftests/ublk/kublk.h | 14 ++++++++++
2 files changed, 49 insertions(+), 5 deletions(-)
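
(Not part of the patch, just a note for reviewers: a minimal standalone
sketch of the integrity buffer sizing used below. It assumes the 512-byte
protection interval that the new ublk_integrity_len() helper documents;
integrity_buf_len() here is a hypothetical stand-in for that helper.)

#include <assert.h>
#include <stddef.h>

/*
 * Bytes of integrity metadata needed for data_len bytes of data:
 * one metadata_size chunk per 512-byte protection interval.
 */
static size_t integrity_buf_len(size_t data_len, unsigned char metadata_size)
{
        return (data_len >> 9) * metadata_size;
}

int main(void)
{
        /* e.g. a 64 KiB I/O with 8 bytes of PI per sector needs a 1 KiB buffer */
        assert(integrity_buf_len(64 << 10, 8) == 1024);
        return 0;
}
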
diff --git a/tools/testing/selftests/ublk/kublk.c b/tools/testing/selftests/ublk/kublk.c
index cf58d3b60ace..a5c0e4ff9e2a 100644
--- a/tools/testing/selftests/ublk/kublk.c
+++ b/tools/testing/selftests/ublk/kublk.c
@@ -414,12 +414,14 @@ static void ublk_queue_deinit(struct ublk_queue *q)
int nr_ios = q->q_depth;
if (q->io_cmd_buf)
munmap(q->io_cmd_buf, ublk_queue_cmd_buf_sz(q));
- for (i = 0; i < nr_ios; i++)
+ for (i = 0; i < nr_ios; i++) {
free(q->ios[i].buf_addr);
+ free(q->ios[i].integrity_buf);
+ }
}
static void ublk_thread_deinit(struct ublk_thread *t)
{
io_uring_unregister_buffers(&t->ring);
@@ -431,23 +433,25 @@ static void ublk_thread_deinit(struct ublk_thread *t)
close(t->ring.ring_fd);
t->ring.ring_fd = -1;
}
}
-static int ublk_queue_init(struct ublk_queue *q, unsigned long long extra_flags)
+static int ublk_queue_init(struct ublk_queue *q, unsigned long long extra_flags,
+ __u8 metadata_size)
{
struct ublk_dev *dev = q->dev;
int depth = dev->dev_info.queue_depth;
int i;
- int cmd_buf_size, io_buf_size;
+ int cmd_buf_size, io_buf_size, integrity_size;
unsigned long off;
q->tgt_ops = dev->tgt.ops;
q->flags = 0;
q->q_depth = depth;
q->flags = dev->dev_info.flags;
q->flags |= extra_flags;
+ q->metadata_size = metadata_size;
/* Cache fd in queue for fast path access */
q->ublk_fd = dev->fds[0];
cmd_buf_size = ublk_queue_cmd_buf_sz(q);
@@ -459,15 +463,26 @@ static int ublk_queue_init(struct ublk_queue *q, unsigned long long extra_flags)
q->dev->dev_info.dev_id, q->q_id);
goto fail;
}
io_buf_size = dev->dev_info.max_io_buf_bytes;
+ integrity_size = ublk_integrity_len(q, io_buf_size);
for (i = 0; i < q->q_depth; i++) {
q->ios[i].buf_addr = NULL;
q->ios[i].flags = UBLKS_IO_NEED_FETCH_RQ | UBLKS_IO_FREE;
q->ios[i].tag = i;
+ if (integrity_size) {
+ q->ios[i].integrity_buf = malloc(integrity_size);
+ if (!q->ios[i].integrity_buf) {
+ ublk_err("ublk dev %d queue %d io %d malloc(%d) failed: %m\n",
+ dev->dev_info.dev_id, q->q_id, i,
+ integrity_size);
+ goto fail;
+ }
+ }
+
if (ublk_queue_no_buf(q))
continue;
if (posix_memalign((void **)&q->ios[i].buf_addr,
getpagesize(), io_buf_size)) {
@@ -606,17 +622,17 @@ static void ublk_user_copy(const struct ublk_io *io, __u8 match_ublk_op)
const struct ublksrv_io_desc *iod = ublk_get_iod(q, io->tag);
__u64 off = ublk_user_copy_offset(q->q_id, io->tag);
__u8 ublk_op = ublksrv_get_op(iod);
__u32 len = iod->nr_sectors << 9;
void *addr = io->buf_addr;
+ ssize_t copied;
if (ublk_op != match_ublk_op)
return;
while (len) {
__u32 copy_len = min(len, UBLK_USER_COPY_LEN);
- ssize_t copied;
if (ublk_op == UBLK_IO_OP_WRITE)
copied = pread(q->ublk_fd, addr, copy_len, off);
else if (ublk_op == UBLK_IO_OP_READ)
copied = pwrite(q->ublk_fd, addr, copy_len, off);
@@ -625,10 +641,24 @@ static void ublk_user_copy(const struct ublk_io *io, __u8 match_ublk_op)
assert(copied == (ssize_t)copy_len);
addr += copy_len;
off += copy_len;
len -= copy_len;
}
+
+ if (!(iod->op_flags & UBLK_IO_F_INTEGRITY))
+ return;
+
+ len = ublk_integrity_len(q, iod->nr_sectors << 9);
+ off = ublk_user_copy_offset(q->q_id, io->tag);
+ off += UBLKSRV_IO_INTEGRITY_FLAG;
+ if (ublk_op == UBLK_IO_OP_WRITE)
+ copied = pread(q->ublk_fd, io->integrity_buf, len, off);
+ else if (ublk_op == UBLK_IO_OP_READ)
+ copied = pwrite(q->ublk_fd, io->integrity_buf, len, off);
+ else
+ assert(0);
+ assert(copied == (ssize_t)len);
}
int ublk_queue_io_cmd(struct ublk_thread *t, struct ublk_io *io)
{
struct ublk_queue *q = ublk_io_to_queue(io);
@@ -1011,11 +1041,12 @@ static int ublk_start_daemon(const struct dev_ctx *ctx, struct ublk_dev *dev)
for (i = 0; i < dinfo->nr_hw_queues; i++) {
dev->q[i].dev = dev;
dev->q[i].q_id = i;
- ret = ublk_queue_init(&dev->q[i], extra_flags);
+ ret = ublk_queue_init(&dev->q[i], extra_flags,
+ ctx->metadata_size);
if (ret) {
ublk_err("ublk dev %d queue %d init queue failed\n",
dinfo->dev_id, i);
goto fail;
}
diff --git a/tools/testing/selftests/ublk/kublk.h b/tools/testing/selftests/ublk/kublk.h
index d00f2b465cdf..830b49a7716a 100644
--- a/tools/testing/selftests/ublk/kublk.h
+++ b/tools/testing/selftests/ublk/kublk.h
@@ -110,10 +110,11 @@ struct ublk_ctrl_cmd_data {
__u32 len;
};
struct ublk_io {
char *buf_addr;
+ void *integrity_buf;
#define UBLKS_IO_NEED_FETCH_RQ (1UL << 0)
#define UBLKS_IO_NEED_COMMIT_RQ_COMP (1UL << 1)
#define UBLKS_IO_FREE (1UL << 2)
#define UBLKS_IO_NEED_GET_DATA (1UL << 3)
@@ -173,10 +174,11 @@ struct ublk_queue {
/* borrow one bit of ublk uapi flags, which may never be used */
#define UBLKS_Q_AUTO_BUF_REG_FALLBACK (1ULL << 63)
#define UBLKS_Q_NO_UBLK_FIXED_FD (1ULL << 62)
__u64 flags;
int ublk_fd; /* cached ublk char device fd */
+ __u8 metadata_size;
struct ublk_io ios[UBLK_QUEUE_DEPTH];
};
struct ublk_thread {
struct ublk_dev *dev;
@@ -222,10 +224,22 @@ static inline void ublk_set_integrity_params(const struct dev_ctx *ctx,
.csum_type = ctx->csum_type,
.tag_size = ctx->tag_size,
};
}
+static inline size_t ublk_integrity_len(const struct ublk_queue *q, size_t len)
+{
+ /* All targets currently use interval_exp = logical_bs_shift = 9 */
+ return (len >> 9) * q->metadata_size;
+}
+
+static inline size_t
+ublk_integrity_data_len(const struct ublk_queue *q, size_t integrity_len)
+{
+ return (integrity_len / q->metadata_size) << 9;
+}
+
static inline int ublk_io_auto_zc_fallback(const struct ublksrv_io_desc *iod)
{
return !!(iod->op_flags & UBLK_IO_F_NEED_REG_BUF);
}
--
2.45.2