Message-ID: <20251106171647.2590074-3-csander@purestorage.com>
Date: Thu, 6 Nov 2025 10:16:47 -0700
From: Caleb Sander Mateos <csander@...estorage.com>
To: Ming Lei <ming.lei@...hat.com>,
Jens Axboe <axboe@...nel.dk>
Cc: linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org,
Caleb Sander Mateos <csander@...estorage.com>
Subject: [PATCH v3 2/2] ublk: use rq_for_each_segment() for user copy

ublk_advance_io_iter() and ublk_copy_io_pages() currently open-code the
iteration over the request's bvecs. Switch to the rq_for_each_segment()
macro provided by blk-mq to avoid reaching into the bio internals and
simplify the code.

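For reference, the iteration pattern the new code relies on looks roughly
like this (an illustrative sketch, not part of the patch; the
example_walk_segments() helper is made up for illustration):

    /* Walk every segment of a request and map each one for a CPU copy. */
    static void example_walk_segments(struct request *req)
    {
            struct req_iterator iter;
            struct bio_vec bv;

            rq_for_each_segment(bv, req, iter) {
                    /* bv covers one contiguous piece of the request's data */
                    void *buf = bvec_kmap_local(&bv);

                    /* ... copy up to bv.bv_len bytes starting at buf ... */
                    kunmap_local(buf);
            }
    }

The macro walks each bio in the request and each bvec within it, which is
the same bookkeeping the removed helpers performed by hand.
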
Suggested-by: Ming Lei <ming.lei@...hat.com>
Signed-off-by: Caleb Sander Mateos <csander@...estorage.com>
---
drivers/block/ublk_drv.c | 70 +++++++++++-----------------------------
 1 file changed, 19 insertions(+), 51 deletions(-)

diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 40eee3e15a4c..5cf288809226 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -911,26 +911,33 @@ static const struct block_device_operations ub_fops = {
.open = ublk_open,
.free_disk = ublk_free_disk,
.report_zones = ublk_report_zones,
};
-struct ublk_io_iter {
- struct bio *bio;
- struct bvec_iter iter;
-};
-
-/* return how many bytes are copied */
-static size_t ublk_copy_io_pages(struct ublk_io_iter *data,
- struct iov_iter *uiter, int dir)
+/*
+ * Copy data between the request's pages and the iov_iter. 'offset' is the
+ * byte offset into the request's data at which the copy starts.
+ */
+static size_t ublk_copy_user_pages(const struct request *req,
+ unsigned offset, struct iov_iter *uiter, int dir)
{
+ struct req_iterator iter;
+ struct bio_vec bv;
size_t done = 0;
- for (;;) {
- struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
- void *bv_buf = bvec_kmap_local(&bv);
+ rq_for_each_segment(bv, req, iter) {
+ void *bv_buf;
size_t copied;
+ if (offset >= bv.bv_len) {
+ offset -= bv.bv_len;
+ continue;
+ }
+
+ bv.bv_offset += offset;
+ bv.bv_len -= offset;
+ bv_buf = bvec_kmap_local(&bv);
if (dir == ITER_DEST)
copied = copy_to_iter(bv_buf, bv.bv_len, uiter);
else
copied = copy_from_iter(bv_buf, bv.bv_len, uiter);
@@ -938,54 +945,15 @@ static size_t ublk_copy_io_pages(struct ublk_io_iter *data,
done += copied;
if (copied < bv.bv_len)
break;
- /* advance bio */
- bio_advance_iter_single(data->bio, &data->iter, copied);
- if (!data->iter.bi_size) {
- data->bio = data->bio->bi_next;
- if (data->bio == NULL)
- break;
- data->iter = data->bio->bi_iter;
- }
+ offset = 0;
}
return done;
}
-static bool ublk_advance_io_iter(const struct request *req,
- struct ublk_io_iter *iter, unsigned int offset)
-{
- struct bio *bio = req->bio;
-
- for_each_bio(bio) {
- if (bio->bi_iter.bi_size > offset) {
- iter->bio = bio;
- iter->iter = bio->bi_iter;
- bio_advance_iter(iter->bio, &iter->iter, offset);
- return true;
- }
- offset -= bio->bi_iter.bi_size;
- }
- return false;
-}
-
-/*
- * Copy data between request pages and io_iter, and 'offset'
- * is the start point of linear offset of request.
- */
-static size_t ublk_copy_user_pages(const struct request *req,
- unsigned offset, struct iov_iter *uiter, int dir)
-{
- struct ublk_io_iter iter;
-
- if (!ublk_advance_io_iter(req, &iter, offset))
- return 0;
-
- return ublk_copy_io_pages(&iter, uiter, dir);
-}
-
static inline bool ublk_need_map_req(const struct request *req)
{
return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
}
--
2.45.2