[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251105202823.2198194-3-csander@purestorage.com>
Date: Wed, 5 Nov 2025 13:28:23 -0700
From: Caleb Sander Mateos <csander@...estorage.com>
To: Ming Lei <ming.lei@...hat.com>,
Jens Axboe <axboe@...nel.dk>
Cc: linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org,
Caleb Sander Mateos <csander@...estorage.com>
Subject: [PATCH v2 2/2] ublk: use rq_for_each_bvec() for user copy
ublk_advance_io_iter() and ublk_copy_io_pages() currently open-code the
iteration over the request's bvecs. Switch to the rq_for_each_bvec()
macro provided by blk-mq to avoid reaching into the bio internals and to
simplify the code. Unlike bio_iter_iovec(), rq_for_each_bvec() can
return multi-page bvecs, so switch from copy_{to,from}_iter() to
copy_page_{to,from}_iter() to map and copy each page in the bvec.
Suggested-by: Ming Lei <ming.lei@...hat.com>
Signed-off-by: Caleb Sander Mateos <csander@...estorage.com>
---
drivers/block/ublk_drv.c | 78 ++++++++++++----------------------------
1 file changed, 23 insertions(+), 55 deletions(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 40eee3e15a4c..929d40fe0250 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -911,81 +911,49 @@ static const struct block_device_operations ub_fops = {
.open = ublk_open,
.free_disk = ublk_free_disk,
.report_zones = ublk_report_zones,
};
-struct ublk_io_iter {
- struct bio *bio;
- struct bvec_iter iter;
-};
-
-/* return how many bytes are copied */
-static size_t ublk_copy_io_pages(struct ublk_io_iter *data,
- struct iov_iter *uiter, int dir)
+/*
+ * Copy data between the request's pages and the iov_iter; 'offset' is
+ * the linear byte offset into the request at which copying starts.
+ */
+static size_t ublk_copy_user_pages(const struct request *req,
+ unsigned offset, struct iov_iter *uiter, int dir)
{
+ struct req_iterator iter;
+ struct bio_vec bv;
size_t done = 0;
- for (;;) {
- struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
- void *bv_buf = bvec_kmap_local(&bv);
+ rq_for_each_bvec(bv, req, iter) {
size_t copied;
+ if (offset >= bv.bv_len) {
+ offset -= bv.bv_len;
+ continue;
+ }
+
+ bv.bv_offset += offset;
+ bv.bv_len -= offset;
+ bv.bv_page += bv.bv_offset / PAGE_SIZE;
+ bv.bv_offset %= PAGE_SIZE;
if (dir == ITER_DEST)
- copied = copy_to_iter(bv_buf, bv.bv_len, uiter);
+ copied = copy_page_to_iter(
+ bv.bv_page, bv.bv_offset, bv.bv_len, uiter);
else
- copied = copy_from_iter(bv_buf, bv.bv_len, uiter);
-
- kunmap_local(bv_buf);
+ copied = copy_page_from_iter(
+ bv.bv_page, bv.bv_offset, bv.bv_len, uiter);
done += copied;
if (copied < bv.bv_len)
break;
- /* advance bio */
- bio_advance_iter_single(data->bio, &data->iter, copied);
- if (!data->iter.bi_size) {
- data->bio = data->bio->bi_next;
- if (data->bio == NULL)
- break;
- data->iter = data->bio->bi_iter;
- }
+ offset = 0;
}
return done;
}
-static bool ublk_advance_io_iter(const struct request *req,
- struct ublk_io_iter *iter, unsigned int offset)
-{
- struct bio *bio = req->bio;
-
- for_each_bio(bio) {
- if (bio->bi_iter.bi_size > offset) {
- iter->bio = bio;
- iter->iter = bio->bi_iter;
- bio_advance_iter(iter->bio, &iter->iter, offset);
- return true;
- }
- offset -= bio->bi_iter.bi_size;
- }
- return false;
-}
-
-/*
- * Copy data between request pages and io_iter, and 'offset'
- * is the start point of linear offset of request.
- */
-static size_t ublk_copy_user_pages(const struct request *req,
- unsigned offset, struct iov_iter *uiter, int dir)
-{
- struct ublk_io_iter iter;
-
- if (!ublk_advance_io_iter(req, &iter, offset))
- return 0;
-
- return ublk_copy_io_pages(&iter, uiter, dir);
-}
-
static inline bool ublk_need_map_req(const struct request *req)
{
return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
}
--
2.45.2
Powered by blists - more mailing lists