Message-Id: <20220426101241.30100-5-nj.shetty@samsung.com>
Date: Tue, 26 Apr 2022 15:42:32 +0530
From: Nitesh Shetty <nj.shetty@...sung.com>
To: unlisted-recipients:; (no To-header on input)
Cc: chaitanyak@...dia.com, linux-block@...r.kernel.org,
linux-scsi@...r.kernel.org, dm-devel@...hat.com,
linux-nvme@...ts.infradead.org, linux-fsdevel@...r.kernel.org,
axboe@...nel.dk, msnitzer@...hat.com, bvanassche@....org,
martin.petersen@...cle.com, hare@...e.de, kbusch@...nel.org,
hch@....de, Frederick.Knight@...app.com, osandov@...com,
lsf-pc@...ts.linux-foundation.org, djwong@...nel.org,
josef@...icpanda.com, clm@...com, dsterba@...e.com, tytso@....edu,
jack@...e.com, nitheshshetty@...il.com, gost.dev@...sung.com,
Nitesh Shetty <nj.shetty@...sung.com>,
Vincent Fu <vincent.fu@...sung.com>,
Arnav Dawn <arnav.dawn@...sung.com>,
Alasdair Kergon <agk@...hat.com>,
Mike Snitzer <snitzer@...nel.org>,
Sagi Grimberg <sagi@...mberg.me>,
James Smart <james.smart@...adcom.com>,
Chaitanya Kulkarni <kch@...dia.com>,
Damien Le Moal <damien.lemoal@...nsource.wdc.com>,
Naohiro Aota <naohiro.aota@....com>,
Johannes Thumshirn <jth@...nel.org>,
Alexander Viro <viro@...iv.linux.org.uk>,
linux-kernel@...r.kernel.org
Subject: [PATCH v4 04/10] block: add emulation for copy

For devices which do not support copy, copy emulation is added.
Copy emulation is implemented by reading from the source ranges
into memory and writing to the corresponding destination ranges
synchronously.

Signed-off-by: Nitesh Shetty <nj.shetty@...sung.com>
Signed-off-by: Vincent Fu <vincent.fu@...sung.com>
Signed-off-by: Arnav Dawn <arnav.dawn@...sung.com>
---
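
A minimal usage sketch for reviewers (hypothetical caller, not part of
this patch). Offsets and lengths in struct range_entry are in bytes in
this series, and comp_len is filled with the number of bytes actually
copied for each range:

	#include <linux/blkdev.h>

	/* Hypothetical example: copy one 1 MiB extent within @bdev.
	 * blkdev_issue_copy() uses copy offload when the device
	 * supports it and falls back to the emulation added by this
	 * patch otherwise.
	 */
	static int example_copy_extent(struct block_device *bdev)
	{
		struct range_entry rlist = {
			.src = 0,		/* source offset, bytes */
			.dst = 1 << 20,		/* destination offset, bytes */
			.len = 1 << 20,		/* length to copy, bytes */
			.comp_len = 0,		/* completed bytes, set on return */
		};

		return blkdev_issue_copy(bdev, 1, &rlist, bdev, GFP_KERNEL);
	}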
block/blk-lib.c | 128 ++++++++++++++++++++++++++++++++++++++++-
block/blk-map.c | 2 +-
include/linux/blkdev.h | 2 +
3 files changed, 130 insertions(+), 2 deletions(-)

diff --git a/block/blk-lib.c b/block/blk-lib.c
index ba9da2d2f429..58c30a42ea44 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -273,6 +273,65 @@ int blk_copy_offload(struct block_device *src_bdev, int nr_srcs,
return cio_await_completion(cio);
}
+int blk_submit_rw_buf(struct block_device *bdev, void *buf, sector_t buf_len,
+ sector_t sector, unsigned int op, gfp_t gfp_mask)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+ struct bio *bio, *parent = NULL;
+ sector_t max_hw_len = min_t(unsigned int, queue_max_hw_sectors(q),
+ queue_max_segments(q) << (PAGE_SHIFT - SECTOR_SHIFT)) << SECTOR_SHIFT;
+ sector_t len, remaining;
+ int ret;
+
+ for (remaining = buf_len; remaining > 0; remaining -= len) {
+ len = min_t(sector_t, max_hw_len, remaining);
+retry:
+ bio = bio_map_kern(q, buf, len, gfp_mask);
+ if (IS_ERR(bio)) {
+ len >>= 1;
+ if (len)
+ goto retry;
+ return PTR_ERR(bio);
+ }
+
+ bio->bi_iter.bi_sector = sector >> SECTOR_SHIFT;
+ bio->bi_opf = op;
+ bio_set_dev(bio, bdev);
+ bio->bi_end_io = NULL;
+ bio->bi_private = NULL;
+
+ if (parent) {
+ bio_chain(parent, bio);
+ submit_bio(parent);
+ }
+ parent = bio;
+ sector += len;
+ buf = (char *) buf + len;
+ }
+ ret = submit_bio_wait(bio);
+ bio_put(bio);
+
+ return ret;
+}
+
+static void *blk_alloc_buf(sector_t req_size, sector_t *alloc_size, gfp_t gfp_mask)
+{
+ int min_size = PAGE_SIZE;
+ void *buf;
+
+ while (req_size >= min_size) {
+ buf = kvmalloc(req_size, gfp_mask);
+ if (buf) {
+ *alloc_size = req_size;
+ return buf;
+ }
+ /* retry with half of the requested size */
+ req_size >>= 1;
+ }
+
+ return NULL;
+}
+
static inline int blk_copy_sanity_check(struct block_device *src_bdev,
struct block_device *dst_bdev, struct range_entry *rlist, int nr)
{
@@ -298,6 +357,68 @@ static inline int blk_copy_sanity_check(struct block_device *src_bdev,
return 0;
}
+/* returns the total copy length that still needs to be copied */
+static inline sector_t blk_copy_max_range(struct range_entry *rlist, int nr, sector_t *max_len)
+{
+ int i;
+ sector_t len = 0;
+
+ *max_len = 0;
+ for (i = 0; i < nr; i++) {
+ *max_len = max(*max_len, rlist[i].len - rlist[i].comp_len);
+ len += (rlist[i].len - rlist[i].comp_len);
+ }
+
+ return len;
+}
+
+/*
+ * If the native copy offload feature is absent, this function emulates the
+ * copy by reading data from the source into a temporary buffer and writing
+ * it from the buffer to the destination device.
+ */
+static int blk_copy_emulate(struct block_device *src_bdev, int nr,
+ struct range_entry *rlist, struct block_device *dest_bdev, gfp_t gfp_mask)
+{
+ void *buf = NULL;
+ int ret = 0, nr_i = 0;
+ sector_t src, dst, copy_len, buf_len, read_len, copied_len,
+ max_len = 0, remaining = 0, offset = 0;
+
+ copy_len = blk_copy_max_range(rlist, nr, &max_len);
+ buf = blk_alloc_buf(max_len, &buf_len, gfp_mask);
+ if (!buf)
+ return -ENOMEM;
+
+ for (copied_len = 0; copied_len < copy_len; copied_len += read_len) {
+ if (!remaining) {
+ offset = rlist[nr_i].comp_len;
+ src = rlist[nr_i].src + offset;
+ dst = rlist[nr_i].dst + offset;
+ remaining = rlist[nr_i++].len - offset;
+ }
+
+ read_len = min_t(sector_t, remaining, buf_len);
+ if (!read_len)
+ continue;
+ ret = blk_submit_rw_buf(src_bdev, buf, read_len, src, REQ_OP_READ, gfp_mask);
+ if (ret)
+ goto out;
+ src += read_len;
+ remaining -= read_len;
+ ret = blk_submit_rw_buf(dest_bdev, buf, read_len, dst, REQ_OP_WRITE,
+ gfp_mask);
+ if (ret)
+ goto out;
+ else
+ rlist[nr_i - 1].comp_len += read_len;
+ dst += read_len;
+ }
+out:
+ kvfree(buf);
+ return ret;
+}
+
static inline bool blk_check_copy_offload(struct request_queue *src_q,
struct request_queue *dest_q)
{
@@ -325,6 +446,7 @@ int blkdev_issue_copy(struct block_device *src_bdev, int nr,
struct request_queue *src_q = bdev_get_queue(src_bdev);
struct request_queue *dest_q = bdev_get_queue(dest_bdev);
int ret = -EINVAL;
+ bool offload = false;
if (!src_q || !dest_q)
return -ENXIO;
@@ -342,9 +464,13 @@ int blkdev_issue_copy(struct block_device *src_bdev, int nr,
if (ret)
return ret;
- if (blk_check_copy_offload(src_q, dest_q))
+ offload = blk_check_copy_offload(src_q, dest_q);
+ if (offload)
ret = blk_copy_offload(src_bdev, nr, rlist, dest_bdev, gfp_mask);
+ if (ret || !offload)
+ ret = blk_copy_emulate(src_bdev, nr, rlist, dest_bdev, gfp_mask);
+
return ret;
}
EXPORT_SYMBOL_GPL(blkdev_issue_copy);
diff --git a/block/blk-map.c b/block/blk-map.c
index 7ffde64f9019..ca2ad2c21f42 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -340,7 +340,7 @@ static void bio_map_kern_endio(struct bio *bio)
* Map the kernel address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
-static struct bio *bio_map_kern(struct request_queue *q, void *data,
+struct bio *bio_map_kern(struct request_queue *q, void *data,
unsigned int len, gfp_t gfp_mask)
{
unsigned long kaddr = (unsigned long)data;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c6cb3fe82ba2..ea1f3c8f8dad 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1121,6 +1121,8 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp);
+struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
+ gfp_t gfp_mask);
int blkdev_issue_copy(struct block_device *src_bdev, int nr_srcs,
struct range_entry *src_rlist, struct block_device *dest_bdev, gfp_t gfp_mask);
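
Aside for reviewers (illustration only, not part of the patch):
blk_submit_rw_buf() relies on the usual bio chaining pattern, where
each bio except the last is chained to its successor and submitted,
and a single submit_bio_wait() on the final bio then waits for the
whole chain to complete. In isolation the pattern looks like this,
assuming an array of pre-built bios:

	#include <linux/bio.h>

	/* Hypothetical helper: submit nr pre-built bios as one chain
	 * and wait for all of them to complete; assumes nr >= 1.
	 */
	static int submit_bio_chain_sketch(struct bio *bios[], int nr)
	{
		int i;

		for (i = 0; i < nr - 1; i++) {
			bio_chain(bios[i], bios[i + 1]);
			submit_bio(bios[i]);
		}
		return submit_bio_wait(bios[nr - 1]);
	}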
--
2.35.1.500.gb896f729e2