Message-Id: <20230811105300.15889-8-nj.shetty@samsung.com>
Date: Fri, 11 Aug 2023 16:22:50 +0530
From: Nitesh Shetty <nj.shetty@...sung.com>
To: Jens Axboe <axboe@...nel.dk>, Jonathan Corbet <corbet@....net>,
Alasdair Kergon <agk@...hat.com>,
Mike Snitzer <snitzer@...nel.org>, dm-devel@...hat.com,
Keith Busch <kbusch@...nel.org>,
Christoph Hellwig <hch@....de>,
Sagi Grimberg <sagi@...mberg.me>,
Chaitanya Kulkarni <kch@...dia.com>,
Alexander Viro <viro@...iv.linux.org.uk>,
Christian Brauner <brauner@...nel.org>
Cc: martin.petersen@...cle.com, mcgrof@...nel.org, dlemoal@...nel.org,
gost.dev@...sung.com, Nitesh Shetty <nj.shetty@...sung.com>,
Kanchan Joshi <joshi.k@...sung.com>,
Javier González <javier.gonz@...sung.com>,
Anuj Gupta <anuj20.g@...sung.com>, linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-doc@...r.kernel.org,
linux-nvme@...ts.infradead.org, linux-fsdevel@...r.kernel.org
Subject: [PATCH v14 07/11] nvme: add copy offload support

The current design supports only a single source range. We receive a
request with REQ_OP_COPY_SRC, parse its two bios (the first describes
the source, the second the destination) and form a Copy command as
specified in TP 4065.

Also add trace event support for nvme_cmd_copy, and set the device's
copy limits in the queue limits.
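
For reference, a minimal userspace sketch (illustrative only, not part
of this patch; all names and values here are made up) of how one source
range maps onto the Copy command's zero-based NR and NLB fields,
matching what nvme_setup_copy_offload() below encodes:

#include <stdint.h>
#include <stdio.h>

struct src_range_demo {
	uint64_t slba;	/* starting source LBA */
	uint16_t nlb;	/* number of LBAs, 0's based */
};

int main(void)
{
	uint64_t src_lba = 0, dst_lba = 1024, n_lba = 256;
	struct src_range_demo r = {
		.slba = src_lba,
		.nlb  = (uint16_t)(n_lba - 1),	/* 256 LBAs -> NLB = 255 */
	};

	/* NR in the command is also 0's based: a single source range is
	 * encoded as 0, matching cmnd->copy.nr_range = 0 in the patch. */
	printf("SDLBA=%llu NR=0 SLBA=%llu NLB=%u\n",
	       (unsigned long long)dst_lba,
	       (unsigned long long)r.slba, r.nlb);
	return 0;
}
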
Signed-off-by: Kanchan Joshi <joshi.k@...sung.com>
Signed-off-by: Nitesh Shetty <nj.shetty@...sung.com>
Signed-off-by: Javier González <javier.gonz@...sung.com>
Signed-off-by: Anuj Gupta <anuj20.g@...sung.com>
---
 drivers/nvme/host/constants.c |  1 +
 drivers/nvme/host/core.c      | 79 +++++++++++++++++++++++++++++++++++
 drivers/nvme/host/trace.c     | 19 +++++++++
 include/linux/blkdev.h        |  1 +
 include/linux/nvme.h          | 43 +++++++++++++++++--
 5 files changed, 140 insertions(+), 3 deletions(-)

diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
index 20f46c230885..2f504a2b1fe8 100644
--- a/drivers/nvme/host/constants.c
+++ b/drivers/nvme/host/constants.c
@@ -19,6 +19,7 @@ static const char * const nvme_ops[] = {
[nvme_cmd_resv_report] = "Reservation Report",
[nvme_cmd_resv_acquire] = "Reservation Acquire",
[nvme_cmd_resv_release] = "Reservation Release",
+ [nvme_cmd_copy] = "Copy Offload",
[nvme_cmd_zone_mgmt_send] = "Zone Management Send",
[nvme_cmd_zone_mgmt_recv] = "Zone Management Receive",
[nvme_cmd_zone_append] = "Zone Append",
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 37b6fa746662..214628356e44 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -763,6 +763,63 @@ static inline void nvme_setup_flush(struct nvme_ns *ns,
cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}
+static inline blk_status_t nvme_setup_copy_offload(struct nvme_ns *ns,
+ struct request *req,
+ struct nvme_command *cmnd)
+{
+ struct nvme_copy_range *range = NULL;
+ struct bio *bio;
+ u64 dst_lba, src_lba, n_lba;
+ u16 nr_range = 1, control = 0, seg = 1;
+
+ if (blk_rq_nr_phys_segments(req) != COPY_MAX_SEGMENTS)
+ return BLK_STS_IOERR;
+
+ /*
+ * The first bio describes the source range and the last bio the
+ * destination.
+ */
+ __rq_for_each_bio(bio, req) {
+ if (seg == blk_rq_nr_phys_segments(req)) {
+ dst_lba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
+ if (n_lba != bio->bi_iter.bi_size >> ns->lba_shift)
+ return BLK_STS_IOERR;
+ } else {
+ src_lba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
+ n_lba = bio->bi_iter.bi_size >> ns->lba_shift;
+ }
+ seg++;
+ }
+
+ if (req->cmd_flags & REQ_FUA)
+ control |= NVME_RW_FUA;
+
+ if (req->cmd_flags & REQ_FAILFAST_DEV)
+ control |= NVME_RW_LR;
+
+ memset(cmnd, 0, sizeof(*cmnd));
+ cmnd->copy.opcode = nvme_cmd_copy;
+ cmnd->copy.nsid = cpu_to_le32(ns->head->ns_id);
+ cmnd->copy.control = cpu_to_le16(control);
+ cmnd->copy.sdlba = cpu_to_le64(dst_lba);
+ cmnd->copy.nr_range = 0; /* 0's based: a single source range */
+
+ range = kmalloc_array(nr_range, sizeof(*range),
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (!range)
+ return BLK_STS_RESOURCE;
+
+ range[0].slba = cpu_to_le64(src_lba);
+ range[0].nlb = cpu_to_le16(n_lba - 1); /* 0's based */
+
+ req->special_vec.bv_page = virt_to_page(range);
+ req->special_vec.bv_offset = offset_in_page(range);
+ req->special_vec.bv_len = sizeof(*range) * nr_range;
+ req->rq_flags |= RQF_SPECIAL_PAYLOAD;
+
+ return BLK_STS_OK;
+}
+
static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmnd)
{
@@ -1005,6 +1062,11 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
case REQ_OP_ZONE_APPEND:
ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
break;
+ case REQ_OP_COPY_SRC:
+ ret = nvme_setup_copy_offload(ns, req, cmd);
+ break;
+ case REQ_OP_COPY_DST:
+ return BLK_STS_IOERR;
default:
WARN_ON_ONCE(1);
return BLK_STS_IOERR;
@@ -1745,6 +1807,21 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
}
+static void nvme_config_copy(struct gendisk *disk, struct nvme_ns *ns,
+ struct nvme_id_ns *id)
+{
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ struct request_queue *q = disk->queue;
+
+ if (!(ctrl->oncs & NVME_CTRL_ONCS_COPY)) {
+ blk_queue_max_copy_hw_sectors(q, 0);
+ return;
+ }
+
+ blk_queue_max_copy_hw_sectors(q, nvme_lba_to_sect(ns,
+ le16_to_cpu(id->mssrl)));
+}
+
static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
return uuid_equal(&a->uuid, &b->uuid) &&
@@ -1944,6 +2021,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
set_capacity_and_notify(disk, capacity);
nvme_config_discard(disk, ns);
+ nvme_config_copy(disk, ns, id);
blk_queue_max_write_zeroes_sectors(disk->queue,
ns->ctrl->max_zeroes_sectors);
}
@@ -4634,6 +4712,7 @@ static inline void _nvme_check_size(void)
BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_copy_command) != 64);
BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
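
As a sanity check on the mssrl-derived limit in nvme_config_copy()
above, a small standalone sketch (assumed values, not driver code) of
the LBA-to-512-byte-sector conversion that nvme_lba_to_sect() performs:

#include <stdint.h>
#include <stdio.h>

/* nvme_lba_to_sect() shifts by (lba_shift - 9): 512-byte sectors per LBA */
static uint64_t lba_to_sect(unsigned int lba_shift, uint64_t lba)
{
	return lba << (lba_shift - 9);
}

int main(void)
{
	uint16_t mssrl = 2048;		/* assumed Max Single Source Range Length */
	unsigned int lba_shift = 12;	/* assumed 4KiB formatted namespace */

	/* 2048 LBAs << 3 = 16384 sectors, i.e. 8MiB per copy command */
	printf("max_copy_hw_sectors = %llu\n",
	       (unsigned long long)lba_to_sect(lba_shift, mssrl));
	return 0;
}
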
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 1c36fcedea20..82c6aef77c31 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -150,6 +150,23 @@ static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10)
return ret;
}
+static const char *nvme_trace_copy(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 sdlba = get_unaligned_le64(cdw10);
+ u8 nr_range = cdw10[8]; /* NR is a single byte, 0's based */
+ u16 control = get_unaligned_le16(cdw10 + 10);
+ u32 dsmgmt = get_unaligned_le32(cdw10 + 12);
+ u32 reftag = get_unaligned_le32(cdw10 + 16);
+
+ trace_seq_printf(p,
+ "sdlba=%llu, nr_range=%u, ctrl=0x%x, dsmgmt=%u, reftag=%u",
+ sdlba, nr_range, control, dsmgmt, reftag);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -243,6 +260,8 @@ const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
return nvme_trace_zone_mgmt_send(p, cdw10);
case nvme_cmd_zone_mgmt_recv:
return nvme_trace_zone_mgmt_recv(p, cdw10);
+ case nvme_cmd_copy:
+ return nvme_trace_copy(p, cdw10);
default:
return nvme_trace_common(p, cdw10);
}
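
To double-check the CDW10 offsets used by nvme_trace_copy() against the
command layout added to include/linux/nvme.h, here is a standalone
decode sketch (sample values only, not driver code):

#include <stdint.h>
#include <stdio.h>

static void put_le64(uint8_t *p, uint64_t v)
{
	for (int i = 0; i < 8; i++)
		p[i] = v >> (8 * i);
}

static uint64_t get_le64(const uint8_t *p)
{
	uint64_t v = 0;
	for (int i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	uint8_t cdw10[20] = { 0 };

	put_le64(cdw10, 1024);	/* SDLBA spans cdw10..cdw11 */
	cdw10[8] = 0;		/* NR at byte 8, 0's based (one range) */

	printf("sdlba=%llu, nr_range=%u, ctrl=0x%x\n",
	       (unsigned long long)get_le64(cdw10),
	       cdw10[8], get_le16(cdw10 + 10));
	return 0;
}
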
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 67eab354592c..a31e0ba5badd 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1226,6 +1226,7 @@ static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
/* maximum copy offload length, this is set to 128MB based on current testing */
#define COPY_MAX_BYTES (1 << 27)
+#define COPY_MAX_SEGMENTS 2
static inline unsigned int bdev_max_copy_sectors(struct block_device *bdev)
{
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 26dd3f859d9d..7744538c4ca4 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -337,7 +337,7 @@ struct nvme_id_ctrl {
__u8 nvscc;
__u8 nwpc;
__le16 acwu;
- __u8 rsvd534[2];
+ __le16 ocfs;
__le32 sgls;
__le32 mnan;
__u8 rsvd544[224];
@@ -365,6 +365,7 @@ enum {
NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3,
NVME_CTRL_ONCS_RESERVATIONS = 1 << 5,
NVME_CTRL_ONCS_TIMESTAMP = 1 << 6,
+ NVME_CTRL_ONCS_COPY = 1 << 8,
NVME_CTRL_VWC_PRESENT = 1 << 0,
NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
NVME_CTRL_OACS_NS_MNGT_SUPP = 1 << 3,
@@ -414,7 +415,10 @@ struct nvme_id_ns {
__le16 npdg;
__le16 npda;
__le16 nows;
- __u8 rsvd74[18];
+ __le16 mssrl;
+ __le32 mcl;
+ __u8 msrc;
+ __u8 rsvd91[11];
__le32 anagrpid;
__u8 rsvd96[3];
__u8 nsattr;
@@ -831,6 +835,7 @@ enum nvme_opcode {
nvme_cmd_resv_report = 0x0e,
nvme_cmd_resv_acquire = 0x11,
nvme_cmd_resv_release = 0x15,
+ nvme_cmd_copy = 0x19,
nvme_cmd_zone_mgmt_send = 0x79,
nvme_cmd_zone_mgmt_recv = 0x7a,
nvme_cmd_zone_append = 0x7d,
@@ -854,7 +859,8 @@ enum nvme_opcode {
nvme_opcode_name(nvme_cmd_resv_release), \
nvme_opcode_name(nvme_cmd_zone_mgmt_send), \
nvme_opcode_name(nvme_cmd_zone_mgmt_recv), \
- nvme_opcode_name(nvme_cmd_zone_append))
+ nvme_opcode_name(nvme_cmd_zone_append), \
+ nvme_opcode_name(nvme_cmd_copy))
@@ -1031,6 +1037,36 @@ struct nvme_dsm_range {
__le64 slba;
};
+struct nvme_copy_command {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __u64 rsvd2;
+ __le64 metadata;
+ union nvme_data_ptr dptr;
+ __le64 sdlba;
+ __u8 nr_range;
+ __u8 rsvd12;
+ __le16 control;
+ __le16 rsvd13;
+ __le16 dspec;
+ __le32 ilbrt;
+ __le16 lbat;
+ __le16 lbatm;
+};
+
+struct nvme_copy_range {
+ __le64 rsvd0;
+ __le64 slba;
+ __le16 nlb;
+ __le16 rsvd18;
+ __le32 rsvd20;
+ __le32 eilbrt;
+ __le16 elbat;
+ __le16 elbatm;
+};
+
struct nvme_write_zeroes_cmd {
__u8 opcode;
__u8 flags;
@@ -1792,6 +1828,7 @@ struct nvme_command {
struct nvme_download_firmware dlfw;
struct nvme_format_cmd format;
struct nvme_dsm_cmd dsm;
+ struct nvme_copy_command copy;
struct nvme_write_zeroes_cmd write_zeroes;
struct nvme_zone_mgmt_send_cmd zms;
struct nvme_zone_mgmt_recv_cmd zmr;
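
The 64-byte command size asserted by the BUILD_BUG_ON() added in
core.c, and the 32-byte source-range descriptor, can be mirrored in a
userspace check (illustrative only; plain fixed-width types stand in
for the kernel's __le/__u annotations):

#include <stdint.h>

struct copy_cmd_demo {
	uint8_t  opcode, flags;
	uint16_t command_id;
	uint32_t nsid;
	uint64_t rsvd2, metadata;
	uint8_t  dptr[16];
	uint64_t sdlba;
	uint8_t  nr_range, rsvd12;
	uint16_t control, rsvd13, dspec;
	uint32_t ilbrt;
	uint16_t lbat, lbatm;
};

struct copy_range_demo {
	uint64_t rsvd0, slba;
	uint16_t nlb, rsvd18;
	uint32_t rsvd20, eilbrt;
	uint16_t elbat, elbatm;
};

_Static_assert(sizeof(struct copy_cmd_demo) == 64, "copy command is 64 bytes");
_Static_assert(sizeof(struct copy_range_demo) == 32, "range descriptor is 32 bytes");

int main(void) { return 0; }
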
--
2.35.1.500.gb896f729e2