Date:	Fri, 23 Aug 2013 02:26:58 +0000
From:	"Nicholas A. Bellinger" <nab@...erainc.com>
To:	target-devel <target-devel@...r.kernel.org>
Cc:	lkml <linux-kernel@...r.kernel.org>,
	linux-scsi <linux-scsi@...r.kernel.org>,
	Christoph Hellwig <hch@....de>, Hannes Reinecke <hare@...e.de>,
	Martin Petersen <martin.petersen@...cle.com>,
	Chris Mason <chris.mason@...ionio.com>,
	Roland Dreier <roland@...estorage.com>,
	Zach Brown <zab@...hat.com>,
	Kent Overstreet <kmo@...erainc.com>,
	Theodore Tso <tytso@....edu>,
	James Bottomley <JBottomley@...allels.com>,
	Nicholas Bellinger <nab@...ux-iscsi.org>,
	Nicholas Bellinger <nab@...erainc.com>
Subject: [PATCH 6/9] target: Add support for EXTENDED_COPY copy offload emulation

From: Nicholas Bellinger <nab@...erainc.com>

This patch adds support for EXTENDED_COPY emulation from SPC-3, which
enables full copy offload target support both within a single virtual
backend device and across multiple virtual backend devices.  It also
functions independently of target fabric, and supports copy offload
across multiple target fabric ports.

This implementation supports both the EXTENDED_COPY PUSH and PULL models
of operation, so the actual CDB may be received on either the source or
destination logical unit.

For Target Descriptors, it currently supports the NAA IEEE Registered
Extended designator (descriptor type 0xe4), which allows target ports to
be referenced independently of fabric type using EVPD 0x83 WWNs.

For Segment Descriptors, it currently supports copy from block to
block (0x02) mode.

It also honors any SCSI reservations present on the destination target
port.  Note that only Supports No List Identifier (SNLID=1) mode is
supported.

Also included is basic RECEIVE_COPY_RESULTS support with service action
type OPERATING PARAMETERS (0x03), as required for SNLID=1 operation.
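
For reference, a sketch of the EXTENDED_COPY parameter list header that
the parsing code assumes is included below.  Offsets mirror the reads in
target_do_xcopy(); the struct itself is hypothetical and for illustration
only, not part of this patch:

	struct xcopy_param_list_hdr {
		u8	list_id;	/* byte 0: must be 0x00 for SNLID=1 */
		u8	list_id_usage;	/* byte 1: bits 4:3 */
		__be16	tdll;		/* bytes 2-3: TARGET DESCRIPTOR LIST LENGTH */
		u8	reserved[4];	/* bytes 4-7 */
		__be32	sdll;		/* bytes 8-11: SEGMENT DESCRIPTOR LIST LENGTH */
		__be32	inline_dl;	/* bytes 12-15: must be zero */
		/* 32-byte target descriptors follow at byte 16, then
		 * 28-byte segment descriptors */
	} __packed;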

Cc: Christoph Hellwig <hch@....de>
Cc: Hannes Reinecke <hare@...e.de>
Cc: Martin Petersen <martin.petersen@...cle.com>
Cc: Chris Mason <chris.mason@...ionio.com>
Cc: Roland Dreier <roland@...estorage.com>
Cc: Zach Brown <zab@...hat.com>
Cc: James Bottomley <JBottomley@...allels.com>
Cc: Nicholas Bellinger <nab@...ux-iscsi.org>
Signed-off-by: Nicholas Bellinger <nab@...erainc.com>
---
 drivers/target/Makefile            |    3 +-
 drivers/target/target_core_xcopy.c | 1122 ++++++++++++++++++++++++++++++++++++
 drivers/target/target_core_xcopy.h |   62 ++
 include/target/target_core_base.h  |    1 +
 4 files changed, 1187 insertions(+), 1 deletions(-)
 create mode 100644 drivers/target/target_core_xcopy.c
 create mode 100644 drivers/target/target_core_xcopy.h

diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 9fdcb56..85b012d 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -13,7 +13,8 @@ target_core_mod-y		:= target_core_configfs.o \
 				   target_core_spc.o \
 				   target_core_ua.o \
 				   target_core_rd.o \
-				   target_core_stat.o
+				   target_core_stat.o \
+				   target_core_xcopy.o
 
 obj-$(CONFIG_TARGET_CORE)	+= target_core_mod.o
 
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
new file mode 100644
index 0000000..e0fabea
--- /dev/null
+++ b/drivers/target/target_core_xcopy.c
@@ -0,0 +1,1122 @@
+/*******************************************************************************
+ * Filename: target_core_xcopy.c
+ *
+ * This file contains support for SPC-4 Extended-Copy offload with generic
+ * TCM backends.
+ *
+ * Copyright (c) 2011-2013 Datera, Inc. All rights reserved.
+ *
+ * Author:
+ * Nicholas A. Bellinger <nab@...erainc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/configfs.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+#include "target_core_xcopy.h"
+
+/* #define XCOPY_DBG_CTL */
+#ifdef XCOPY_DBG_CTL
+#define XCOPY_CTL(x...) printk(KERN_INFO x)
+#else
+#define XCOPY_CTL(x...)
+#endif
+
+/* #define XCOPY_DBG_IO */
+#ifdef XCOPY_DBG_IO
+#define XCOPY_IO(x...) printk(KERN_INFO x)
+#else
+#define XCOPY_IO(x...)
+#endif
+
+static struct workqueue_struct *xcopy_wq;
+/*
+ * From target_core_spc.c
+ */
+extern void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);
+/*
+ * From target_core_device.c
+ */
+extern struct mutex g_device_mutex;
+extern struct list_head g_device_list;
+/*
+ * From target_core_configfs.c
+ */
+extern struct configfs_subsystem *target_core_subsystem[];
+
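+/*
+ * Generate the 16-byte NAA IEEE Registered Extended identifier for @dev,
+ * matching the designator exported via EVPD 0x83: NAA type 0x6 in the top
+ * nibble of byte 0, the IEEE company ID spanning bytes 0-3, and the
+ * remaining vendor specific bytes filled in from the backend device.
+ */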
+static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
+{
+	int off = 0;
+
+	buf[off++] = (0x6 << 4);
+	buf[off++] = 0x01;
+	buf[off++] = 0x40;
+	buf[off] = (0x5 << 4);
+
+	spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
+	return 0;
+}
+
+static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
+					bool src)
+{
+	struct se_device *se_dev;
+	struct configfs_subsystem *subsys = target_core_subsystem[0];
+	unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
+	int rc;
+
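+	/*
+	 * Note that @src refers to where the XCOPY CDB was received: when it
+	 * arrived on the source port (src == true), the device still to be
+	 * located is the destination, and vice versa.
+	 */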
+	if (src == true)
+		dev_wwn = &xop->dst_tid_wwn[0];
+	else
+		dev_wwn = &xop->src_tid_wwn[0];
+
+	mutex_lock(&g_device_mutex);
+	list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
+
+		memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
+		target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
+
+		rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
+		if (rc != 0)
+			continue;
+
+		if (src == true) {
+			xop->dst_dev = se_dev;
+			XCOPY_CTL("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
+				" se_dev\n", xop->dst_dev);
+		} else {
+			xop->src_dev = se_dev;
+			XCOPY_CTL("XCOPY 0xe4: Setting xop->src_dev: %p from located"
+				" se_dev\n", xop->src_dev);
+		}
+
+		rc = configfs_depend_item(subsys,
+				&se_dev->dev_group.cg_item);
+		if (rc != 0) {
+			pr_err("configfs_depend_item attempt failed:"
+				" %d for se_dev: %p\n", rc, se_dev);
+			mutex_unlock(&g_device_mutex);
+			return rc;
+		}
+
+		XCOPY_CTL("Called configfs_depend_item for subsys: %p se_dev: %p"
+			" se_dev->se_dev_group: %p\n", subsys, se_dev,
+			&se_dev->dev_group);
+
+		mutex_unlock(&g_device_mutex);
+		return 0;
+	}
+	mutex_unlock(&g_device_mutex);
+
+	pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+	return -EINVAL;
+}
+
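+/*
+ * Parse a single 0xe4 (Identification Descriptor) target descriptor, laid
+ * out as parsed below: bytes 2-3 = RELATIVE INITIATOR PORT IDENTIFIER,
+ * byte 4 = CODE SET, byte 5 = ASSOCIATION + DESIGNATOR TYPE, byte 7 =
+ * DESIGNATOR LENGTH, and bytes 8-23 = the NAA IEEE Registered Extended
+ * WWN used for EVPD 0x83 matching.
+ */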
+static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
+				unsigned char *p, bool src)
+{
+	unsigned char *desc = p;
+	unsigned short ript;
+	u8 desig_len;
+	/*
+	 * Extract RELATIVE INITIATOR PORT IDENTIFIER
+	 */
+	ript = get_unaligned_be16(&desc[2]);
+	XCOPY_CTL("XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", ript);
+	/*
+	 * Check for supported code set, association, and designator type
+	 */
+	if ((desc[4] & 0x0f) != 0x1) {
+		pr_err("XCOPY 0xe4: code set of non binary type not supported\n");
+		return -EINVAL;
+	}
+	if ((desc[5] & 0x30) != 0x00) {
+		pr_err("XCOPY 0xe4: association other than LUN not supported\n");
+		return -EINVAL;
+	}
+	if ((desc[5] & 0x0f) != 0x3) {
+		pr_err("XCOPY 0xe4: designator type unsupported: 0x%02x\n",
+				(desc[5] & 0x0f));
+		return -EINVAL;
+	}
+	/*
+	 * Check for matching 16 byte length for NAA IEEE Registered Extended
+	 * Assigned designator
+	 */
+	desig_len = desc[7];
+	if (desig_len != 16) {
+		pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len);
+		return -EINVAL;
+	}
+	XCOPY_CTL("XCOPY 0xe4: desig_len: %d\n", (int)desig_len);
+	/*
+	 * Check for NAA IEEE Registered Extended Assigned header..
+	 */
+	if ((desc[8] & 0xf0) != 0x60) {
+		pr_err("XCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n",
+					(desc[8] & 0xf0));
+		return -EINVAL;
+	}
+
+	if (src == true) {
+		memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
+		/*
+		 * Determine if the source designator matches the local device
+		 */
+		if (!memcmp(&xop->local_dev_wwn[0], &xop->src_tid_wwn[0],
+				XCOPY_NAA_IEEE_REGEX_LEN)) {
+			xop->op_origin = XCOL_SOURCE_RECV_OP;
+			xop->src_dev = se_cmd->se_dev;
+			XCOPY_CTL("XCOPY 0xe4: Set xop->src_dev %p from source"
+					" received xop\n", xop->src_dev);
+		}
+	} else {
+		memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
+		/*
+		 * Determine if the destination designator matches the local device
+		 */
+		if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
+				XCOPY_NAA_IEEE_REGEX_LEN)) {
+			xop->op_origin = XCOL_DEST_RECV_OP;
+			xop->dst_dev = se_cmd->se_dev;
+			XCOPY_CTL("XCOPY 0xe4: Set xop->dst_dev: %p from destination"
+				" received xop\n", xop->dst_dev);
+		}
+	}
+
+	return 0;
+}
+
+static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
+				struct xcopy_op *xop, unsigned char *p,
+				unsigned short tdll)
+{
+	struct se_device *local_dev = se_cmd->se_dev;
+	unsigned char *desc = p;
+	int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0;
+	unsigned short start = 0;
+	bool src = true;
+
+	if (offset != 0) {
+		pr_err("XCOPY target descriptor list length is not"
+			" multiple of %d\n", XCOPY_TARGET_DESC_LEN);
+		return -EINVAL;
+	}
+	if (tdll > 64) {
+		pr_err("XCOPY target descriptor supports a maximum"
+			" two src/dest descriptors, tdll: %hu too large..\n", tdll);
+		return -EINVAL;
+	}
+	/*
+	 * Generate an IEEE Registered Extended designator based upon the
+	 * se_device the XCOPY was received upon..
+	 */
+	memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
+	target_xcopy_gen_naa_ieee(local_dev, &xop->local_dev_wwn[0]);
+
+	while (start < tdll) {
+		/*
+		 * Check target descriptor identification with 0xE4 type with
+		 * use VPD 0x83 WWPN matching ..
+		 */
+		switch (desc[0]) {
+		case 0xe4:
+			rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
+							&desc[0], src);
+			if (rc != 0)
+				goto out;
+			/*
+			 * Assume target descriptors are in source -> destination order..
+			 */
+			src = !src;
+			start += XCOPY_TARGET_DESC_LEN;
+			desc += XCOPY_TARGET_DESC_LEN;
+			ret++;
+			break;
+		default:
+			pr_err("XCOPY unsupported descriptor type code:"
+					" 0x%02x\n", desc[0]);
+			goto out;
+		}
+	}
+
+	if (xop->op_origin == XCOL_SOURCE_RECV_OP)
+		rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
+	else
+		rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
+
+	if (rc < 0)
+		goto out;
+
+	XCOPY_CTL("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: "
+		"0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
+		"%02x%02x%02x%02x%02x%02x\n", xop->src_dev, xop->src_tid_wwn[0],
+		xop->src_tid_wwn[1], xop->src_tid_wwn[2], xop->src_tid_wwn[3],
+		xop->src_tid_wwn[4], xop->src_tid_wwn[5], xop->src_tid_wwn[6],
+		xop->src_tid_wwn[7], xop->src_tid_wwn[8], xop->src_tid_wwn[9],
+		xop->src_tid_wwn[10], xop->src_tid_wwn[11], xop->src_tid_wwn[12],
+		xop->src_tid_wwn[13], xop->src_tid_wwn[14], xop->src_tid_wwn[15]);
+	XCOPY_CTL("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: "
+		"0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
+		"%02x%02x%02x%02x%02x%02x\n",  xop->dst_dev, xop->dst_tid_wwn[0],
+		xop->dst_tid_wwn[1], xop->dst_tid_wwn[2], xop->dst_tid_wwn[3],
+		xop->dst_tid_wwn[4], xop->dst_tid_wwn[5], xop->dst_tid_wwn[6],
+		xop->dst_tid_wwn[7], xop->dst_tid_wwn[8], xop->dst_tid_wwn[9],
+		xop->dst_tid_wwn[10], xop->dst_tid_wwn[11], xop->dst_tid_wwn[12],
+		xop->dst_tid_wwn[13], xop->dst_tid_wwn[14], xop->dst_tid_wwn[15]);
+
+	return ret;
+
+out:
+	return -EINVAL;
+}
+
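+/*
+ * Parse a single 0x02 (block -> block) segment descriptor, laid out as
+ * parsed below: bytes 2-3 = DESCRIPTOR LENGTH (0x18), bytes 4-5 / 6-7 =
+ * source / destination target descriptor indexes, bytes 10-11 = NUMBER OF
+ * BLOCKS, bytes 12-19 / 20-27 = source / destination LBAs, and with DC=1
+ * bytes 29-31 are read into xop->dbl.
+ */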
+static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop,
+					unsigned char *p)
+{
+	unsigned char *desc = p;
+	int dc = (desc[1] & 0x02);
+	unsigned short desc_len;
+
+	desc_len = get_unaligned_be16(&desc[2]);
+	if (desc_len != 0x18) {
+		pr_err("XCOPY segment desc 0x02: Illegal desc_len:"
+				" %hu\n", desc_len);
+		return -EINVAL;
+	}
+
+	xop->stdi = get_unaligned_be16(&desc[4]);
+	xop->dtdi = get_unaligned_be16(&desc[6]);
+	XCOPY_CTL("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
+		desc_len, xop->stdi, xop->dtdi, dc);
+
+	xop->nolb = get_unaligned_be16(&desc[10]);
+	xop->src_lba = get_unaligned_be64(&desc[12]);
+	xop->dst_lba = get_unaligned_be64(&desc[20]);
+	XCOPY_CTL("XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n",
+		xop->nolb, (unsigned long long)xop->src_lba,
+		(unsigned long long)xop->dst_lba);
+
+	if (dc != 0) {
+		xop->dbl = (desc[29] & 0xff) << 16;
+		xop->dbl |= (desc[30] & 0xff) << 8;
+		xop->dbl |= desc[31] & 0xff;
+
+		XCOPY_CTL("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
+	}
+	return 0;
+}
+
+static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
+				struct xcopy_op *xop, unsigned char *p,
+				unsigned int sdll)
+{
+	unsigned char *desc = p;
+	unsigned int start = 0;
+	int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;
+
+	if (offset != 0) {
+		pr_err("XCOPY segment descriptor list length is not"
+			" multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
+		return -EINVAL;
+	}
+
+	while (start < sdll) {
+		/*
+		 * Check segment descriptor type code for block -> block
+		 */
+		switch (desc[0]) {
+		case 0x02:
+			rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc);
+			if (rc < 0)
+				goto out;
+
+			ret++;
+			start += XCOPY_SEGMENT_DESC_LEN;
+			desc += XCOPY_SEGMENT_DESC_LEN;
+			break;
+		default:
+			pr_err("XCOPY unspported segment descriptor"
+				"type: 0x%02x\n", desc[0]);
+			goto out;
+		}
+	}
+
+	return ret;
+
+out:
+	return -EINVAL;
+}
+
+/*
+ * Start xcopy_pt ops
+ */
+
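+/*
+ * Internal passthrough descriptor for the READs + WRITEs issued on behalf
+ * of an EXTENDED_COPY: xpt_passthrough_wsem is completed by the fabric
+ * ->write_pending() callback and waited on before kicking off the internal
+ * WRITE, while xpt_passthrough_sem is completed from ->check_stop_free()
+ * once the command finishes.
+ */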
+struct xcopy_pt_cmd {
+	bool remote_port;
+	struct se_cmd se_cmd;
+	struct xcopy_op *xcopy_op;
+	struct completion xpt_passthrough_wsem;
+	struct completion xpt_passthrough_sem;
+};
+
+static struct se_port xcopy_pt_port;
+static struct se_portal_group xcopy_pt_tpg;
+static struct se_session xcopy_pt_sess;
+static struct se_node_acl xcopy_pt_nacl;
+
+static char *xcopy_pt_get_fabric_name(void)
+{
+	return "xcopy-pt";
+}
+
+static u32 xcopy_pt_get_tag(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
+{
+	struct configfs_subsystem *subsys = target_core_subsystem[0];
+	struct se_device *remote_dev;
+
+	if (xop->op_origin == XCOL_SOURCE_RECV_OP)
+		remote_dev = xop->dst_dev;
+	else
+		remote_dev = xop->src_dev;
+
+	XCOPY_CTL("Calling configfs_undepend_item for subsys: %p"
+		  " remote_dev: %p remote_dev->dev_group: %p\n",
+		  subsys, remote_dev, &remote_dev->dev_group.cg_item);
+
+	configfs_undepend_item(subsys, &remote_dev->dev_group.cg_item);
+}
+
+static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
+{
+	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
+				struct xcopy_pt_cmd, se_cmd);
+
+	if (xpt_cmd->remote_port)
+		kfree(se_cmd->se_lun);
+
+	kfree(xpt_cmd);
+}
+
+static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd)
+{
+	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
+				struct xcopy_pt_cmd, se_cmd);
+
+	complete(&xpt_cmd->xpt_passthrough_sem);
+	return 0;
+}
+
+static int xcopy_pt_write_pending(struct se_cmd *se_cmd)
+{
+	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
+				struct xcopy_pt_cmd, se_cmd);
+
+	complete(&xpt_cmd->xpt_passthrough_wsem);
+	return 0;
+}
+
+static int xcopy_pt_write_pending_status(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
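+/*
+ * Minimal fabric ops for the internal xcopy-pt session.  The queue_data_in
+ * + queue_status callbacks are intentionally no-ops because the copied data
+ * never leaves kernel memory; completion is signalled via the completions
+ * in struct xcopy_pt_cmd instead.
+ */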
+static struct target_core_fabric_ops xcopy_pt_tfo = {
+	.get_fabric_name	= xcopy_pt_get_fabric_name,
+	.get_task_tag		= xcopy_pt_get_tag,
+	.get_cmd_state		= xcopy_pt_get_cmd_state,
+	.release_cmd		= xcopy_pt_release_cmd,
+	.check_stop_free	= xcopy_pt_check_stop_free,
+	.write_pending		= xcopy_pt_write_pending,
+	.write_pending_status	= xcopy_pt_write_pending_status,
+	.queue_data_in		= xcopy_pt_queue_data_in,
+	.queue_status		= xcopy_pt_queue_status,
+};
+
+/*
+ * End xcopy_pt_ops
+ */
+
+int target_xcopy_setup_pt(void)
+{
+	xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
+	if (!xcopy_wq) {
+		pr_err("Unable to allocate xcopy_wq\n");
+		return -ENOMEM;
+	}
+
+	memset(&xcopy_pt_port, 0, sizeof(struct se_port));
+	INIT_LIST_HEAD(&xcopy_pt_port.sep_alua_list);
+	INIT_LIST_HEAD(&xcopy_pt_port.sep_list);
+	mutex_init(&xcopy_pt_port.sep_tg_pt_md_mutex);
+
+	memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
+	INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node);
+	INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
+	INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);
+
+	xcopy_pt_port.sep_tpg = &xcopy_pt_tpg;
+	xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo;
+
+	memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl));
+	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
+	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
+	memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
+	INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
+	INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);
+
+	xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
+	xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
+
+	xcopy_pt_sess.se_tpg = &xcopy_pt_tpg;
+	xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;
+
+	return 0;
+}
+
+void target_xcopy_release_pt(void)
+{
+	if (xcopy_wq)
+		destroy_workqueue(xcopy_wq);
+}
+
+static void target_xcopy_setup_pt_port(
+	struct xcopy_pt_cmd *xpt_cmd,
+	struct xcopy_op *xop,
+	bool remote_port)
+{
+	struct se_cmd *ec_cmd = xop->xop_se_cmd;
+	struct se_cmd *pt_cmd = &xpt_cmd->se_cmd;
+
+	if (xop->op_origin == XCOL_SOURCE_RECV_OP) {
+		/*
+		 * Honor destination port reservations for X-COPY PUSH emulation,
+		 * where the CDB is received on the local source port and blocks
+		 * are READ locally, then WRITTEN to the remote destination port.
+		 */
+		if (remote_port) {
+			xpt_cmd->remote_port = remote_port;
+			pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
+			XCOPY_IO("Setup emulated remote DEST xcopy_pt_port: %p to"
+				" cmd->se_lun->lun_sep for X-COPY data PUSH\n",
+				pt_cmd->se_lun->lun_sep);
+		} else {
+			pt_cmd->se_lun = ec_cmd->se_lun;
+			pt_cmd->se_dev = ec_cmd->se_dev;
+
+			XCOPY_IO("Honoring local SRC port from ec_cmd->se_dev:"
+				" %p\n", pt_cmd->se_dev);
+			XCOPY_IO("Honoring local SRC port from ec_cmd->se_lun: %p\n",
+				pt_cmd->se_lun);
+		}
+	} else {
+		/*
+		 * Honor source port reservations for X-COPY PULL emulation,
+		 * where the CDB is received on the local destination port and
+		 * blocks are READ from the remote source port, then WRITTEN to
+		 * the local destination port.
+		 */
+		if (remote_port) {
+			xpt_cmd->remote_port = remote_port;
+			pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
+			XCOPY_IO("Setup emulated remote SRC xcopy_pt_port: %p to"
+				" cmd->se_lun->lun_sep for X-COPY data PULL\n",
+				pt_cmd->se_lun->lun_sep);
+		} else {
+			pt_cmd->se_lun = ec_cmd->se_lun;
+			pt_cmd->se_dev = ec_cmd->se_dev;
+
+			XCOPY_IO("Honoring local DST port from ec_cmd->se_dev:"
+				" %p\n", pt_cmd->se_dev);
+			XCOPY_IO("Honoring local DST port from ec_cmd->se_lun: %p\n",
+				pt_cmd->se_lun);
+		}
+	}
+}
+
+static int target_xcopy_init_pt_lun(
+	struct xcopy_pt_cmd *xpt_cmd,
+	struct xcopy_op *xop,
+	struct se_device *se_dev,
+	struct se_cmd *pt_cmd,
+	bool remote_port)
+{
+	/*
+	 * Don't allocate + init a pt_cmd->se_lun if honoring the local port for
+	 * reservations.  The pt_cmd->se_lun pointer will be set up from within
+	 * target_xcopy_setup_pt_port()
+	 */
+	if (remote_port == false) {
+		pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
+		return 0;
+	}
+
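+	/*
+	 * For the remote port case, allocate a temporary se_lun so that the
+	 * emulated xcopy_pt_port can be assigned to ->lun_sep in
+	 * target_xcopy_setup_pt_port().  It is freed from
+	 * xcopy_pt_release_cmd() once the passthrough command completes.
+	 */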
+	pt_cmd->se_lun = kzalloc(sizeof(struct se_lun), GFP_KERNEL);
+	if (!pt_cmd->se_lun) {
+		pr_err("Unable to allocate pt_cmd->se_lun\n");
+		return -ENOMEM;
+	}
+	init_completion(&pt_cmd->se_lun->lun_shutdown_comp);
+	INIT_LIST_HEAD(&pt_cmd->se_lun->lun_cmd_list);
+	INIT_LIST_HEAD(&pt_cmd->se_lun->lun_acl_list);
+	spin_lock_init(&pt_cmd->se_lun->lun_acl_lock);
+	spin_lock_init(&pt_cmd->se_lun->lun_cmd_lock);
+	spin_lock_init(&pt_cmd->se_lun->lun_sep_lock);
+
+	pt_cmd->se_dev = se_dev;
+
+	XCOPY_IO("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev);
+	pt_cmd->se_lun->lun_se_dev = se_dev;
+	pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
+
+	XCOPY_IO("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n",
+		pt_cmd->se_lun->lun_se_dev);
+
+	return 0;
+}
+
+static int target_xcopy_setup_pt_cmd(
+	struct xcopy_pt_cmd *xpt_cmd,
+	struct xcopy_op *xop,
+	struct se_device *se_dev,
+	unsigned char *cdb,
+	bool remote_port,
+	bool alloc_mem)
+{
+	struct se_cmd *cmd = &xpt_cmd->se_cmd;
+	sense_reason_t sense_rc;
+	int ret = 0, rc;
+	/*
+	 * Setup LUN+port to honor reservations based upon xop->op_origin for
+	 * X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
+	 */
+	rc = target_xcopy_init_pt_lun(xpt_cmd, xop, se_dev, cmd, remote_port);
+	if (rc < 0) {
+		ret = rc;
+		goto out;
+	}
+	xpt_cmd->xcopy_op = xop;
+	target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);
+
+	sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
+	if (sense_rc) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (alloc_mem) {
+		rc = transport_generic_get_mem(cmd);
+		if (rc < 0) {
+			ret = rc;
+			goto out;
+		}
+		/*
+		 * Set this bit so that transport_free_pages() allows the
+		 * caller to release SGLs + physical memory allocated by
+		 * transport_generic_get_mem()..
+		 */
+		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+	} else {
+		/*
+		 * Here the previously allocated SGLs for the internal READ
+		 * are mapped zero-copy to the internal WRITE.
+		 */
+		sense_rc = transport_generic_map_mem_to_cmd(cmd,
+					xop->xop_data_sg, xop->xop_data_nents,
+					NULL, 0);
+		if (sense_rc) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		XCOPY_IO("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
+			 " %u\n", cmd->t_data_sg, cmd->t_data_nents);
+	}
+
+	return 0;
+
+out:
+	if (remote_port == true)
+		kfree(cmd->se_lun);
+	return ret;
+}
+
+static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
+{
+	struct se_cmd *se_cmd = &xpt_cmd->se_cmd;
+	sense_reason_t sense_rc;
+
+	sense_rc = transport_generic_new_cmd(se_cmd);
+	if (sense_rc)
+		return -EINVAL;
+
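+	/*
+	 * For the internal WRITE, wait for the fabric ->write_pending()
+	 * callback to signal that the data SGLs are in place before kicking
+	 * off execution, then wait below for final command completion.
+	 */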
+	if (se_cmd->data_direction == DMA_TO_DEVICE) {
+		if (wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_wsem) != 0)
+			return -EINVAL;
+
+		target_execute_cmd(se_cmd);
+	}
+
+	wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_sem);
+
+	XCOPY_IO("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
+			se_cmd->scsi_status);
+	return 0;
+}
+
+static int target_xcopy_read_source(
+	struct se_cmd *ec_cmd,
+	struct xcopy_op *xop,
+	struct se_device *src_dev,
+	sector_t src_lba,
+	u32 src_sectors)
+{
+	struct xcopy_pt_cmd *xpt_cmd;
+	struct se_cmd *se_cmd;
+	u32 length = (src_sectors * src_dev->dev_attrib.block_size);
+	int rc;
+	unsigned char cdb[16];
+	bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);
+
+	xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
+	if (!xpt_cmd) {
+		pr_err("Unable to allocate xcopy_pt_cmd\n");
+		return -ENOMEM;
+	}
+	init_completion(&xpt_cmd->xpt_passthrough_wsem);
+	init_completion(&xpt_cmd->xpt_passthrough_sem);
+	se_cmd = &xpt_cmd->se_cmd;
+
+	memset(&cdb[0], 0, 16);
+	cdb[0] = READ_16;
+	put_unaligned_be64(src_lba, &cdb[2]);
+	put_unaligned_be32(src_sectors, &cdb[10]);
+	XCOPY_IO("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
+		(unsigned long long)src_lba, src_sectors, length);
+
+	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
+				DMA_FROM_DEVICE, 0, NULL);
+	xop->src_pt_cmd = xpt_cmd;
+
+	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
+				remote_port, true);
+	if (rc < 0) {
+		transport_generic_free_cmd(se_cmd, 0);
+		return rc;
+	}
+
+	xop->xop_data_sg = se_cmd->t_data_sg;
+	xop->xop_data_nents = se_cmd->t_data_nents;
+	XCOPY_IO("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
+		" memory\n", xop->xop_data_sg, xop->xop_data_nents);
+
+	rc = target_xcopy_issue_pt_cmd(xpt_cmd);
+	if (rc < 0) {
+		transport_generic_free_cmd(se_cmd, 0);
+		return rc;
+	}
+	/*
+	 * Clear off the allocated t_data_sg, that has been saved for
+	 * zero-copy WRITE submission reuse in struct xcopy_op..
+	 */
+	se_cmd->t_data_sg = NULL;
+	se_cmd->t_data_nents = 0;
+
+	return 0;
+}
+
+static int target_xcopy_write_destination(
+	struct se_cmd *ec_cmd,
+	struct xcopy_op *xop,
+	struct se_device *dst_dev,
+	sector_t dst_lba,
+	u32 dst_sectors)
+{
+	struct xcopy_pt_cmd *xpt_cmd;
+	struct se_cmd *se_cmd;
+	u32 length = (dst_sectors * dst_dev->dev_attrib.block_size);
+	int rc;
+	unsigned char cdb[16];
+	bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);
+
+	xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
+	if (!xpt_cmd) {
+		pr_err("Unable to allocate xcopy_pt_cmd\n");
+		return -ENOMEM;
+	}
+	init_completion(&xpt_cmd->xpt_passthrough_wsem);
+	init_completion(&xpt_cmd->xpt_passthrough_sem);
+	se_cmd = &xpt_cmd->se_cmd;
+
+	memset(&cdb[0], 0, 16);
+	cdb[0] = WRITE_16;
+	put_unaligned_be64(dst_lba, &cdb[2]);
+	put_unaligned_be32(dst_sectors, &cdb[10]);
+	XCOPY_IO("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
+		(unsigned long long)dst_lba, dst_sectors, length);
+
+	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
+				DMA_TO_DEVICE, 0, NULL);
+	xop->dst_pt_cmd = xpt_cmd;
+
+	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
+				remote_port, false);
+	if (rc < 0) {
+		struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
+		/*
+		 * If the failure happened before the t_mem_list hand-off in
+		 * target_xcopy_setup_pt_cmd(), reset memory + clear the flag so
+		 * that the core releases this memory on error during X-COPY WRITE I/O.
+		 */
+		src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+		src_cmd->t_data_sg = xop->xop_data_sg;
+		src_cmd->t_data_nents = xop->xop_data_nents;
+
+		transport_generic_free_cmd(se_cmd, 0);
+		return rc;
+	}
+
+	rc = target_xcopy_issue_pt_cmd(xpt_cmd);
+	if (rc < 0) {
+		se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+		transport_generic_free_cmd(se_cmd, 0);
+		return rc;
+	}
+
+	return 0;
+}
+
+static void target_xcopy_do_work(struct work_struct *work)
+{
+	struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
+	struct se_device *src_dev = xop->src_dev, *dst_dev = xop->dst_dev;
+	struct se_cmd *ec_cmd = xop->xop_se_cmd;
+	sector_t src_lba = xop->src_lba, dst_lba = xop->dst_lba, end_lba;
+	unsigned int max_sectors;
+	int rc;
+	unsigned short nolb = xop->nolb, cur_nolb, max_nolb, copied_nolb = 0;
+
+	end_lba = src_lba + nolb;
+	/*
+	 * Break up XCOPY I/O into hw_max_sectors-sized I/O based on the
+	 * smallest max_sectors between src_dev + dst_dev, capped at
+	 * XCOPY_MAX_SECTORS.
+	 */
+	max_sectors = min(src_dev->dev_attrib.hw_max_sectors,
+			  dst_dev->dev_attrib.hw_max_sectors);
+	max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS);
+
+	max_nolb = min_t(u16, max_sectors, ((u16)(~0U)));
+
+	XCOPY_CTL("target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n",
+			nolb, max_nolb, (unsigned long long)end_lba);
+	XCOPY_CTL("target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n",
+			(unsigned long long)src_lba, (unsigned long long)dst_lba);
+
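+	/*
+	 * Main copy loop: READ up to max_nolb blocks from the source into
+	 * freshly allocated SGLs, re-map those same SGLs zero-copy onto the
+	 * internal WRITE to the destination, then release both passthrough
+	 * commands before moving on to the next chunk.
+	 */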
+	while (src_lba < end_lba) {
+		cur_nolb = min(nolb, max_nolb);
+
+		XCOPY_IO("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
+			" cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);
+
+		rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_nolb);
+		if (rc < 0)
+			goto out;
+
+		src_lba += cur_nolb;
+		XCOPY_IO("target_xcopy_do_work: Incremented READ src_lba to %llu\n",
+				(unsigned long long)src_lba);
+
+		XCOPY_IO("target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu,"
+			" cur_nolb: %hu\n", dst_dev, (unsigned long long)dst_lba, cur_nolb);
+
+		rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
+						dst_lba, cur_nolb);
+		if (rc < 0) {
+			transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
+			goto out;
+		}
+
+		dst_lba += cur_nolb;
+		XCOPY_IO("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n",
+				(unsigned long long)dst_lba);
+
+		copied_nolb += cur_nolb;
+		nolb -= cur_nolb;
+
+		transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
+		xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+
+		transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0);
+	}
+
+	xcopy_pt_undepend_remotedev(xop);
+	kfree(xop);
+
+	XCOPY_CTL("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
+		(unsigned long long)src_lba, (unsigned long long)dst_lba);
+	XCOPY_CTL("target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n",
+		copied_nolb, copied_nolb * dst_dev->dev_attrib.block_size);
+
+	XCOPY_CTL("target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n");
+	target_complete_cmd(ec_cmd, SAM_STAT_GOOD);
+	return;
+
+out:
+	xcopy_pt_undepend_remotedev(xop);
+	kfree(xop);
+
+	printk("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
+	ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+	target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
+}
+
+sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
+{
+	struct xcopy_op *xop = NULL;
+	unsigned char *p = NULL, *seg_desc;
+	unsigned int list_id, list_id_usage, sdll, inline_dl;
+	int rc;
+	unsigned short tdll;
+
+	p = transport_kmap_data_sg(se_cmd);
+	if (!p) {
+		pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
+		return TCM_OUT_OF_RESOURCES;
+	}
+
+	list_id = p[0];
+	if (list_id != 0x00) {
+		pr_err("XCOPY with non zero list_id: 0x%02x\n", list_id);
+		goto out;
+	}
+	/*
+	 * Check list id usage value:
+	 */
+	list_id_usage = (p[1] & 0x18);
+#if 0
+	if (list_id_usage != 0x0) {
+		pr_err("XCOPY with non zero list identifier not"
+			" supported: 0x%02x\n", list_id_usage);
+		goto out;
+	}
+#endif
+	/*
+	 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
+	 */
+	tdll = get_unaligned_be16(&p[2]);
+	sdll = get_unaligned_be32(&p[8]);
+
+	inline_dl = get_unaligned_be32(&p[12]);
+	if (inline_dl != 0) {
+		pr_err("XCOPY with non zero inline data length\n");
+		goto out;
+	}
+
+	xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
+	if (!xop) {
+		pr_err("Unable to allocate xcopy_op\n");
+		goto out;
+	}
+	xop->xop_se_cmd = se_cmd;
+
+	XCOPY_CTL("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
+		" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
+		tdll, sdll, inline_dl);
+
+	rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll);
+	if (rc <= 0)
+		goto out;
+
+	XCOPY_CTL("XCOPY: Processed %d target descriptors, length: %u\n", rc,
+				rc * XCOPY_TARGET_DESC_LEN);
+	seg_desc = &p[16];
+	seg_desc += (rc * XCOPY_TARGET_DESC_LEN);
+
+	rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll);
+	if (rc <= 0) {
+		xcopy_pt_undepend_remotedev(xop);
+		goto out;
+	}
+	transport_kunmap_data_sg(se_cmd);
+
+	XCOPY_CTL("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
+				rc * XCOPY_SEGMENT_DESC_LEN);
+	INIT_WORK(&xop->xop_work, target_xcopy_do_work);
+	queue_work(xcopy_wq, &xop->xop_work);
+	return TCM_NO_SENSE;
+
+out:
+	if (p)
+		transport_kunmap_data_sg(se_cmd);
+	kfree(xop);
+	return TCM_INVALID_CDB_FIELD;
+}
+
+static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
+{
+	unsigned char *p;
+
+	p = transport_kmap_data_sg(se_cmd);
+	if (!p) {
+		pr_err("transport_kmap_data_sg failed in"
+		       " target_rcr_operating_parameters\n");
+		return TCM_OUT_OF_RESOURCES;
+	}
+
+	if (se_cmd->data_length < 54) {
+		pr_err("Receive Copy Results Op Parameters length"
+		       " too small: %u\n", se_cmd->data_length);
+		transport_kunmap_data_sg(se_cmd);
+		return TCM_INVALID_CDB_FIELD;
+	}
+	/*
+	 * Set SNLID=1 (Supports no List ID)
+	 */
+	p[4] = 0x1;
+	/*
+	 * MAXIMUM TARGET DESCRIPTOR COUNT
+	 */
+	put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]);
+	/*
+	 * MAXIMUM SEGMENT DESCRIPTOR COUNT
+	 */
+	put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]);
+	/*
+	 * MAXIMUM DESCRIPTOR LIST LENGTH
+	 */
+	put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]);
+	/*
+	 * MAXIMUM SEGMENT LENGTH
+	 */
+	put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]);
+	/*
+	 * MAXIMUM INLINE DATA LENGTH for SA 0x04 (NOT SUPPORTED)
+	 */
+	put_unaligned_be32(0x0, &p[20]);
+	/*
+	 * HELD DATA LIMIT
+	 */
+	put_unaligned_be32(0x0, &p[24]);
+	/*
+	 * MAXIMUM STREAM DEVICE TRANSFER SIZE
+	 */
+	put_unaligned_be32(0x0, &p[28]);
+	/*
+	 * TOTAL CONCURRENT COPIES
+	 */
+	put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]);
+	/*
+	 * MAXIMUM CONCURRENT COPIES
+	 */
+	p[36] = RCR_OP_MAX_CONCURR_COPIES;
+	/*
+	 * DATA SEGMENT GRANULARITY (log 2)
+	 */
+	p[37] = RCR_OP_DATA_SEG_GRAN_LOG2;
+	/*
+	 * INLINE DATA GRANULARITY log 2)
+	 */
+	p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2;
+	/*
+	 * HELD DATA GRANULARITY
+	 */
+	p[39] = RCR_OP_HELD_DATA_GRAN_LOG2;
+	/*
+	 * IMPLEMENTED DESCRIPTOR LIST LENGTH
+	 */
+	p[43] = 0x2;
+	/*
+	 * List of implemented descriptor type codes (ordered)
+	 */
+	p[44] = 0x02; /* Copy Block to Block device */
+	p[45] = 0xe4; /* Identification descriptor target descriptor */
+
+	/*
+	 * AVAILABLE DATA (n-3)
+	 */
+	put_unaligned_be32(42, &p[0]);
+
+	transport_kunmap_data_sg(se_cmd);
+	target_complete_cmd(se_cmd, GOOD);
+
+	return TCM_NO_SENSE;
+}
+
+sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd)
+{
+	unsigned char *cdb = &se_cmd->t_task_cdb[0];
+	int sa = (cdb[1] & 0x1f), list_id = cdb[2];
+	sense_reason_t rc = TCM_NO_SENSE;
+
+	XCOPY_CTL("Entering target_do_receive_copy_results: SA: 0x%02x, List ID:"
+		" 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length);
+
+	if (list_id != 0) {
+		pr_err("Receive Copy Results with non zero list identifier"
+		       " not supported\n");
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	switch (sa) {
+	case RCR_SA_OPERATING_PARAMETERS:
+		rc = target_rcr_operating_parameters(se_cmd);
+		break;
+	case RCR_SA_COPY_STATUS:
+	case RCR_SA_RECEIVE_DATA:
+	case RCR_SA_FAILED_SEGMENT_DETAILS:
+	default:
+		pr_err("Unsupported SA for receive copy results: 0x%02x\n", sa);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	return rc;
+}
diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
new file mode 100644
index 0000000..700a981
--- /dev/null
+++ b/drivers/target/target_core_xcopy.h
@@ -0,0 +1,62 @@
+#define XCOPY_TARGET_DESC_LEN		32
+#define XCOPY_SEGMENT_DESC_LEN		28
+#define XCOPY_NAA_IEEE_REGEX_LEN	16
+#define XCOPY_MAX_SECTORS		1024
+
+enum xcopy_origin_list {
+	XCOL_SOURCE_RECV_OP = 0x01,
+	XCOL_DEST_RECV_OP = 0x02,
+};
+
+struct xcopy_pt_cmd;
+
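+/*
+ * State for a single EXTENDED_COPY operation: stdi/dtdi are the parsed
+ * source/destination target descriptor indexes, nolb is the number of
+ * logical blocks from the segment descriptor, and xop_data_sg/xop_data_nents
+ * hold the READ SGLs that are reused zero-copy for the internal WRITE.
+ */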
+struct xcopy_op {
+	int op_origin;
+
+	struct se_cmd *xop_se_cmd;
+	struct se_device *src_dev;
+	unsigned char src_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+	struct se_device *dst_dev;
+	unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+	unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+
+	sector_t src_lba;
+	sector_t dst_lba;
+	unsigned short stdi;
+	unsigned short dtdi;
+	unsigned short nolb;
+	unsigned int dbl;
+
+	struct xcopy_pt_cmd *src_pt_cmd;
+	struct xcopy_pt_cmd *dst_pt_cmd;
+
+	u32 xop_data_nents;
+	struct scatterlist *xop_data_sg;
+	struct work_struct xop_work;
+};
+
+/*
+ * Receive Copy Results Service Actions
+ */
+#define RCR_SA_COPY_STATUS		0x00
+#define RCR_SA_RECEIVE_DATA		0x01
+#define RCR_SA_OPERATING_PARAMETERS	0x03
+#define RCR_SA_FAILED_SEGMENT_DETAILS	0x04
+
+/*
+ * Receive Copy Results defs for Operating Parameters
+ */
+#define RCR_OP_MAX_TARGET_DESC_COUNT	0x2
+#define RCR_OP_MAX_SG_DESC_COUNT	0x1
+#define RCR_OP_MAX_DESC_LIST_LEN	1024
+#define RCR_OP_MAX_SEGMENT_LEN		268435456 /* 256 MB */
+#define RCR_OP_TOTAL_CONCURR_COPIES	0x1 /* Must be <= 16384 */
+#define RCR_OP_MAX_CONCURR_COPIES	0x1 /* Must be <= 255 */
+#define RCR_OP_DATA_SEG_GRAN_LOG2	9 /* 512 bytes in log 2 */
+#define RCR_OP_INLINE_DATA_GRAN_LOG2	9 /* 512 bytes in log 2 */
+#define RCR_OP_HELD_DATA_GRAN_LOG2	9 /* 512 bytes in log 2 */
+
+extern int target_xcopy_setup_pt(void);
+extern void target_xcopy_release_pt(void);
+extern sense_reason_t target_do_xcopy(struct se_cmd *);
+extern sense_reason_t target_do_receive_copy_results(struct se_cmd *);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 6b14f3c..f54a015 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -163,6 +163,7 @@ enum se_cmd_flags_table {
 	SCF_ACK_KREF			= 0x00040000,
 	SCF_COMPARE_AND_WRITE		= 0x00080000,
 	SCF_COMPARE_AND_WRITE_POST	= 0x00100000,
+	SCF_CMD_XCOPY_PASSTHROUGH	= 0x00200000,
 };
 
 /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
-- 
1.7.2.5
