Message-Id: <1331254019-31877-4-git-send-email-nab@linux-iscsi.org>
Date:	Fri,  9 Mar 2012 00:46:59 +0000
From:	"Nicholas A. Bellinger" <nab@...ux-iscsi.org>
To:	target-devel <target-devel@...r.kernel.org>,
	linux-scsi <linux-scsi@...r.kernel.org>,
	linux-kernel <linux-kernel@...r.kernel.org>
Cc:	Andrew Vasquez <andrew.vasquez@...gic.com>,
	Arun Easi <arun.easi@...gic.com>,
	Giridhar Malavali <giridhar.malavali@...gic.com>,
	James Bottomley <JBottomley@...allels.com>,
	Christoph Hellwig <hch@....de>,
	Roland Dreier <roland@...estorage.com>,
	Joern Engel <joern@...fs.org>,
	Madhuranath Iyengar <mni@...ingtidesystems.com>,
	Nicholas Bellinger <nab@...ux-iscsi.org>
Subject: [RFC-v5 3/3] tcm_qla2xxx: Add >= 24xx series fabric module for target-core

From: Nicholas Bellinger <nab@...ux-iscsi.org>

This patch adds the tcm_qla2xxx fabric module for target-core, built on the
new qla_target.c LLD logic.  This includes support for explicit NodeACLs
configured via configfs, using tcm_qla2xxx_setup_nacl_from_rport() to match
against the FC transport's struct fc_host->rports list, and demo-mode support
for virtual LUN=0 access.
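
For reference, the explicit NodeACL path reduces to scanning fc_host->rports
for a WWNN match and recording that rport's 24-bit Port ID; a condensed view
of the loop in tcm_qla2xxx_setup_nacl_from_rport() below (locking and debug
output omitted):

    list_for_each_entry(rport, &fc_host->rports, peers) {
        if (rport_wwnn != rport->node_name)
            continue;
        /* record the Port ID and se_nacl in the lport_fcport_map slot */
        nacl->nport_id = rport->port_id;
    }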

This patch also adds tcm_qla2xxx_lport->lport_fcport_map and ->lport_loopid_map,
which track struct se_node_acl pointers for individual 24-bit Port ID and
16-bit Loop ID values.  These back the qla_target_template ->find_sess_by_s_id()
and ->find_sess_by_loop_id() callbacks used throughout the primary I/O dispatch
logic in the qla_target.c LLD code.
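
Roughly, the ->find_sess_by_s_id() lookup added below is a three-level table
walk keyed on the S_ID bytes (error handling and debug output omitted):

    domain = s_id[0]; area = s_id[1]; al_pa = s_id[2];
    d = &((struct tcm_qla2xxx_fc_domain *)lport->lport_fcport_map)[domain];
    se_nacl = d->areas[area].al_pas[al_pa].se_nacl;
    sess = container_of(se_nacl, struct tcm_qla2xxx_nacl,
                        se_node_acl)->qla_tgt_sess;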

The main piece of FC Nexus setup is tcm_qla2xxx_check_initiator_node_acl(),
which calls tcm_qla2xxx_set_sess_by_[s_id,loop_id]() to set up the
lport->lport_fcport_map and lport_loopid_map pointers respectively, and then
registers the new nexus with TCM via __transport_register_session().
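
Condensed, that call chain is (locking and error handling omitted; this only
summarizes the code added below):

    se_sess = transport_init_session();
    se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg, port_name);
    tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess, sess, s_id);
    tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess, sess, loop_id);
    __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);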

Cc: Andrew Vasquez <andrew.vasquez@...gic.com>
Cc: Arun Easi <arun.easi@...gic.com>
Cc: Giridhar Malavali <giridhar.malavali@...gic.com>
Cc: Christoph Hellwig <hch@....de>
Cc: James Bottomley <JBottomley@...allels.com>
Cc: Roland Dreier <roland@...estorage.com>
Cc: Joern Engel <joern@...fs.org>
Cc: Madhuranath Iyengar <mni@...ingtidesystems.com>
Signed-off-by: Nicholas A. Bellinger <nab@...ux-iscsi.org>
---
 drivers/scsi/qla2xxx/Kconfig       |    8 +
 drivers/scsi/qla2xxx/Makefile      |    1 +
 drivers/scsi/qla2xxx/tcm_qla2xxx.c | 1961 ++++++++++++++++++++++++++++++++++++
 drivers/scsi/qla2xxx/tcm_qla2xxx.h |  149 +++
 4 files changed, 2119 insertions(+), 0 deletions(-)
 create mode 100644 drivers/scsi/qla2xxx/tcm_qla2xxx.c
 create mode 100644 drivers/scsi/qla2xxx/tcm_qla2xxx.h

diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 6208d56..6c3ce52 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -25,3 +25,11 @@ config SCSI_QLA_FC
 	Firmware images can be retrieved from:
 
 		ftp://ftp.qlogic.com/outgoing/linux/firmware/
+
+config TCM_QLA2XXX
+	tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
+	depends on SCSI_QLA_FC && TARGET_CORE
+	select LIBFC
+	default n
+	---help---
+	Say Y here to enable the TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 702931ff..dce7d78 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -3,3 +3,4 @@ qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
         qla_nx.o qla_target.o
 
 obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
+obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
new file mode 100644
index 0000000..125b5d7
--- /dev/null
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -0,0 +1,1961 @@
+/*******************************************************************************
+ * This file contains tcm implementation using v4 configfs fabric infrastructure
+ * for QLogic target mode HBAs
+ *
+ * (c) Copyright 2010-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@...ingtidesystems.com>
+ *
+ * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contain code from
+ * the TCM_FC / Open-FCoE.org fabric module.
+ *
+ * Copyright (c) 2010 Cisco Systems, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "qla_def.h"
+#include "qla_target.h"
+#include "tcm_qla2xxx.h"
+
+extern struct workqueue_struct *tcm_qla2xxx_free_wq;
+extern struct workqueue_struct *tcm_qla2xxx_cmd_wq;
+
+int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg)
+{
+	return 0;
+}
+
+/*
+ * Parse WWN.
+ * If strict, we require lower-case hex and colon separators to be sure
+ * the name is the same as what would be generated by ft_format_wwn()
+ * so the name and wwn are mapped one-to-one.
+ */
+ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
+{
+	const char *cp;
+	char c;
+	u32 nibble;
+	u32 byte = 0;
+	u32 pos = 0;
+	u32 err;
+
+	*wwn = 0;
+	for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
+		c = *cp;
+		if (c == '\n' && cp[1] == '\0')
+			continue;
+		if (strict && pos++ == 2 && byte++ < 7) {
+			pos = 0;
+			if (c == ':')
+				continue;
+			err = 1;
+			goto fail;
+		}
+		if (c == '\0') {
+			err = 2;
+			if (strict && byte != 8)
+				goto fail;
+			return cp - name;
+		}
+		err = 3;
+		if (isdigit(c))
+			nibble = c - '0';
+		else if (isxdigit(c) && (islower(c) || !strict))
+			nibble = tolower(c) - 'a' + 10;
+		else
+			goto fail;
+		*wwn = (*wwn << 4) | nibble;
+	}
+	err = 4;
+fail:
+	pr_debug("err %u len %zu pos %u byte %u\n",
+			err, cp - name, pos, byte);
+	return -1;
+}
+
+ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
+{
+	u8 b[8];
+
+	put_unaligned_be64(wwn, b);
+	return snprintf(buf, len,
+		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+		b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
+}
+
+char *tcm_qla2xxx_get_fabric_name(void)
+{
+	return "qla2xxx";
+}
+
+/*
+ * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
+ */
+static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
+{
+	unsigned int i, j, value;
+	u8 wwn[8];
+
+	memset(wwn, 0, sizeof(wwn));
+
+	/* Validate and store the new name */
+	for (i = 0, j = 0; i < 16; i++) {
+		value = hex_to_bin(*ns++);
+		if ((int)value >= 0)	/* hex_to_bin() may return -1 */
+			j = (j << 4) | value;
+		else
+			return -EINVAL;
+
+		if (i % 2) {
+			wwn[i/2] = j & 0xff;
+			j = 0;
+		}
+	}
+
+	*nm = wwn_to_u64(wwn);
+	return 0;
+}
+
+/*
+ * This parsing logic follows drivers/scsi/scsi_transport_fc.c:store_fc_host_vport_create()
+ */
+int tcm_qla2xxx_npiv_parse_wwn(
+	const char *name,
+	size_t count,
+	u64 *wwpn,
+	u64 *wwnn)
+{
+	unsigned int cnt = count;
+	int rc;
+
+	*wwpn = 0;
+	*wwnn = 0;
+
+	/* count may include a LF at end of string */
+	if (name[cnt-1] == '\n')
+		cnt--;
+
+	/* validate we have enough characters for WWPN */
+	if ((cnt != (16+1+16)) || (name[16] != ':'))
+		return -EINVAL;
+
+	rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
+	if (rc != 0)
+		return rc;
+
+	rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn);
+	if (rc != 0)
+		return rc;
+
+	return 0;
+}
+
+ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len, u64 wwpn, u64 wwnn)
+{
+	u8 b[8], b2[8];
+
+	put_unaligned_be64(wwpn, b);
+	put_unaligned_be64(wwnn, b2);
+	return snprintf(buf, len,
+		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x,"
+		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+		b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
+		b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]);
+}
+
+char *tcm_qla2xxx_npiv_get_fabric_name(void)
+{
+	return "qla2xxx_npiv";
+}
+
+u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+	struct tcm_qla2xxx_lport *lport = tpg->lport;
+	u8 proto_id;
+
+	switch (lport->lport_proto_id) {
+	case SCSI_PROTOCOL_FCP:
+	default:
+		proto_id = fc_get_fabric_proto_ident(se_tpg);
+		break;
+	}
+
+	return proto_id;
+}
+
+char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+	struct tcm_qla2xxx_lport *lport = tpg->lport;
+
+	return &lport->lport_name[0];
+}
+
+char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+	struct tcm_qla2xxx_lport *lport = tpg->lport;
+
+	return &lport->lport_npiv_name[0];
+}
+
+u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+	return tpg->lport_tpgt;
+}
+
+u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+u32 tcm_qla2xxx_get_pr_transport_id(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code,
+	unsigned char *buf)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+	struct tcm_qla2xxx_lport *lport = tpg->lport;
+	int ret = 0;
+
+	switch (lport->lport_proto_id) {
+	case SCSI_PROTOCOL_FCP:
+	default:
+		ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+					format_code, buf);
+		break;
+	}
+
+	return ret;
+}
+
+u32 tcm_qla2xxx_get_pr_transport_id_len(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+	struct tcm_qla2xxx_lport *lport = tpg->lport;
+	int ret = 0;
+
+	switch (lport->lport_proto_id) {
+	case SCSI_PROTOCOL_FCP:
+	default:
+		ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+					format_code);
+		break;
+	}
+
+	return ret;
+}
+
+char *tcm_qla2xxx_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	const char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+	struct tcm_qla2xxx_lport *lport = tpg->lport;
+	char *tid = NULL;
+
+	switch (lport->lport_proto_id) {
+	case SCSI_PROTOCOL_FCP:
+	default:
+		tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+					port_nexus_ptr);
+		break;
+	}
+
+	return tid;
+}
+
+int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+
+	return QLA_TPG_ATTRIB(tpg)->generate_node_acls;
+}
+
+int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+
+	return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls;
+}
+
+int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+
+	return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect;
+}
+
+int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+
+	return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
+}
+
+struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_nacl *nacl;
+
+	nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
+	if (!nacl) {
+		pr_err("Unable to allocate struct tcm_qla2xxx_nacl\n");
+		return NULL;
+	}
+
+	return &nacl->se_node_acl;
+}
+
+void tcm_qla2xxx_release_fabric_acl(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl)
+{
+	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+			struct tcm_qla2xxx_nacl, se_node_acl);
+	kfree(nacl);
+}
+
+u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+
+	return tpg->lport_tpgt;
+}
+
+static void tcm_qla2xxx_complete_free(struct work_struct *work)
+{
+	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+	transport_generic_free_cmd(&cmd->se_cmd, 0);
+}
+
+/*
+ * Called from qla_target_template->free_cmd(), and will call
+ * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
+ * release callback.  qla_hw_data->hardware_lock is expected to be held
+ */
+void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
+{
+	INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
+	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+}
+
+/*
+ * Called from struct target_core_fabric_ops->check_stop_free() context
+ */
+int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
+{
+	return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+}
+
+/* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying fabric descriptor
+ * @se_cmd command to release
+ */
+void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
+{
+	struct qla_tgt_cmd *cmd;
+
+	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
+		struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
+				struct qla_tgt_mgmt_cmd, se_cmd);
+		qla_tgt_free_mcmd(mcmd);
+		return;
+	}
+
+	cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+	qla_tgt_free_cmd(cmd);
+}
+
+int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
+{
+	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+	struct scsi_qla_host *vha;
+	unsigned long flags;
+
+	BUG_ON(!sess);
+	vha = sess->vha;
+
+	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+	sess->tearing_down = 1;
+	target_splice_sess_cmd_list(se_sess);
+	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
+	return 1;
+}
+
+void tcm_qla2xxx_close_session(struct se_session *se_sess)
+{
+	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+	struct scsi_qla_host *vha;
+	unsigned long flags;
+
+	BUG_ON(!sess);
+	vha = sess->vha;
+
+	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+	qla_tgt_unreg_sess(sess);
+	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
+
+u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
+{
+	return 0;
+}
+
+/*
+ * The LIO target core uses DMA_TO_DEVICE to mean that data is going
+ * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
+ * that data is coming from the target (eg handling a READ).  However,
+ * this is just the opposite of what we have to tell the DMA mapping
+ * layer -- eg when handling a READ, the HBA will have to DMA the data
+ * out of memory so it can send it to the initiator, which means we
+ * need to use DMA_TO_DEVICE when we map the data.
+ */
+static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd)
+{
+	if (se_cmd->se_cmd_flags & SCF_BIDI)
+		return DMA_BIDIRECTIONAL;
+
+	switch (se_cmd->data_direction) {
+	case DMA_TO_DEVICE:
+		return DMA_FROM_DEVICE;
+	case DMA_FROM_DEVICE:
+		return DMA_TO_DEVICE;
+	case DMA_NONE:
+	default:
+		return DMA_NONE;
+	}
+}
+
+int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
+{
+	struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+
+	cmd->bufflen = se_cmd->data_length;
+	cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+
+	/*
+	 * Setup the struct se_task->task_sg[] chained SG list
+	 */
+	transport_do_task_sg_chain(se_cmd);
+	cmd->sg_cnt = se_cmd->t_tasks_sg_chained_no;
+	cmd->sg = se_cmd->t_tasks_sg_chained;
+
+	/*
+	 * qla_target.c:qla_tgt_rdy_to_xfer() will call pci_map_sg() to setup
+	 * the SGL mappings into PCIe memory for incoming FCP WRITE data.
+	 */
+	return qla_tgt_rdy_to_xfer(cmd);
+}
+
+int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
+{
+	unsigned long flags;
+	/*
+	 * Check for WRITE_PENDING status to determine if we need to wait for
+	 * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
+	 */
+	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+	if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
+	    se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
+		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+		wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, 3000);
+		return 0;
+	}
+	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+	return 0;
+}
+
+void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
+{
+	return;
+}
+
+u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
+{
+	struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+
+	return cmd->tag;
+}
+
+int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+/*
+ * Called from process context in qla_target.c:qla_tgt_do_work() code
+ */
+int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
+			unsigned char *cdb, uint32_t data_length, int fcp_task_attr,
+			int data_dir, int bidi)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct se_session *se_sess;
+	struct qla_tgt_sess *sess;
+	int flags = TARGET_SCF_ACK_KREF;
+
+	if (bidi)
+		flags |= TARGET_SCF_BIDI_OP;
+
+	sess = cmd->sess;
+	if (!sess) {
+		pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
+		return -EINVAL;
+	}
+
+	se_sess = sess->se_sess;
+	if (!se_sess) {
+		pr_err("Unable to locate active struct se_session\n");
+		return -EINVAL;
+	}
+
+	target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
+				cmd->unpacked_lun, data_length, fcp_task_attr,
+				data_dir, flags);
+	return 0;
+}
+
+void tcm_qla2xxx_do_rsp(struct work_struct *work)
+{
+	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+	/*
+	 * Dispatch ->queue_status from workqueue process context
+	 */
+	transport_generic_request_failure(&cmd->se_cmd);
+}
+
+/*
+ * Called from qla_target.c:qla_tgt_do_ctio_completion()
+ */
+int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	unsigned long flags;
+	/*
+	 * Ensure that the complete FCP WRITE payload has been received.
+	 * Otherwise return an exception via CHECK_CONDITION status.
+	 */
+	if (!cmd->write_data_transferred) {
+		/*
+		 * Check if se_cmd has already been aborted via LUN_RESET, and is
+		 * waiting upon completion in tcm_qla2xxx_write_pending_status()..
+		 */
+		spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+		if (se_cmd->transport_state & CMD_T_ABORTED) {
+			spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+			complete(&se_cmd->t_transport_stop_comp);
+			return 0;
+		}
+		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+		se_cmd->scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
+		INIT_WORK(&cmd->work, tcm_qla2xxx_do_rsp);
+		queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+		return 0;
+	}
+	/*
+	 * We now tell TCM to queue this WRITE CDB with TRANSPORT_PROCESS_WRITE
+	 * status to the backstore processing thread.
+	 */
+	return transport_generic_handle_data(&cmd->se_cmd);
+}
+
+/*
+ * Called from qla_target.c:qla_tgt_issue_task_mgmt()
+ */
+int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
+			uint8_t tmr_func, uint32_t tag)
+{
+	struct qla_tgt_sess *sess = mcmd->sess;
+	struct se_cmd *se_cmd = &mcmd->se_cmd;
+
+	return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
+			tmr_func, GFP_ATOMIC, tag, 0);
+}
+
+int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
+{
+	struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+
+	cmd->bufflen = se_cmd->data_length;
+	cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+
+	/*
+	 * Setup the struct se_task->task_sg[] chained SG list
+	 */
+	transport_do_task_sg_chain(se_cmd);
+	cmd->sg_cnt = se_cmd->t_tasks_sg_chained_no;
+	cmd->sg = se_cmd->t_tasks_sg_chained;
+	cmd->offset = 0;
+
+	/*
+	 * Now queue the completed DATA_IN to the qla2xxx LLD and response ring
+	 */
+	return qla_tgt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
+				se_cmd->scsi_status);
+}
+
+int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
+{
+	struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+	int xmit_type = QLA_TGT_XMIT_STATUS;
+
+	cmd->bufflen = se_cmd->data_length;
+	cmd->sg = NULL;
+	cmd->sg_cnt = 0;
+	cmd->offset = 0;
+	cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+
+	if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+		/*
+		 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
+		 * for qla_tgt_xmit_response LLD code
+		 */
+		se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+		se_cmd->residual_count = se_cmd->data_length;
+
+		cmd->bufflen = 0;
+	}
+	/*
+	 * Now queue status response to qla2xxx LLD code and response ring
+	 */
+	return qla_tgt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
+}
+
+int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+	struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
+				struct qla_tgt_mgmt_cmd, se_cmd);
+
+	pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
+			mcmd, se_tmr->function, se_tmr->response);
+	/*
+	 * Do translation between TCM TM response codes and
+	 * QLA2xxx FC TM response codes.
+	 */
+	switch (se_tmr->response) {
+	case TMR_FUNCTION_COMPLETE:
+		mcmd->fc_tm_rsp = FC_TM_SUCCESS;
+		break;
+	case TMR_TASK_DOES_NOT_EXIST:
+		mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
+		break;
+	case TMR_FUNCTION_REJECTED:
+		mcmd->fc_tm_rsp = FC_TM_REJECT;
+		break;
+	case TMR_LUN_DOES_NOT_EXIST:
+	default:
+		mcmd->fc_tm_rsp = FC_TM_FAILED;
+		break;
+	}
+	/*
+	 * Queue the TM response to QLA2xxx LLD to build a
+	 * CTIO response packet.
+	 */
+	qla_tgt_xmit_tm_rsp(mcmd);
+
+	return 0;
+}
+
+u16 tcm_qla2xxx_get_fabric_sense_len(void)
+{
+	return 0;
+}
+
+u16 tcm_qla2xxx_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
+{
+	return 0;
+}
+
+/* Local pointer to allocated TCM configfs fabric module */
+struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
+struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
+
+struct workqueue_struct *tcm_qla2xxx_free_wq;
+struct workqueue_struct *tcm_qla2xxx_cmd_wq;
+
+static int tcm_qla2xxx_setup_nacl_from_rport(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct tcm_qla2xxx_lport *lport,
+	struct tcm_qla2xxx_nacl *nacl,
+	u64 rport_wwnn)
+{
+	struct scsi_qla_host *vha = lport->qla_vha;
+	struct Scsi_Host *sh = vha->host;
+	struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
+	struct fc_rport *rport;
+	struct tcm_qla2xxx_fc_domain *d;
+	struct tcm_qla2xxx_fc_area *a;
+	struct tcm_qla2xxx_fc_al_pa *p;
+	unsigned long flags;
+	unsigned char domain, area, al_pa;
+	/*
+	 * Scan the existing rports, and create a session for the
+	 * explicit NodeACL if a matching rport->node_name already
+	 * exists.
+	 */
+	spin_lock_irqsave(sh->host_lock, flags);
+	list_for_each_entry(rport, &fc_host->rports, peers) {
+		if (rport_wwnn != rport->node_name)
+			continue;
+
+		pr_debug("Located existing rport_wwpn and rport->node_name:"
+			" 0x%016LX, port_id: 0x%04x\n", rport->node_name,
+			rport->port_id);
+		domain = (rport->port_id >> 16) & 0xff;
+		area = (rport->port_id >> 8) & 0xff;
+		al_pa = rport->port_id & 0xff;
+		nacl->nport_id = rport->port_id;
+
+		pr_debug("fc_rport domain: 0x%02x area: 0x%02x al_pa: %02x\n",
+				domain, area, al_pa);
+		spin_unlock_irqrestore(sh->host_lock, flags);
+
+
+		spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+		d = &((struct tcm_qla2xxx_fc_domain *)lport->lport_fcport_map)[domain];
+		pr_debug("Using d: %p for domain: 0x%02x\n", d, domain);
+		a = &d->areas[area];
+		pr_debug("Using a: %p for area: 0x%02x\n", a, area);
+		p = &a->al_pas[al_pa];
+		pr_debug("Using p: %p for al_pa: 0x%02x\n", p, al_pa);
+
+		p->se_nacl = se_nacl;
+		pr_debug("Setting p->se_nacl to se_nacl: %p for WWNN: 0x%016LX,"
+			" port_id: 0x%04x\n", se_nacl, rport_wwnn,
+			nacl->nport_id);
+		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
+		return 1;
+	}
+	spin_unlock_irqrestore(sh->host_lock, flags);
+
+	return 0;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
+{
+	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
+	struct se_portal_group *se_tpg = se_nacl->se_tpg;
+	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+				struct tcm_qla2xxx_lport, lport_wwn);
+	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+				struct tcm_qla2xxx_nacl, se_node_acl);
+	struct tcm_qla2xxx_fc_domain *d;
+	struct tcm_qla2xxx_fc_area *a;
+	struct tcm_qla2xxx_fc_al_pa *p;
+	unsigned char domain, area, al_pa;
+
+	domain = (nacl->nport_id >> 16) & 0xff;
+	area = (nacl->nport_id >> 8) & 0xff;
+	al_pa = nacl->nport_id & 0xff;
+
+	pr_debug("fc_rport domain: 0x%02x area: 0x%02x al_pa: %02x\n",
+			domain, area, al_pa);
+
+	d = &((struct tcm_qla2xxx_fc_domain *)lport->lport_fcport_map)[domain];
+	pr_debug("Using d: %p for domain: 0x%02x\n", d, domain);
+	a = &d->areas[area];
+	pr_debug("Using a: %p for area: 0x%02x\n", a, area);
+	p = &a->al_pas[al_pa];
+	pr_debug("Using p: %p for al_pa: 0x%02x\n", p, al_pa);
+
+	p->se_nacl = NULL;
+	pr_debug("Clearing p->se_nacl to se_nacl: %p for WWNN: 0x%016LX,"
+		" port_id: 0x%04x\n", se_nacl, nacl->nport_wwnn,
+		nacl->nport_id);
+}
+
+void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
+{
+	target_put_session(sess->se_sess);
+}
+
+static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
+	struct se_portal_group *se_tpg,
+	struct config_group *group,
+	const char *name)
+{
+	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+				struct tcm_qla2xxx_lport, lport_wwn);
+	struct se_node_acl *se_nacl, *se_nacl_new;
+	struct tcm_qla2xxx_nacl *nacl;
+	u64 wwnn;
+	u32 qla2xxx_nexus_depth;
+	int rc;
+
+	if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
+		return ERR_PTR(-EINVAL);
+
+	se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg);
+	if (!se_nacl_new)
+		return ERR_PTR(-ENOMEM);
+	/* FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl() */
+	qla2xxx_nexus_depth = 1;
+
+	/*
+	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+	 * when converting a NodeACL from demo mode -> explicit
+	 */
+	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
+				name, qla2xxx_nexus_depth);
+	if (IS_ERR(se_nacl)) {
+		tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
+		return se_nacl;
+	}
+	/*
+	 * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN
+	 */
+	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+	nacl->nport_wwnn = wwnn;
+	tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);
+	/*
+	 * Setup a se_nacl handle based on a matching struct fc_rport setup
+	 * via drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
+	 */
+	rc = tcm_qla2xxx_setup_nacl_from_rport(se_tpg, se_nacl, lport,
+					nacl, wwnn);
+	if (rc < 0) {
+		tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
+		return ERR_PTR(rc);
+	}
+
+	return se_nacl;
+}
+
+static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl)
+{
+	struct se_portal_group *se_tpg = se_acl->se_tpg;
+	struct tcm_qla2xxx_nacl *nacl = container_of(se_acl,
+				struct tcm_qla2xxx_nacl, se_node_acl);
+
+	core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1);
+	kfree(nacl);
+}
+
+/* Start items for tcm_qla2xxx_tpg_attrib_cit */
+
+#define DEF_QLA_TPG_ATTRIB(name)					\
+									\
+static ssize_t tcm_qla2xxx_tpg_attrib_show_##name(			\
+	struct se_portal_group *se_tpg,					\
+	char *page)							\
+{									\
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
+			struct tcm_qla2xxx_tpg, se_tpg);		\
+									\
+	return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name);	\
+}									\
+									\
+static ssize_t tcm_qla2xxx_tpg_attrib_store_##name(			\
+	struct se_portal_group *se_tpg,					\
+	const char *page,						\
+	size_t count)							\
+{									\
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
+			struct tcm_qla2xxx_tpg, se_tpg);		\
+	unsigned long val;						\
+	int ret;							\
+									\
+	ret = strict_strtoul(page, 0, &val);				\
+	if (ret < 0) {							\
+		pr_err("strict_strtoul() failed with"		\
+				" ret: %d\n", ret);			\
+		return -EINVAL;						\
+	}								\
+	ret = tcm_qla2xxx_set_attrib_##name(tpg, val);			\
+									\
+	return (!ret) ? count : -EINVAL;				\
+}
+
+#define DEF_QLA_TPG_ATTR_BOOL(_name)					\
+									\
+static int tcm_qla2xxx_set_attrib_##_name(				\
+	struct tcm_qla2xxx_tpg *tpg,					\
+	unsigned long val)						\
+{									\
+	struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib;		\
+									\
+	if ((val != 0) && (val != 1)) {					\
+		pr_err("Illegal boolean value %lu\n", val);	\
+		return -EINVAL;						\
+	}								\
+									\
+	a->_name = val;							\
+	return 0;							\
+}
+
+#define QLA_TPG_ATTR(_name, _mode)	TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls
+ */
+DEF_QLA_TPG_ATTR_BOOL(generate_node_acls);
+DEF_QLA_TPG_ATTRIB(generate_node_acls);
+QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_cache_dynamic_acls
+ */
+DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls);
+DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
+QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect
+ */
+DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect);
+DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
+QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect
+ */
+DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
+DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
+QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
+	&tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
+	&tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
+	&tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
+	&tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
+	NULL,
+};
+
+/* End items for tcm_qla2xxx_tpg_attrib_cit */
+
+static ssize_t tcm_qla2xxx_tpg_show_enable(
+	struct se_portal_group *se_tpg,
+	char *page)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+			struct tcm_qla2xxx_tpg, se_tpg);
+
+	return snprintf(page, PAGE_SIZE, "%d\n",
+			atomic_read(&tpg->lport_tpg_enabled));
+}
+
+static ssize_t tcm_qla2xxx_tpg_store_enable(
+	struct se_portal_group *se_tpg,
+	const char *page,
+	size_t count)
+{
+	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+			struct tcm_qla2xxx_lport, lport_wwn);
+	struct scsi_qla_host *vha = lport->qla_vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+			struct tcm_qla2xxx_tpg, se_tpg);
+	char *endptr;
+	u32 op;
+
+	op = simple_strtoul(page, &endptr, 0);
+	if ((op != 1) && (op != 0)) {
+		pr_err("Illegal value for tpg_enable: %u\n", op);
+		return -EINVAL;
+	}
+
+	if (op) {
+		atomic_set(&tpg->lport_tpg_enabled, 1);
+		qla_tgt_enable_vha(vha);
+	} else {
+		if (!ha->qla_tgt) {
+			pr_err("struct qla_hw_data *ha->qla_tgt is NULL\n");
+			return -ENODEV;
+		}
+		atomic_set(&tpg->lport_tpg_enabled, 0);
+		qla_tgt_stop_phase1(ha->qla_tgt);
+	}
+
+	return count;
+}
+
+TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
+	&tcm_qla2xxx_tpg_enable.attr,
+	NULL,
+};
+
+static struct se_portal_group *tcm_qla2xxx_make_tpg(
+	struct se_wwn *wwn,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_qla2xxx_lport *lport = container_of(wwn,
+			struct tcm_qla2xxx_lport, lport_wwn);
+	struct tcm_qla2xxx_tpg *tpg;
+	unsigned long tpgt;
+	int ret;
+
+	if (strstr(name, "tpgt_") != name)
+		return ERR_PTR(-EINVAL);
+	if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
+		return ERR_PTR(-EINVAL);
+
+	if (!lport->qla_npiv_vp && (tpgt != 1)) {
+		pr_err("In non NPIV mode, a single TPG=1 is used for"
+			" HW port mappings\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
+	if (!tpg) {
+		pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	tpg->lport = lport;
+	tpg->lport_tpgt = tpgt;
+	/*
+	 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic NodeACLs
+	 */
+	QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
+	QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
+	QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;
+
+	ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
+				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+	if (ret < 0) {
+		kfree(tpg);
+		return NULL;
+	}
+	/*
+	 * Setup local TPG=1 pointer for non NPIV mode.
+	 */
+	if (lport->qla_npiv_vp == NULL)
+		lport->tpg_1 = tpg;
+
+	return &tpg->se_tpg;
+}
+
+static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+			struct tcm_qla2xxx_tpg, se_tpg);
+	struct tcm_qla2xxx_lport *lport = tpg->lport;
+	struct scsi_qla_host *vha = lport->qla_vha;
+	struct qla_hw_data *ha = vha->hw;
+	/*
+	 * Call into qla2x_target.c LLD logic to shutdown the active
+	 * FC Nexuses and disable target mode operation for this qla_hw_data
+	 */
+	if (ha->qla_tgt && !ha->qla_tgt->tgt_stop)
+		qla_tgt_stop_phase1(ha->qla_tgt);
+
+	core_tpg_deregister(se_tpg);
+	/*
+	 * Clear local TPG=1 pointer for non NPIV mode.
+	 */
+	if (lport->qla_npiv_vp == NULL)
+		lport->tpg_1 = NULL;
+
+	kfree(tpg);
+}
+
+static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
+	struct se_wwn *wwn,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_qla2xxx_lport *lport = container_of(wwn,
+			struct tcm_qla2xxx_lport, lport_wwn);
+	struct tcm_qla2xxx_tpg *tpg;
+	unsigned long tpgt;
+	int ret;
+
+	if (strstr(name, "tpgt_") != name)
+		return ERR_PTR(-EINVAL);
+	if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
+		return ERR_PTR(-EINVAL);
+
+	tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
+	if (!tpg) {
+		pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	tpg->lport = lport;
+	tpg->lport_tpgt = tpgt;
+
+	ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
+				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+	if (ret < 0) {
+		kfree(tpg);
+		return NULL;
+	}
+	return &tpg->se_tpg;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
+	scsi_qla_host_t *vha,
+	const uint8_t *s_id)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct tcm_qla2xxx_lport *lport;
+	struct se_node_acl *se_nacl;
+	struct tcm_qla2xxx_nacl *nacl;
+	struct tcm_qla2xxx_fc_domain *d;
+	struct tcm_qla2xxx_fc_area *a;
+	struct tcm_qla2xxx_fc_al_pa *p;
+	unsigned char domain, area, al_pa;
+
+	lport = ha->target_lport_ptr;
+	if (!lport) {
+		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+		dump_stack();
+		return NULL;
+	}
+
+	domain = s_id[0];
+	area = s_id[1];
+	al_pa = s_id[2];
+
+	pr_debug("find_sess_by_s_id: 0x%02x area: 0x%02x al_pa: %02x\n",
+			domain, area, al_pa);
+
+	d = &((struct tcm_qla2xxx_fc_domain *)lport->lport_fcport_map)[domain];
+	pr_debug("Using d: %p for domain: 0x%02x\n", d, domain);
+	a = &d->areas[area];
+	pr_debug("Using a: %p for area: 0x%02x\n", a, area);
+	p = &a->al_pas[al_pa];
+	pr_debug("Using p: %p for al_pa: 0x%02x\n", p, al_pa);
+
+	se_nacl = p->se_nacl;
+	if (!se_nacl) {
+		pr_debug("Unable to locate s_id: 0x%02x area: 0x%02x"
+			" al_pa: %02x\n", domain, area, al_pa);
+		return NULL;
+	}
+	pr_debug("find_sess_by_s_id: located se_nacl: %p,"
+		" initiatorname: %s\n", se_nacl, se_nacl->initiatorname);
+
+	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+	if (!nacl->qla_tgt_sess) {
+		pr_err("Unable to locate struct qla_tgt_sess\n");
+		return NULL;
+	}
+
+	return nacl->qla_tgt_sess;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static void tcm_qla2xxx_set_sess_by_s_id(
+	struct tcm_qla2xxx_lport *lport,
+	struct se_node_acl *new_se_nacl,
+	struct tcm_qla2xxx_nacl *nacl,
+	struct se_session *se_sess,
+	struct qla_tgt_sess *qla_tgt_sess,
+	uint8_t *s_id)
+{
+	struct se_node_acl *saved_nacl;
+	struct tcm_qla2xxx_fc_domain *d;
+	struct tcm_qla2xxx_fc_area *a;
+	struct tcm_qla2xxx_fc_al_pa *p;
+	unsigned char domain, area, al_pa;
+
+	domain = s_id[0];
+	area = s_id[1];
+	al_pa = s_id[2];
+	pr_debug("set_sess_by_s_id: domain 0x%02x area: 0x%02x al_pa: %02x\n",
+			domain, area, al_pa);
+
+	d = &((struct tcm_qla2xxx_fc_domain *)lport->lport_fcport_map)[domain];
+	pr_debug("Using d: %p for domain: 0x%02x\n", d, domain);
+	a = &d->areas[area];
+	pr_debug("Using a: %p for area: 0x%02x\n", a, area);
+	p = &a->al_pas[al_pa];
+	pr_debug("Using p: %p for al_pa: 0x%02x\n", p, al_pa);
+
+	saved_nacl = p->se_nacl;
+	if (!saved_nacl) {
+		pr_debug("Setting up new p->se_nacl to new_se_nacl\n");
+		p->se_nacl = new_se_nacl;
+		qla_tgt_sess->se_sess = se_sess;
+		nacl->qla_tgt_sess = qla_tgt_sess;
+		return;
+	}
+
+	if (nacl->qla_tgt_sess) {
+		if (new_se_nacl == NULL) {
+			pr_debug("Clearing existing nacl->qla_tgt_sess"
+					" and p->se_nacl\n");
+			p->se_nacl = NULL;
+			nacl->qla_tgt_sess = NULL;
+			return;
+		}
+		pr_debug("Replacing existing nacl->qla_tgt_sess and"
+				" p->se_nacl\n");
+		p->se_nacl = new_se_nacl;
+		qla_tgt_sess->se_sess = se_sess;
+		nacl->qla_tgt_sess = qla_tgt_sess;
+		return;
+	}
+
+	if (new_se_nacl == NULL) {
+		pr_debug("Clearing existing p->se_nacl\n");
+		p->se_nacl = NULL;
+		return;
+	}
+
+	pr_debug("Replacing existing p->se_nacl w/o active"
+				" nacl->qla_tgt_sess\n");
+	p->se_nacl = new_se_nacl;
+	qla_tgt_sess->se_sess = se_sess;
+	nacl->qla_tgt_sess = qla_tgt_sess;
+
+	pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p,"
+		" initiatorname: %s\n", nacl->qla_tgt_sess, new_se_nacl,
+		new_se_nacl->initiatorname);
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
+	scsi_qla_host_t *vha,
+	const uint16_t loop_id)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct tcm_qla2xxx_lport *lport;
+	struct se_node_acl *se_nacl;
+	struct tcm_qla2xxx_nacl *nacl;
+	struct tcm_qla2xxx_fc_loopid *fc_loopid;
+
+	lport = ha->target_lport_ptr;
+	if (!lport) {
+		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+		dump_stack();
+		return NULL;
+	}
+
+	pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
+
+	fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)lport->lport_loopid_map)[loop_id];
+
+	se_nacl = fc_loopid->se_nacl;
+	if (!se_nacl) {
+		pr_debug("Unable to locate se_nacl by loop_id:"
+				" 0x%04x\n", loop_id);
+		return NULL;
+	}
+
+	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+
+	if (!nacl->qla_tgt_sess) {
+		pr_err("Unable to locate struct qla_tgt_sess\n");
+		return NULL;
+	}
+
+	return nacl->qla_tgt_sess;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static void tcm_qla2xxx_set_sess_by_loop_id(
+	struct tcm_qla2xxx_lport *lport,
+	struct se_node_acl *new_se_nacl,
+	struct tcm_qla2xxx_nacl *nacl,
+	struct se_session *se_sess,
+	struct qla_tgt_sess *qla_tgt_sess,
+	uint16_t loop_id)
+{
+	struct se_node_acl *saved_nacl;
+	struct tcm_qla2xxx_fc_loopid *fc_loopid;
+
+	pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
+
+	fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)lport->lport_loopid_map)[loop_id];
+
+	saved_nacl = fc_loopid->se_nacl;
+	if (!saved_nacl) {
+		pr_debug("Setting up new fc_loopid->se_nacl"
+				" to new_se_nacl\n");
+		fc_loopid->se_nacl = new_se_nacl;
+		if (qla_tgt_sess->se_sess != se_sess)
+			qla_tgt_sess->se_sess = se_sess;
+		if (nacl->qla_tgt_sess != qla_tgt_sess)
+			nacl->qla_tgt_sess = qla_tgt_sess;
+		return;
+	}
+
+	if (nacl->qla_tgt_sess) {
+		if (new_se_nacl == NULL) {
+			pr_debug("Clearing nacl->qla_tgt_sess and"
+					" fc_loopid->se_nacl\n");
+			fc_loopid->se_nacl = NULL;
+			nacl->qla_tgt_sess = NULL;
+			return;
+		}
+
+		pr_debug("Replacing existing nacl->qla_tgt_sess and"
+				" fc_loopid->se_nacl\n");
+		fc_loopid->se_nacl = new_se_nacl;
+		if (qla_tgt_sess->se_sess != se_sess)
+			qla_tgt_sess->se_sess = se_sess;
+		if (nacl->qla_tgt_sess != qla_tgt_sess)
+			nacl->qla_tgt_sess = qla_tgt_sess;
+		return;
+	}
+
+	if (new_se_nacl == NULL) {
+		pr_debug("Clearing fc_loopid->se_nacl\n");
+		fc_loopid->se_nacl = NULL;
+		return;
+	}
+
+	pr_debug("Replacing existing fc_loopid->se_nacl w/o"
+			" active nacl->qla_tgt_sess\n");
+	fc_loopid->se_nacl = new_se_nacl;
+	if (qla_tgt_sess->se_sess != se_sess)
+		qla_tgt_sess->se_sess = se_sess;
+	if (nacl->qla_tgt_sess != qla_tgt_sess)
+		nacl->qla_tgt_sess = qla_tgt_sess;
+
+	pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p,"
+		" initiatorname: %s\n", nacl->qla_tgt_sess, new_se_nacl,
+		new_se_nacl->initiatorname);
+}
+
+static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
+{
+	struct qla_tgt *tgt = sess->tgt;
+	struct qla_hw_data *ha = tgt->ha;
+	struct se_session *se_sess;
+	struct se_node_acl *se_nacl;
+	struct tcm_qla2xxx_lport *lport;
+	struct tcm_qla2xxx_nacl *nacl;
+	unsigned char be_sid[3];
+	unsigned long flags;
+
+	BUG_ON(in_interrupt());
+
+	se_sess = sess->se_sess;
+	if (!se_sess) {
+		pr_err("struct qla_tgt_sess->se_sess is NULL\n");
+		dump_stack();
+		return;
+	}
+	se_nacl = se_sess->se_node_acl;
+	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+
+	lport = ha->target_lport_ptr;
+	if (!lport) {
+		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+		dump_stack();
+		return;
+	}
+	target_wait_for_sess_cmds(se_sess, 0);
+	/*
+	 * And now clear the se_nacl and session pointers from our HW lport
+	 * mappings for fabric S_ID and LOOP_ID.
+	 */
+	memset(&be_sid, 0, 3);
+	be_sid[0] = sess->s_id.b.domain;
+	be_sid[1] = sess->s_id.b.area;
+	be_sid[2] = sess->s_id.b.al_pa;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
+			sess, be_sid);
+	tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
+			sess, sess->loop_id);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	transport_deregister_session_configfs(sess->se_sess);
+	transport_deregister_session(sess->se_sess);
+}
+
+/*
+ * Called via qla_tgt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
+ * to locate struct se_node_acl
+ */
+static int tcm_qla2xxx_check_initiator_node_acl(
+	scsi_qla_host_t *vha,
+	unsigned char *fc_wwpn,
+	void *qla_tgt_sess,
+	uint8_t *s_id,
+	uint16_t loop_id)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct tcm_qla2xxx_lport *lport;
+	struct tcm_qla2xxx_tpg *tpg;
+	struct tcm_qla2xxx_nacl *nacl;
+	struct se_portal_group *se_tpg;
+	struct se_node_acl *se_nacl;
+	struct se_session *se_sess;
+	struct qla_tgt_sess *sess = qla_tgt_sess;
+	unsigned char port_name[36];
+	unsigned long flags;
+
+	lport = ha->target_lport_ptr;
+	if (!lport) {
+		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+		dump_stack();
+		return -EINVAL;
+	}
+	/*
+	 * Locate the TPG=1 reference..
+	 */
+	tpg = lport->tpg_1;
+	if (!tpg) {
+		pr_err("Unable to locate struct tcm_qla2xxx_lport->tpg_1\n");
+		return -EINVAL;
+	}
+	se_tpg = &tpg->se_tpg;
+
+	se_sess = transport_init_session();
+	if (IS_ERR(se_sess)) {
+		pr_err("Unable to initialize struct se_session\n");
+		return PTR_ERR(se_sess);
+	}
+	/*
+	 * Format the FCP Initiator port_name into colon-separated values to match
+	 * the format used by tcm_qla2xxx explicit ConfigFS NodeACLs.
+	 */
+	memset(&port_name, 0, 36);
+	snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
+		fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
+		fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
+	/*
+	 * Locate our struct se_node_acl either from an explicit NodeACL created
+	 * via ConfigFS, or via running in TPG demo mode.
+	 */
+	se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg, port_name);
+	if (!se_sess->se_node_acl) {
+		transport_free_session(se_sess);
+		return -EINVAL;
+	}
+	se_nacl = se_sess->se_node_acl;
+	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+	/*
+	 * And now setup the new se_nacl and session pointers into our HW lport
+	 * mappings for fabric S_ID and LOOP_ID.
+	 */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
+			qla_tgt_sess, s_id);
+	tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
+			qla_tgt_sess, loop_id);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	/*
+	 * Finally register the new FC Nexus with TCM
+	 */
+	__transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
+
+	return 0;
+}
+
+/*
+ * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
+ */
+static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
+	.handle_cmd		= tcm_qla2xxx_handle_cmd,
+	.handle_data		= tcm_qla2xxx_handle_data,
+	.handle_tmr		= tcm_qla2xxx_handle_tmr,
+	.free_cmd		= tcm_qla2xxx_free_cmd,
+	.free_session		= tcm_qla2xxx_free_session,
+	.check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
+	.find_sess_by_s_id	= tcm_qla2xxx_find_sess_by_s_id,
+	.find_sess_by_loop_id	= tcm_qla2xxx_find_sess_by_loop_id,
+	.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
+	.put_sess		= tcm_qla2xxx_put_sess,
+};
+
+static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
+{
+	lport->lport_fcport_map = vmalloc(
+			sizeof(struct tcm_qla2xxx_fc_domain) * 256);
+	if (!lport->lport_fcport_map) {
+		pr_err("Unable to allocate lport_fcport_map of %lu"
+			" bytes\n", sizeof(struct tcm_qla2xxx_fc_domain) * 256);
+		return -ENOMEM;
+	}
+	memset(lport->lport_fcport_map, 0,
+			sizeof(struct tcm_qla2xxx_fc_domain) * 256);
+	pr_debug("qla2xxx: Allocated lport_fcport_map of %lu bytes\n",
+			sizeof(struct tcm_qla2xxx_fc_domain) * 256);
+
+	lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
+				65536);
+	if (!lport->lport_loopid_map) {
+		pr_err("Unable to allocate lport->lport_loopid_map"
+			" of %lu bytes\n", sizeof(struct tcm_qla2xxx_fc_loopid)
+			* 65536);
+		vfree(lport->lport_fcport_map);
+		return -ENOMEM;
+	}
+	memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
+			* 65536);
+	pr_debug("qla2xxx: Allocated lport_loopid_map of %lu bytes\n",
+			sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
+	return 0;
+}
+
+static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct tcm_qla2xxx_lport *lport;
+	/*
+	 * Setup local pointer to vha, NPIV VP pointer (if present) and
+	 * vha->tcm_lport pointer
+	 */
+	lport = (struct tcm_qla2xxx_lport *)ha->target_lport_ptr;
+	lport->qla_vha = vha;
+
+	return 0;
+}
+
+static struct se_wwn *tcm_qla2xxx_make_lport(
+	struct target_fabric_configfs *tf,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_qla2xxx_lport *lport;
+	u64 wwpn;
+	int ret = -ENODEV;
+
+	if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0)
+		return ERR_PTR(-EINVAL);
+
+	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
+	if (!lport) {
+		pr_err("Unable to allocate struct tcm_qla2xxx_lport\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	lport->lport_wwpn = wwpn;
+	tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN, wwpn);
+
+	ret = tcm_qla2xxx_init_lport(lport);
+	if (ret != 0)
+		goto out;
+
+	ret = qla_tgt_lport_register(&tcm_qla2xxx_template, wwpn,
+				tcm_qla2xxx_lport_register_cb, lport);
+	if (ret != 0)
+		goto out_lport;
+
+	return &lport->lport_wwn;
+out_lport:
+	vfree(lport->lport_loopid_map);
+	vfree(lport->lport_fcport_map);
+out:
+	kfree(lport);
+	return ERR_PTR(ret);
+}
+
+static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
+{
+	struct tcm_qla2xxx_lport *lport = container_of(wwn,
+			struct tcm_qla2xxx_lport, lport_wwn);
+	struct scsi_qla_host *vha = lport->qla_vha;
+	struct qla_hw_data *ha = vha->hw;
+	/*
+	 * Call into qla2x_target.c LLD logic to complete the
+	 * shutdown of struct qla_tgt after the call to
+	 * qla_tgt_stop_phase1() from tcm_qla2xxx_drop_tpg() above..
+	 */
+	if (ha->qla_tgt && !ha->qla_tgt->tgt_stopped)
+		qla_tgt_stop_phase2(ha->qla_tgt);
+
+	qla_tgt_lport_deregister(vha);
+
+	vfree(lport->lport_loopid_map);
+	vfree(lport->lport_fcport_map);
+	kfree(lport);
+}
+
+static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
+	struct target_fabric_configfs *tf,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_qla2xxx_lport *lport;
+	u64 npiv_wwpn, npiv_wwnn;
+	int ret;
+
+	if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1,
+				&npiv_wwpn, &npiv_wwnn) < 0)
+		return ERR_PTR(-EINVAL);
+
+	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
+	if (!lport) {
+		pr_err("Unable to allocate struct tcm_qla2xxx_lport"
+				" for NPIV\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	lport->lport_npiv_wwpn = npiv_wwpn;
+	lport->lport_npiv_wwnn = npiv_wwnn;
+	tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
+			TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
+
+/* FIXME: tcm_qla2xxx_npiv_make_lport */
+	ret = -ENOSYS;
+	if (ret != 0)
+		goto out;
+
+	return &lport->lport_wwn;
+out:
+	kfree(lport);
+	return ERR_PTR(ret);
+}
+
+static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
+{
+	struct tcm_qla2xxx_lport *lport = container_of(wwn,
+			struct tcm_qla2xxx_lport, lport_wwn);
+	struct scsi_qla_host *vha = lport->qla_vha;
+	struct Scsi_Host *sh = vha->host;
+	/*
+	 * Notify the FC transport that we want to release the lport->npiv_vport
+	 */
+	fc_vport_terminate(lport->npiv_vport);
+
+	scsi_host_put(sh);
+	kfree(lport);
+}
+
+
+static ssize_t tcm_qla2xxx_wwn_show_attr_version(
+	struct target_fabric_configfs *tf,
+	char *page)
+{
+	return sprintf(page, "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s"
+		" on "UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+		utsname()->machine);
+}
+
+TF_WWN_ATTR_RO(tcm_qla2xxx, version);
+
+static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
+	&tcm_qla2xxx_wwn_version.attr,
+	NULL,
+};
+
+static struct target_core_fabric_ops tcm_qla2xxx_ops = {
+	.get_fabric_name		= tcm_qla2xxx_get_fabric_name,
+	.get_fabric_proto_ident		= tcm_qla2xxx_get_fabric_proto_ident,
+	.tpg_get_wwn			= tcm_qla2xxx_get_fabric_wwn,
+	.tpg_get_tag			= tcm_qla2xxx_get_tag,
+	.tpg_get_default_depth		= tcm_qla2xxx_get_default_depth,
+	.tpg_get_pr_transport_id	= tcm_qla2xxx_get_pr_transport_id,
+	.tpg_get_pr_transport_id_len	= tcm_qla2xxx_get_pr_transport_id_len,
+	.tpg_parse_pr_out_transport_id	= tcm_qla2xxx_parse_pr_out_transport_id,
+	.tpg_check_demo_mode		= tcm_qla2xxx_check_demo_mode,
+	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_demo_mode_cache,
+	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_write_protect,
+	.tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_prod_write_protect,
+	.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
+	.tpg_alloc_fabric_acl		= tcm_qla2xxx_alloc_fabric_acl,
+	.tpg_release_fabric_acl		= tcm_qla2xxx_release_fabric_acl,
+	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
+	.new_cmd_map			= NULL,
+	.check_stop_free		= tcm_qla2xxx_check_stop_free,
+	.release_cmd			= tcm_qla2xxx_release_cmd,
+	.shutdown_session		= tcm_qla2xxx_shutdown_session,
+	.close_session			= tcm_qla2xxx_close_session,
+	.sess_get_index			= tcm_qla2xxx_sess_get_index,
+	.sess_get_initiator_sid		= NULL,
+	.write_pending			= tcm_qla2xxx_write_pending,
+	.write_pending_status		= tcm_qla2xxx_write_pending_status,
+	.set_default_node_attributes	= tcm_qla2xxx_set_default_node_attrs,
+	.get_task_tag			= tcm_qla2xxx_get_task_tag,
+	.get_cmd_state			= tcm_qla2xxx_get_cmd_state,
+	.queue_data_in			= tcm_qla2xxx_queue_data_in,
+	.queue_status			= tcm_qla2xxx_queue_status,
+	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp,
+	.get_fabric_sense_len		= tcm_qla2xxx_get_fabric_sense_len,
+	.set_fabric_sense_len		= tcm_qla2xxx_set_fabric_sense_len,
+	/*
+	 * Setup function pointers for generic logic in target_core_fabric_configfs.c
+	 */
+	.fabric_make_wwn		= tcm_qla2xxx_make_lport,
+	.fabric_drop_wwn		= tcm_qla2xxx_drop_lport,
+	.fabric_make_tpg		= tcm_qla2xxx_make_tpg,
+	.fabric_drop_tpg		= tcm_qla2xxx_drop_tpg,
+	.fabric_post_link		= NULL,
+	.fabric_pre_unlink		= NULL,
+	.fabric_make_np			= NULL,
+	.fabric_drop_np			= NULL,
+	.fabric_make_nodeacl		= tcm_qla2xxx_make_nodeacl,
+	.fabric_drop_nodeacl		= tcm_qla2xxx_drop_nodeacl,
+};
+
+static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
+	.get_fabric_name		= tcm_qla2xxx_npiv_get_fabric_name,
+	.get_fabric_proto_ident		= tcm_qla2xxx_get_fabric_proto_ident,
+	.tpg_get_wwn			= tcm_qla2xxx_npiv_get_fabric_wwn,
+	.tpg_get_tag			= tcm_qla2xxx_get_tag,
+	.tpg_get_default_depth		= tcm_qla2xxx_get_default_depth,
+	.tpg_get_pr_transport_id	= tcm_qla2xxx_get_pr_transport_id,
+	.tpg_get_pr_transport_id_len	= tcm_qla2xxx_get_pr_transport_id_len,
+	.tpg_parse_pr_out_transport_id	= tcm_qla2xxx_parse_pr_out_transport_id,
+	.tpg_check_demo_mode		= tcm_qla2xxx_check_false,
+	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_true,
+	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
+	.tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
+	.tpg_check_demo_mode_login_only	= tcm_qla2xxx_check_true,
+	.tpg_alloc_fabric_acl		= tcm_qla2xxx_alloc_fabric_acl,
+	.tpg_release_fabric_acl		= tcm_qla2xxx_release_fabric_acl,
+	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
+	.release_cmd			= tcm_qla2xxx_release_cmd,
+	.shutdown_session		= tcm_qla2xxx_shutdown_session,
+	.close_session			= tcm_qla2xxx_close_session,
+	.sess_get_index			= tcm_qla2xxx_sess_get_index,
+	.sess_get_initiator_sid		= NULL,
+	.write_pending			= tcm_qla2xxx_write_pending,
+	.write_pending_status		= tcm_qla2xxx_write_pending_status,
+	.set_default_node_attributes	= tcm_qla2xxx_set_default_node_attrs,
+	.get_task_tag			= tcm_qla2xxx_get_task_tag,
+	.get_cmd_state			= tcm_qla2xxx_get_cmd_state,
+	.queue_data_in			= tcm_qla2xxx_queue_data_in,
+	.queue_status			= tcm_qla2xxx_queue_status,
+	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp,
+	.get_fabric_sense_len		= tcm_qla2xxx_get_fabric_sense_len,
+	.set_fabric_sense_len		= tcm_qla2xxx_set_fabric_sense_len,
+	/*
+	 * Setup function pointers for generic logic in target_core_fabric_configfs.c
+	 */
+	.fabric_make_wwn		= tcm_qla2xxx_npiv_make_lport,
+	.fabric_drop_wwn		= tcm_qla2xxx_npiv_drop_lport,
+	.fabric_make_tpg		= tcm_qla2xxx_npiv_make_tpg,
+	.fabric_drop_tpg		= tcm_qla2xxx_drop_tpg,
+	.fabric_post_link		= NULL,
+	.fabric_pre_unlink		= NULL,
+	.fabric_make_np			= NULL,
+	.fabric_drop_np			= NULL,
+	.fabric_make_nodeacl		= tcm_qla2xxx_make_nodeacl,
+	.fabric_drop_nodeacl		= tcm_qla2xxx_drop_nodeacl,
+};
+
+static int tcm_qla2xxx_register_configfs(void)
+{
+	struct target_fabric_configfs *fabric, *npiv_fabric;
+	int ret;
+
+	pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s"
+		" on "UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+		utsname()->machine);
+	/*
+	 * Register the top level struct config_item_type with TCM core
+	 */
+	fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx");
+	if (IS_ERR(fabric)) {
+		pr_err("target_fabric_configfs_init() failed\n");
+		return PTR_ERR(fabric);
+	}
+	/*
+	 * Setup fabric->tf_ops from our local tcm_qla2xxx_ops
+	 */
+	fabric->tf_ops = tcm_qla2xxx_ops;
+	/*
+	 * Setup the struct se_task->task_sg[] chaining bit
+	 */
+	fabric->tf_ops.task_sg_chaining = 1;
+	/*
+	 * Setup default attribute lists for various fabric->tf_cit_tmpl
+	 */
+	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = tcm_qla2xxx_tpg_attrib_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+	/*
+	 * Register the fabric for use within TCM
+	 */
+	ret = target_fabric_configfs_register(fabric);
+	if (ret < 0) {
+		pr_err("target_fabric_configfs_register() failed"
+				" for TCM_QLA2XXX\n");
+		return ret;
+	}
+	/*
+	 * Setup our local pointer to *fabric
+	 */
+	tcm_qla2xxx_fabric_configfs = fabric;
+	pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n");
+
+	/*
+	 * Register the top level struct config_item_type for NPIV with TCM core
+	 */
+	npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv");
+	if (IS_ERR(npiv_fabric)) {
+		pr_err("target_fabric_configfs_init() failed\n");
+		ret = PTR_ERR(npiv_fabric);
+		goto out_fabric;
+	}
+	/*
+	 * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops
+	 */
+	npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops;
+	/*
+	 * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
+	 */
+	TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
+	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+	/*
+	 * Register the npiv_fabric for use within TCM
+	 */
+	ret = target_fabric_configfs_register(npiv_fabric);
+	if (ret < 0) {
+		pr_err("target_fabric_configfs_register() failed"
+				" for TCM_QLA2XXX NPIV\n");
+		goto out_fabric;
+	}
+	/*
+	 * Setup our local pointer to *npiv_fabric
+	 */
+	tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric;
+	pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n");
+
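+	/*
+	 * Workqueues for deferred command processing (tcm_qla2xxx_cmd_wq)
+	 * and command release (tcm_qla2xxx_free_wq); the latter is created
+	 * with WQ_MEM_RECLAIM so that release work can make forward progress
+	 * under memory pressure.
+	 */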
+	tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
+						WQ_MEM_RECLAIM, 0);
+	if (!tcm_qla2xxx_free_wq) {
+		ret = -ENOMEM;
+		goto out_fabric_npiv;
+	}
+
+	tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
+	if (!tcm_qla2xxx_cmd_wq) {
+		ret = -ENOMEM;
+		goto out_free_wq;
+	}
+
+	return 0;
+
+out_free_wq:
+	destroy_workqueue(tcm_qla2xxx_free_wq);
+out_fabric_npiv:
+	target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
+out_fabric:
+	target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
+	return ret;
+}
+
+static void tcm_qla2xxx_deregister_configfs(void)
+{
+	destroy_workqueue(tcm_qla2xxx_cmd_wq);
+	destroy_workqueue(tcm_qla2xxx_free_wq);
+
+	target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
+	tcm_qla2xxx_fabric_configfs = NULL;
+	pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n");
+
+	target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
+	tcm_qla2xxx_npiv_fabric_configfs = NULL;
+	pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n");
+}
+
+static int __init tcm_qla2xxx_init(void)
+{
+	int ret;
+
+	ret = tcm_qla2xxx_register_configfs();
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static void __exit tcm_qla2xxx_exit(void)
+{
+	tcm_qla2xxx_deregister_configfs();
+}
+
+MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver");
+MODULE_LICENSE("GPL");
+module_init(tcm_qla2xxx_init);
+module_exit(tcm_qla2xxx_exit);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
new file mode 100644
index 0000000..e700c15
--- /dev/null
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -0,0 +1,149 @@
+#include <target/target_core_base.h>
+
+#define TCM_QLA2XXX_VERSION	"v0.1"
+/* length of ASCII WWPNs including pad */
+#define TCM_QLA2XXX_NAMELEN	32
+/* length of ASCII NPIV 'WWPN+WWNN' including pad */
+#define TCM_QLA2XXX_NPIV_NAMELEN 66
+
+#include "qla_target.h"
+
+struct tcm_qla2xxx_nacl {
+	/* From libfc struct fc_rport->port_id */
+	u16 nport_id;
+	/* Binary World Wide unique Node Name for remote FC Initiator Nport */
+	u64 nport_wwnn;
+	/* ASCII formatted WWPN for FC Initiator Nport */
+	char nport_name[TCM_QLA2XXX_NAMELEN];
+	/* Pointer to qla_tgt_sess */
+	struct qla_tgt_sess *qla_tgt_sess;
+	/* Pointer to TCM FC nexus */
+	struct se_session *nport_nexus;
+	/* Returned by tcm_qla2xxx_make_nodeacl() */
+	struct se_node_acl se_node_acl;
+};
+
+struct tcm_qla2xxx_tpg_attrib {
+	int generate_node_acls;
+	int cache_dynamic_acls;
+	int demo_mode_write_protect;
+	int prod_mode_write_protect;
+};
+
+struct tcm_qla2xxx_tpg {
+	/* FC lport target portal group tag for TCM */
+	u16 lport_tpgt;
+	/* Atomic bit to determine TPG active status */
+	atomic_t lport_tpg_enabled;
+	/* Pointer back to tcm_qla2xxx_lport */
+	struct tcm_qla2xxx_lport *lport;
+	/* Used by tcm_qla2xxx_tpg_attrib_cit */
+	struct tcm_qla2xxx_tpg_attrib tpg_attrib;
+	/* Returned by tcm_qla2xxx_make_tpg() */
+	struct se_portal_group se_tpg;
+};
+
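+/* Shorthand accessor for the per-TPG attribute block */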
+#define QLA_TPG_ATTRIB(tpg)	(&(tpg)->tpg_attrib)
+
+/*
+ * Used for the 24-bit lport->lport_fcport_map
+ */
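+/*
+ * A 24-bit FC Port ID decomposes into domain (bits 23:16), area (bits 15:8)
+ * and AL_PA (bits 7:0); each level below is a 256-entry table, so the full
+ * map resolves a Port ID to its struct se_node_acl pointer.
+ */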
+struct tcm_qla2xxx_fc_al_pa {
+	struct se_node_acl *se_nacl;
+};
+
+struct tcm_qla2xxx_fc_area {
+	struct tcm_qla2xxx_fc_al_pa al_pas[256];
+};
+
+struct tcm_qla2xxx_fc_domain {
+	struct tcm_qla2xxx_fc_area areas[256];
+};
+
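+/*
+ * Used for the 16-bit lport->lport_loopid_map
+ */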
+struct tcm_qla2xxx_fc_loopid {
+	struct se_node_acl *se_nacl;
+};
+
+struct tcm_qla2xxx_lport {
+	/* SCSI protocol the lport is providing */
+	u8 lport_proto_id;
+	/* Binary World Wide unique Port Name for FC Target Lport */
+	u64 lport_wwpn;
+	/* Binary World Wide unique Port Name for FC NPIV Target Lport */
+	u64 lport_npiv_wwpn;
+	/* Binary World Wide unique Node Name for FC NPIV Target Lport */
+	u64 lport_npiv_wwnn;
+	/* ASCII formatted WWPN for FC Target Lport */
+	char lport_name[TCM_QLA2XXX_NAMELEN];
+	/* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */
+	char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];
+	/* vmalloc'ed map of struct se_node_acl pointers in 24-bit FC Port ID space */
+	char *lport_fcport_map;
+	/* vmalloc'ed map of struct se_node_acl pointers in 16-bit FC loop ID space */
+	char *lport_loopid_map;
+	/* Pointer to struct scsi_qla_host from qla2xxx LLD */
+	struct scsi_qla_host *qla_vha;
+	/* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */
+	struct scsi_qla_host *qla_npiv_vp;
+	/* Embedded struct qla_tgt for this target lport */
+	struct qla_tgt lport_qla_tgt;
+	/* Pointer to struct fc_vport for NPIV vport from libfc */
+	struct fc_vport *npiv_vport;
+	/* Pointer to TPG=1 for non NPIV mode */
+	struct tcm_qla2xxx_tpg *tpg_1;
+	/* Returned by tcm_qla2xxx_make_lport() */
+	struct se_wwn lport_wwn;
+};
+
+extern int tcm_qla2xxx_check_true(struct se_portal_group *);
+extern int tcm_qla2xxx_check_false(struct se_portal_group *);
+extern ssize_t tcm_qla2xxx_parse_wwn(const char *, u64 *, int);
+extern ssize_t tcm_qla2xxx_format_wwn(char *, size_t, u64);
+extern char *tcm_qla2xxx_get_fabric_name(void);
+extern int tcm_qla2xxx_npiv_parse_wwn(const char *name, size_t, u64 *, u64 *);
+extern ssize_t tcm_qla2xxx_npiv_format_wwn(char *, size_t, u64, u64);
+extern char *tcm_qla2xxx_npiv_get_fabric_name(void);
+extern u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *);
+extern char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *);
+extern char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *);
+extern u16 tcm_qla2xxx_get_tag(struct se_portal_group *);
+extern u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *);
+extern u32 tcm_qla2xxx_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
+			struct t10_pr_registration *, int *, unsigned char *);
+extern u32 tcm_qla2xxx_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
+			struct t10_pr_registration *, int *);
+extern char *tcm_qla2xxx_parse_pr_out_transport_id(struct se_portal_group *, const char *,
+				u32 *, char **);
+extern int tcm_qla2xxx_check_demo_mode(struct se_portal_group *);
+extern int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *);
+extern int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *);
+extern int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *);
+extern struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(struct se_portal_group *);
+extern void tcm_qla2xxx_release_fabric_acl(struct se_portal_group *, struct se_node_acl *);
+extern u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *);
+extern void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *);
+extern int tcm_qla2xxx_check_stop_free(struct se_cmd *);
+extern void tcm_qla2xxx_release_cmd(struct se_cmd *);
+extern int tcm_qla2xxx_shutdown_session(struct se_session *);
+extern void tcm_qla2xxx_close_session(struct se_session *);
+extern void tcm_qla2xxx_stop_session(struct se_session *, int, int);
+extern void tcm_qla2xxx_reset_nexus(struct se_session *);
+extern int tcm_qla2xxx_sess_logged_in(struct se_session *);
+extern u32 tcm_qla2xxx_sess_get_index(struct se_session *);
+extern int tcm_qla2xxx_write_pending(struct se_cmd *);
+extern int tcm_qla2xxx_write_pending_status(struct se_cmd *);
+extern void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *);
+extern u32 tcm_qla2xxx_get_task_tag(struct se_cmd *);
+extern int tcm_qla2xxx_get_cmd_state(struct se_cmd *);
+extern int tcm_qla2xxx_handle_cmd(struct scsi_qla_host *, struct qla_tgt_cmd *,
+			unsigned char *, uint32_t, int, int, int);
+extern int tcm_qla2xxx_new_cmd_map(struct se_cmd *);
+extern int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *);
+extern int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *, uint32_t,
+				uint8_t, uint32_t);
+extern int tcm_qla2xxx_queue_data_in(struct se_cmd *);
+extern int tcm_qla2xxx_queue_status(struct se_cmd *);
+extern int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *);
+extern u16 tcm_qla2xxx_get_fabric_sense_len(void);
+extern u16 tcm_qla2xxx_set_fabric_sense_len(struct se_cmd *, u32);
+extern int tcm_qla2xxx_is_state_remove(struct se_cmd *);
-- 
1.7.2.5
