Message-Id: <20210114203148.246656-16-tyreld@linux.ibm.com>
Date: Thu, 14 Jan 2021 14:31:42 -0600
From: Tyrel Datwyler <tyreld@...ux.ibm.com>
To: james.bottomley@...senpartnership.com
Cc: martin.petersen@...cle.com, linux-scsi@...r.kernel.org,
linuxppc-dev@...ts.ozlabs.org, linux-kernel@...r.kernel.org,
brking@...ux.ibm.com, Tyrel Datwyler <tyreld@...ux.ibm.com>,
Brian King <brking@...ux.vnet.ibm.com>
Subject: [PATCH v5 15/21] ibmvfc: send commands down HW Sub-CRQ when channelized
When the client has negotiated the use of channels, all vfcFrames are
required to go down a Sub-CRQ channel, or it is a protocol violation. If
the adapter state is channelized, submit vfcFrames to the appropriate
Sub-CRQ with the new ibmvfc_send_sub_crq() helper, which wraps the
H_SEND_SUB_CRQ hcall.
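
As an aside for reviewers, the hwq-to-channel mapping in
ibmvfc_queuecommand() below is a simple modulo over the number of
channels the VIOS actually granted. A minimal standalone sketch of that
mapping (illustrative only, not part of the patch; map_hwq_to_channel()
and the sample active_queues value of 4 are made up for this example):

#include <stdio.h>

/* Mirrors: scsi_channel = hwq % vhost->scsi_scrqs.active_queues */
static unsigned int map_hwq_to_channel(unsigned int hwq,
				       unsigned int active_queues)
{
	return hwq % active_queues;
}

int main(void)
{
	unsigned int active_queues = 4;	/* e.g. channels granted by the VIOS */
	unsigned int hwq;

	for (hwq = 0; hwq < 8; hwq++)
		printf("hwq %u -> Sub-CRQ channel %u\n",
		       hwq, map_hwq_to_channel(hwq, active_queues));
	return 0;
}

The modulo keeps the mapping in range even when fewer channels were
granted than block-MQ hardware queues were registered.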
Signed-off-by: Tyrel Datwyler <tyreld@...ux.ibm.com>
Reviewed-by: Brian King <brking@...ux.vnet.ibm.com>
---
drivers/scsi/ibmvscsi/ibmvfc.c | 39 ++++++++++++++++++++++++++++------
1 file changed, 33 insertions(+), 6 deletions(-)
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 3f3cc37a263f..865b87881d86 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -704,6 +704,15 @@ static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
 	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
 }
 
+static int ibmvfc_send_sub_crq(struct ibmvfc_host *vhost, u64 cookie, u64 word1,
+			       u64 word2, u64 word3, u64 word4)
+{
+	struct vio_dev *vdev = to_vio_dev(vhost->dev);
+
+	return plpar_hcall_norets(H_SEND_SUB_CRQ, vdev->unit_address, cookie,
+				  word1, word2, word3, word4);
+}
+
 /**
  * ibmvfc_send_crq_init - Send a CRQ init message
  * @vhost:	ibmvfc host struct
@@ -1623,8 +1632,17 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
 
 	mb();
 
-	if ((rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
-				  be64_to_cpu(crq_as_u64[1])))) {
+	if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT)
+		rc = ibmvfc_send_sub_crq(vhost,
+					 evt->queue->vios_cookie,
+					 be64_to_cpu(crq_as_u64[0]),
+					 be64_to_cpu(crq_as_u64[1]),
+					 0, 0);
+	else
+		rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
+				     be64_to_cpu(crq_as_u64[1]));
+
+	if (rc) {
 		list_del(&evt->queue_list);
 		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
 		del_timer(&evt->timer);
@@ -1842,6 +1860,7 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 	struct ibmvfc_event *evt;
 	u32 tag_and_hwq = blk_mq_unique_tag(cmnd->request);
 	u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
+	u16 scsi_channel;
 	int rc;
 
 	if (unlikely((rc = fc_remote_port_chkready(rport))) ||
@@ -1852,7 +1871,13 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 	}
 
 	cmnd->result = (DID_OK << 16);
 
-	evt = ibmvfc_get_event(&vhost->crq);
+	if (vhost->using_channels) {
+		scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
+		evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
+		evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
+	} else
+		evt = ibmvfc_get_event(&vhost->crq);
+
 	ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
 	evt->cmnd = cmnd;
@@ -1868,8 +1893,6 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 	}
 
 	vfc_cmd->correlation = cpu_to_be64((u64)evt);
 
-	if (vhost->using_channels)
-		evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
 	if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
 		return ibmvfc_send_event(evt, vhost, 0);
@@ -2200,7 +2223,11 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
 
 	spin_lock_irqsave(vhost->host->host_lock, flags);
 	if (vhost->state == IBMVFC_ACTIVE) {
-		evt = ibmvfc_get_event(&vhost->crq);
+		if (vhost->using_channels)
+			evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[0]);
+		else
+			evt = ibmvfc_get_event(&vhost->crq);
+
 		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
 		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
 		iu = ibmvfc_get_fcp_iu(vhost, tmf);
--
2.27.0