Message-Id: <20201202005329.4538-16-tyreld@linux.ibm.com>
Date: Tue, 1 Dec 2020 18:53:27 -0600
From: Tyrel Datwyler <tyreld@...ux.ibm.com>
To: james.bottomley@...senpartnership.com
Cc: martin.petersen@...cle.com, linux-scsi@...r.kernel.org,
linuxppc-dev@...ts.ozlabs.org, linux-kernel@...r.kernel.org,
brking@...ux.ibm.com, Tyrel Datwyler <tyreld@...ux.ibm.com>
Subject: [PATCH v2 15/17] ibmvfc: send Cancel MAD down each hw scsi channel
In general, the client needs to send Cancel MADs and task management
commands down the same channel as the command(s) they are intended to
cancel or abort. The client assigns cancel keys per LUN and thus must
send a Cancel down each channel on which commands were submitted for
that LUN. Further, the client must then wait for those cancel
completions prior to submitting a LUN RESET or ABORT TASK SET.

Allocate event pointers for each possible scsi channel and assign an
event for each channel that requires a cancel. Wait for the completion
of each submitted cancel.
Signed-off-by: Tyrel Datwyler <tyreld@...ux.ibm.com>
---
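For reference, here is a minimal standalone sketch of the per-queue
cancel flow described above. It is not the driver code itself: struct
cancel_ctx, issue_cancel(), wait_cancel(), and NUM_HWQ are made-up names
used only for illustration, standing in for the per-queue event list,
ibmvfc_init_tmf()/ibmvfc_send_event(), and wait_for_completion() used in
the actual patch below.

/*
 * Hypothetical sketch of the per-queue cancel flow: submit one cancel
 * per hardware queue, then wait for every submitted cancel before any
 * LUN RESET / ABORT TASK SET may follow.  Names are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

#define NUM_HWQ 4			/* assume four active hardware queues */

struct cancel_ctx {
	bool submitted;			/* a cancel was sent on this queue */
	int status;			/* completion status, 0 == success */
};

/* Stand-in for building and sending a Cancel on one hardware queue. */
static int issue_cancel(struct cancel_ctx *ctx, int hwq)
{
	printf("Cancelling outstanding commands on queue %d\n", hwq);
	ctx->submitted = true;
	return 0;			/* 0 == submitted successfully */
}

/* Stand-in for waiting on the completion of one submitted cancel. */
static int wait_cancel(struct cancel_ctx *ctx)
{
	ctx->status = 0;		/* pretend the cancel succeeded */
	return ctx->status;
}

int main(void)
{
	struct cancel_ctx *ctx = calloc(NUM_HWQ, sizeof(*ctx));
	int rc = 0;

	if (!ctx)
		return EXIT_FAILURE;

	/* Step 1: send a cancel down every queue with commands for the LUN. */
	for (int i = 0; i < NUM_HWQ; i++) {
		rc = issue_cancel(&ctx[i], i);
		if (rc)
			break;
	}

	/* Step 2: wait for every submitted cancel to complete before a
	 * LUN RESET or ABORT TASK SET could be issued. */
	for (int i = 0; i < NUM_HWQ; i++) {
		if (!ctx[i].submitted)
			continue;
		if (wait_cancel(&ctx[i]))
			rc = -1;
	}

	free(ctx);
	printf(rc ? "Cancel failed\n" : "All cancels completed\n");
	return rc ? EXIT_FAILURE : EXIT_SUCCESS;
}

In the real patch the per-queue events are allocated, submitted, and
later freed under the host lock, as shown in the diff that follows.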
drivers/scsi/ibmvscsi/ibmvfc.c | 106 +++++++++++++++++++++------------
1 file changed, 68 insertions(+), 38 deletions(-)
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 0b6284020f06..97e8eed04b01 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2339,32 +2339,52 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
{
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct ibmvfc_event *evt, *found_evt;
- union ibmvfc_iu rsp;
- int rsp_rc = -EBUSY;
+ struct ibmvfc_event **evt_list;
+ union ibmvfc_iu *rsp;
+ int rsp_rc = 0;
unsigned long flags;
u16 status;
+ int num_hwq = 1;
+ int i;
+ int ret = 0;
ENTER;
spin_lock_irqsave(vhost->host->host_lock, flags);
- found_evt = NULL;
- list_for_each_entry(evt, &vhost->sent, queue) {
- if (evt->cmnd && evt->cmnd->device == sdev) {
- found_evt = evt;
- break;
+ if (vhost->using_channels && vhost->scsi_scrqs.active_queues)
+ num_hwq = vhost->scsi_scrqs.active_queues;
+
+ evt_list = kcalloc(num_hwq, sizeof(*evt_list), GFP_KERNEL);
+ rsp = kcalloc(num_hwq, sizeof(*rsp), GFP_KERNEL);
+
+ for (i = 0; i < num_hwq; i++) {
+ sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands on queue %d.\n", i);
+
+ found_evt = NULL;
+ list_for_each_entry(evt, &vhost->sent, queue) {
+ if (evt->cmnd && evt->cmnd->device == sdev && evt->hwq == i) {
+ found_evt = evt;
+ break;
+ }
}
- }
- if (!found_evt) {
- if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
- sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
- spin_unlock_irqrestore(vhost->host->host_lock, flags);
- return 0;
- }
+ if (!found_evt) {
+ if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+ sdev_printk(KERN_INFO, sdev, "No events found to cancel on queue %d\n", i);
+ continue;
+ }
- if (vhost->logged_in) {
- evt = ibmvfc_init_tmf(vhost, sdev, type);
- evt->sync_iu = &rsp;
- rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
+
+ if (vhost->logged_in) {
+ evt_list[i] = ibmvfc_init_tmf(vhost, sdev, type);
+ evt_list[i]->hwq = i;
+ evt_list[i]->sync_iu = &rsp[i];
+ rsp_rc = ibmvfc_send_event(evt_list[i], vhost, default_timeout);
+ if (rsp_rc)
+ break;
+ } else {
+ rsp_rc = -EBUSY;
+ break;
+ }
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
@@ -2374,32 +2394,42 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
/* If failure is received, the host adapter is most likely going
through reset, return success so the caller will wait for the command
being cancelled to get returned */
- return 0;
+ goto free_mem;
}
- sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
-
- wait_for_completion(&evt->comp);
- status = be16_to_cpu(rsp.mad_common.status);
- spin_lock_irqsave(vhost->host->host_lock, flags);
- ibmvfc_free_event(evt);
- spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ for (i = 0; i < num_hwq; i++) {
+ if (!evt_list[i])
+ continue;
- if (status != IBMVFC_MAD_SUCCESS) {
- sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
- switch (status) {
- case IBMVFC_MAD_DRIVER_FAILED:
- case IBMVFC_MAD_CRQ_ERROR:
- /* Host adapter most likely going through reset, return success to
- the caller will wait for the command being cancelled to get returned */
- return 0;
- default:
- return -EIO;
- };
+ wait_for_completion(&evt_list[i]->comp);
+ status = be16_to_cpu(rsp[i].mad_common.status);
+
+ if (status != IBMVFC_MAD_SUCCESS) {
+ sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
+ switch (status) {
+ case IBMVFC_MAD_DRIVER_FAILED:
+ case IBMVFC_MAD_CRQ_ERROR:
+ /* Host adapter most likely going through reset, return success to
+ the caller will wait for the command being cancelled to get returned */
+ goto free_mem;
+ default:
+ ret = -EIO;
+ goto free_mem;
+ };
+ }
}
sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
- return 0;
+free_mem:
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ for (i = 0; i < num_hwq; i++)
+ if (evt_list[i])
+ ibmvfc_free_event(evt_list[i]);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ kfree(evt_list);
+ kfree(rsp);
+
+ return ret;
}
/**
--
2.27.0