Message-Id: <20180316152340.674900786@linuxfoundation.org>
Date: Fri, 16 Mar 2018 16:23:42 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Dick Kennedy <dick.kennedy@...adcom.com>,
James Smart <james.smart@...adcom.com>,
Hannes Reinecke <hare@...e.com>,
"Martin K. Petersen" <martin.petersen@...cle.com>,
Sasha Levin <alexander.levin@...rosoft.com>
Subject: [PATCH 4.15 081/128] scsi: lpfc: Fix crash during driver unload with running nvme traffic

4.15-stable review patch.  If anyone has any objections, please let me know.

------------------

From: James Smart <jsmart2021@...il.com>

[ Upstream commit 3386f4bdd243ad5a9094d390297602543abe9902 ]

When the driver is unloading, the nvme transport may still be submitting
new requests, sending abort requests to terminate associations, or making
LS-related requests. The driver's abort and request entry points are
currently ignorant of the unloading state and start these requests even
though the infrastructure needed to complete them is being torn down.

Change the entry points for new requests to check whether the driver is
unloading and, if so, reject the requests. Abort routines check for
unloading and, if so, no-op the request. An abort can be no-op'd because
the teardown paths are already aborting/terminating the io outstanding at
the time the teardown was initiated.
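
For illustration only, here is a minimal, self-contained sketch of the guard
pattern this patch applies. It is not lpfc code: the names (demo_port,
demo_submit_request, demo_abort_request) and the FC_UNLOADING bit value are
made up for the example. New-request entry points reject work with -ENODEV
while the unload flag is set; abort entry points simply return, since the
teardown path is already terminating outstanding I/O.

	#include <errno.h>
	#include <stdio.h>

	#define FC_UNLOADING 0x1		/* assumed flag bit, illustration only */

	struct demo_port {			/* stand-in for the driver's port state */
		unsigned int load_flag;
	};

	/* New-request path: refuse work that teardown could not complete. */
	static int demo_submit_request(struct demo_port *port)
	{
		if (port->load_flag & FC_UNLOADING)
			return -ENODEV;

		/* ... normal submission would happen here ... */
		return 0;
	}

	/* Abort path: no-op, teardown already terminates outstanding I/O. */
	static void demo_abort_request(struct demo_port *port)
	{
		if (port->load_flag & FC_UNLOADING)
			return;

		/* ... normal abort handling would happen here ... */
	}

	int main(void)
	{
		struct demo_port port = { .load_flag = FC_UNLOADING };

		printf("submit while unloading -> %d\n", demo_submit_request(&port));
		demo_abort_request(&port);	/* silently no-ops */

		port.load_flag = 0;
		printf("submit after load completes -> %d\n", demo_submit_request(&port));
		return 0;
	}

Built with a stock C compiler, the first submit prints a negative errno
(-ENODEV, typically -19 on Linux) and the abort call returns without doing
anything, mirroring the behaviour described above.
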
Signed-off-by: Dick Kennedy <dick.kennedy@...adcom.com>
Signed-off-by: James Smart <james.smart@...adcom.com>
Reviewed-by: Hannes Reinecke <hare@...e.com>
Signed-off-by: Martin K. Petersen <martin.petersen@...cle.com>
Signed-off-by: Sasha Levin <alexander.levin@...rosoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
drivers/scsi/lpfc/lpfc_nvme.c | 14 ++++++++++++++
drivers/scsi/lpfc/lpfc_nvmet.c | 11 +++++++++++
2 files changed, 25 insertions(+)
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -419,6 +419,9 @@ lpfc_nvme_ls_req(struct nvme_fc_local_po
 	if (vport->load_flag & FC_UNLOADING)
 		return -ENODEV;
 
+	if (vport->load_flag & FC_UNLOADING)
+		return -ENODEV;
+
 	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
@@ -534,6 +537,9 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_
 	vport = lport->vport;
 	phba = vport->phba;
 
+	if (vport->load_flag & FC_UNLOADING)
+		return;
+
 	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
 	if (!ndlp) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
@@ -1260,6 +1266,11 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_l
 		goto out_fail;
 	}
 
+	if (vport->load_flag & FC_UNLOADING) {
+		ret = -ENODEV;
+		goto out_fail;
+	}
+
 	/* Validate pointers. */
 	if (!pnvme_lport || !pnvme_rport || !freqpriv) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR | LOG_NODE,
@@ -1487,6 +1498,9 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local
 	vport = lport->vport;
 	phba = vport->phba;
 
+	if (vport->load_flag & FC_UNLOADING)
+		return;
+
 	/* Announce entry to new IO submit field. */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
 			 "6002 Abort Request to rport DID x%06x "
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -635,6 +635,9 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_ta
 	if (phba->pport->load_flag & FC_UNLOADING)
 		return -ENODEV;
 
+	if (phba->pport->load_flag & FC_UNLOADING)
+		return -ENODEV;
+
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
 			"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
@@ -721,6 +724,11 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_ta
 		goto aerr;
 	}
 
+	if (phba->pport->load_flag & FC_UNLOADING) {
+		rc = -ENODEV;
+		goto aerr;
+	}
+
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	if (ctxp->ts_cmd_nvme) {
 		if (rsp->op == NVMET_FCOP_RSP)
@@ -822,6 +830,9 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc
 	if (phba->pport->load_flag & FC_UNLOADING)
 		return;
+
+	if (phba->pport->load_flag & FC_UNLOADING)
+		return;
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
 			"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",