Message-ID: <20200326070806.25493-6-skashyap@marvell.com>
Date: Thu, 26 Mar 2020 00:08:03 -0700
From: Saurav Kashyap <skashyap@...vell.com>
To: <martin.petersen@...cle.com>
CC: <GR-QLogic-Storage-Upstream@...vell.com>,
<linux-scsi@...r.kernel.org>, <netdev@...r.kernel.org>
Subject: [PATCH 5/8] qedf: Add schedule recovery handler.
From: Chad Dupuis <cdupuis@...vell.com>
- Add a recovery handler; it will be triggered by QED.
Signed-off-by: Chad Dupuis <cdupuis@...vell.com>
Signed-off-by: Saurav Kashyap <skashyap@...vell.com>
---
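Note for reviewers: the handler follows the standard kernel deferred-work
pattern. The qed core may invoke schedule_recovery_handler() from a context
where sleeping is not allowed, so the callback only queues a delayed_work
item and the actual tear-down/re-probe runs later from the workqueue in
process context. A minimal, self-contained sketch of that pattern (the
example_* names and EXAMPLE_IN_RECOVERY bit are illustrative, not from this
patch):

	#include <linux/workqueue.h>
	#include <linux/bitops.h>

	struct example_ctx {
		struct delayed_work recovery_work;
		unsigned long flags;
	};
	#define EXAMPLE_IN_RECOVERY 0

	/* Runs later in process context via the system workqueue. */
	static void example_recovery_fn(struct work_struct *work)
	{
		struct example_ctx *ctx =
			container_of(work, struct example_ctx,
				     recovery_work.work);

		/* Serialize: bail out if a recovery is already running. */
		if (test_and_set_bit(EXAMPLE_IN_RECOVERY, &ctx->flags))
			return;

		/* ... quiesce, remove and re-probe the device here ... */

		clear_bit(EXAMPLE_IN_RECOVERY, &ctx->flags);
	}

	/* Callback registered with the core driver; safe to call from
	 * atomic context because it only queues work.
	 */
	static void example_schedule_recovery(void *dev)
	{
		struct example_ctx *ctx = dev;

		schedule_delayed_work(&ctx->recovery_work, 0);
	}

	/* At probe time:
	 * INIT_DELAYED_WORK(&ctx->recovery_work, example_recovery_fn);
	 */

This mirrors what the patch does below with QEDF_IN_RECOVERY,
recovery_prolog(), __qedf_remove() and __qedf_probe().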
drivers/scsi/qedf/qedf.h | 3 +++
drivers/scsi/qedf/qedf_main.c | 41 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 44 insertions(+)
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index aaa2ac9..a5134c7 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -387,6 +387,7 @@ struct qedf_ctx {
#define QEDF_IO_WORK_MIN 64
mempool_t *io_mempool;
struct workqueue_struct *dpc_wq;
+ struct delayed_work recovery_work;
struct delayed_work grcdump_work;
struct delayed_work stag_work;
@@ -527,6 +528,8 @@ extern void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
extern void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
struct fcoe_cqe *cqe);
extern void qedf_restart_rport(struct qedf_rport *fcport);
+void qedf_schedule_recovery_handler(void *dev);
+void qedf_recovery_handler(struct work_struct *work);
extern int qedf_send_rec(struct qedf_ioreq *orig_io_req);
extern int qedf_post_io_req(struct qedf_rport *fcport,
struct qedf_ioreq *io_req);
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index ba66216..b3fa21a 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -662,6 +662,7 @@ static u32 qedf_get_login_failures(void *cookie)
{
.link_update = qedf_link_update,
.bw_update = qedf_bw_update,
+ .schedule_recovery_handler = qedf_schedule_recovery_handler,
.dcbx_aen = qedf_dcbx_handler,
.get_generic_tlv_data = qedf_get_generic_tlv_data,
.get_protocol_tlv_data = qedf_get_protocol_tlv_data,
@@ -3510,6 +3511,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
qedf->lport->host->host_no);
qedf->dpc_wq = create_workqueue(host_buf);
}
+ INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
/*
* GRC dump and sysfs parameters are not reaped during the recovery
@@ -3825,6 +3827,45 @@ static void qedf_shutdown(struct pci_dev *pdev)
__qedf_remove(pdev, QEDF_MODE_NORMAL);
}
+/*
+ * Recovery handler code
+ */
+void qedf_schedule_recovery_handler(void *dev)
+{
+ struct qedf_ctx *qedf = dev;
+
+ QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
+ schedule_delayed_work(&qedf->recovery_work, 0);
+}
+
+void qedf_recovery_handler(struct work_struct *work)
+{
+ struct qedf_ctx *qedf =
+ container_of(work, struct qedf_ctx, recovery_work.work);
+
+ if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
+ return;
+
+ /*
+ * Call common_ops->recovery_prolog to allow the MFW to quiesce
+ * any PCI transactions.
+ */
+ qed_ops->common->recovery_prolog(qedf->cdev);
+
+ QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
+ __qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);
+ /*
+ * Reset the link and dcbx to the down state: we will not get a link
+ * down event from the MFW, but calling __qedf_remove is effectively a
+ * link down event.
+ */
+ atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
+ atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
+ __qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
+ clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
+ QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
+}
+
/* Generic TLV data callback */
void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
--
1.8.3.1