Message-ID: <20260130223531.2478849-14-mkhalfella@purestorage.com>
Date: Fri, 30 Jan 2026 14:34:17 -0800
From: Mohamed Khalfella <mkhalfella@...estorage.com>
To: Justin Tee <justin.tee@...adcom.com>,
Naresh Gottumukkala <nareshgottumukkala83@...il.com>,
Paul Ely <paul.ely@...adcom.com>,
Chaitanya Kulkarni <kch@...dia.com>,
Christoph Hellwig <hch@....de>,
Jens Axboe <axboe@...nel.dk>,
Keith Busch <kbusch@...nel.org>,
Sagi Grimberg <sagi@...mberg.me>
Cc: Aaron Dailey <adailey@...estorage.com>,
Randy Jennings <randyj@...estorage.com>,
Dhaval Giani <dgiani@...estorage.com>,
Hannes Reinecke <hare@...e.de>,
linux-nvme@...ts.infradead.org,
linux-kernel@...r.kernel.org,
Mohamed Khalfella <mkhalfella@...estorage.com>
Subject: [PATCH v2 13/14] nvme-fc: Use CCR to recover controller that hits an error
A live nvme controller that hits an error now moves to FENCING state
instead of RESETTING state. ctrl->fencing_work attempts CCR to
terminate inflight IOs. If CCR succeeds, switch to FENCED -> RESETTING
and continue error recovery as usual. If CCR fails, the behavior depends
on whether the subsystem supports CQT. If CQT is not supported, reset
the controller immediately as if CCR had succeeded, in order to maintain
the current behavior. If CQT is supported, switch to time-based recovery
and schedule ctrl->fenced_work, which resets the controller when
time-based recovery finishes.
Either ctrl->ioerr_work or ctrl->reset_work can run after a controller
is fenced. Flush the fencing work when either of them runs.
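
For illustration only (not part of the patch), the decision made by the
fencing work can be modeled in stand-alone C as below. The fake_ctrl
struct, the state names, and the convention that nvme_fence_ctrl()
returns 0 on CCR success or the remaining recovery time in jiffies on
failure are assumptions taken from how the patch uses them:

	#include <stdio.h>
	#include <stdbool.h>

	enum ctrl_state { FENCING, FENCED, RESETTING };

	struct fake_ctrl {
		bool cqt_supported;	/* subsystem reports a Command Quiesce Time */
		unsigned long ccr_rem;	/* 0 = CCR succeeded, else jiffies remaining */
		enum ctrl_state state;	/* FENCING on entry, as in the patch */
	};

	/* Models the flow of nvme_fc_fencing_work() outside the kernel. */
	static void fencing_work(struct fake_ctrl *ctrl)
	{
		if (ctrl->ccr_rem && ctrl->cqt_supported) {
			/*
			 * CCR failed and CQT is available: stay FENCING and
			 * defer the reset until time-based recovery expires
			 * (the patch queues fenced_work for this).
			 */
			printf("time-based recovery, %lu jiffies remaining\n",
			       ctrl->ccr_rem);
			return;
		}

		/*
		 * CCR succeeded, or CQT is not supported: fence and reset
		 * now (two state changes, as in the patch).
		 */
		ctrl->state = FENCED;
		ctrl->state = RESETTING;
		printf("controller fenced, resetting now\n");
	}

	int main(void)
	{
		struct fake_ctrl a = { .cqt_supported = false, .ccr_rem = 500,
				       .state = FENCING };
		struct fake_ctrl b = { .cqt_supported = true,  .ccr_rem = 500,
				       .state = FENCING };

		fencing_work(&a);	/* no CQT: reset immediately */
		fencing_work(&b);	/* CQT supported: delayed reset */
		return 0;
	}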
Signed-off-by: Mohamed Khalfella <mkhalfella@...estorage.com>
---
drivers/nvme/host/fc.c | 60 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 60 insertions(+)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index f8f6071b78ed..3a01aeb39081 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -166,6 +166,8 @@ struct nvme_fc_ctrl {
struct blk_mq_tag_set admin_tag_set;
struct blk_mq_tag_set tag_set;
+ struct work_struct fencing_work;
+ struct delayed_work fenced_work;
struct work_struct ioerr_work;
struct delayed_work connect_work;
@@ -1866,12 +1868,59 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
}
}
+static void nvme_fc_fenced_work(struct work_struct *work)
+{
+ struct nvme_fc_ctrl *fc_ctrl = container_of(to_delayed_work(work),
+ struct nvme_fc_ctrl, fenced_work);
+ struct nvme_ctrl *ctrl = &fc_ctrl->ctrl;
+
+ nvme_change_ctrl_state(ctrl, NVME_CTRL_FENCED);
+ if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+ queue_work(nvme_reset_wq, &fc_ctrl->ioerr_work);
+}
+
+static void nvme_fc_fencing_work(struct work_struct *work)
+{
+ struct nvme_fc_ctrl *fc_ctrl =
+ container_of(work, struct nvme_fc_ctrl, fencing_work);
+ struct nvme_ctrl *ctrl = &fc_ctrl->ctrl;
+ unsigned long rem;
+
+ rem = nvme_fence_ctrl(ctrl);
+ if (!rem)
+ goto done;
+
+ if (!ctrl->cqt) {
+ dev_info(ctrl->device,
+ "CCR failed, CQT not supported, skip time-based recovery\n");
+ goto done;
+ }
+
+ dev_info(ctrl->device,
+ "CCR failed, switch to time-based recovery, timeout = %ums\n",
+ jiffies_to_msecs(rem));
+ queue_delayed_work(nvme_wq, &fc_ctrl->fenced_work, rem);
+ return;
+
+done:
+ nvme_change_ctrl_state(ctrl, NVME_CTRL_FENCED);
+ if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+ queue_work(nvme_reset_wq, &fc_ctrl->ioerr_work);
+}
+
+static void nvme_fc_flush_fencing_work(struct nvme_fc_ctrl *ctrl)
+{
+ flush_work(&ctrl->fencing_work);
+ flush_delayed_work(&ctrl->fenced_work);
+}
+
static void
nvme_fc_ctrl_ioerr_work(struct work_struct *work)
{
struct nvme_fc_ctrl *ctrl =
container_of(work, struct nvme_fc_ctrl, ioerr_work);
+ nvme_fc_flush_fencing_work(ctrl);
nvme_fc_error_recovery(ctrl);
}
@@ -1896,6 +1945,14 @@ EXPORT_SYMBOL_GPL(nvme_fc_io_getuuid);
static void nvme_fc_start_ioerr_recovery(struct nvme_fc_ctrl *ctrl,
char *errmsg)
{
+ if (nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_FENCING)) {
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: starting controller fencing %s\n",
+ ctrl->cnum, errmsg);
+ queue_work(nvme_wq, &ctrl->fencing_work);
+ return;
+ }
+
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
return;
@@ -3297,6 +3354,7 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
struct nvme_fc_ctrl *ctrl =
container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
+ nvme_fc_flush_fencing_work(ctrl);
nvme_stop_ctrl(&ctrl->ctrl);
/* will block will waiting for io to terminate */
@@ -3471,6 +3529,8 @@ nvme_fc_alloc_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+ INIT_DELAYED_WORK(&ctrl->fenced_work, nvme_fc_fenced_work);
+ INIT_WORK(&ctrl->fencing_work, nvme_fc_fencing_work);
INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
spin_lock_init(&ctrl->lock);
--
2.52.0