Message-ID: <20260130223531.2478849-9-mkhalfella@purestorage.com>
Date: Fri, 30 Jan 2026 14:34:12 -0800
From: Mohamed Khalfella <mkhalfella@...estorage.com>
To: Justin Tee <justin.tee@...adcom.com>,
	Naresh Gottumukkala <nareshgottumukkala83@...il.com>,
	Paul Ely <paul.ely@...adcom.com>,
	Chaitanya Kulkarni <kch@...dia.com>,
	Christoph Hellwig <hch@....de>,
	Jens Axboe <axboe@...nel.dk>,
	Keith Busch <kbusch@...nel.org>,
	Sagi Grimberg <sagi@...mberg.me>
Cc: Aaron Dailey <adailey@...estorage.com>,
	Randy Jennings <randyj@...estorage.com>,
	Dhaval Giani <dgiani@...estorage.com>,
	Hannes Reinecke <hare@...e.de>,
	linux-nvme@...ts.infradead.org,
	linux-kernel@...r.kernel.org,
	Mohamed Khalfella <mkhalfella@...estorage.com>
Subject: [PATCH v2 08/14] nvme: Implement cross-controller reset recovery

A host with more than one path to an NVMe subsystem typically has an
NVMe controller associated with each path. This mostly applies to NVMe
over Fabrics. If one path goes down, inflight IOs on that path must not
be retried immediately on another path, because doing so can lead to
data corruption as described in TP4129. TP8028 defines a
cross-controller reset (CCR) mechanism that the host can use to
terminate IOs on the failed path through one of the remaining healthy
paths. Only after those IOs are terminated, or after enough time has
passed as defined by TP4129, may inflight IOs be retried on another
path. Implement the core cross-controller reset logic shared by the
transports.
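
For illustration, a transport error handler could use the new helper
roughly as follows. This is a hypothetical sketch, not part of this
patch; where fencing slots into recovery and how requests are failed
over are transport specific:

	/*
	 * Hypothetical caller sketch: fence the failed controller before
	 * retrying its inflight IOs on another path. A zero return means
	 * the controller is fenced (via CCR or because the fence timeout
	 * elapsed); a nonzero return is the remaining time-based recovery
	 * window in jiffies.
	 */
	unsigned long delay = nvme_fence_ctrl(ctrl);

	if (delay)
		schedule_timeout_uninterruptible(delay);
	/* inflight IOs may now be safely retried on another path */
	nvme_cancel_tagset(ctrl);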
Signed-off-by: Mohamed Khalfella <mkhalfella@...estorage.com>
---
 drivers/nvme/host/constants.c |   1 +
 drivers/nvme/host/core.c      | 129 ++++++++++++++++++++++++++++++++++
 drivers/nvme/host/nvme.h      |   9 +++
 3 files changed, 139 insertions(+)
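
Not part of the patch, just for review context: nvme_issue_wait_ccr()
parks an nvme_ccr_entry on the source controller's ccr_list and waits
for ccr.ccrs to be filled in and the completion fired. A hypothetical
sketch of what that notification side could look like; the function
name and how the CCR outcome is delivered are assumptions, presumably
covered elsewhere in the series:

	static void nvme_complete_ccr(struct nvme_ctrl *sctrl, u16 icid, u8 ccrs)
	{
		struct nvme_ccr_entry *ccr;
		unsigned long flags;

		/*
		 * Hypothetical: wake any nvme_issue_wait_ccr() waiter that
		 * initiated a CCR for the controller identified by icid.
		 */
		spin_lock_irqsave(&sctrl->lock, flags);
		list_for_each_entry(ccr, &sctrl->ccr_list, list) {
			if (ccr->ictrl->cntlid != icid)
				continue;
			ccr->ccrs = ccrs;
			complete(&ccr->complete);
		}
		spin_unlock_irqrestore(&sctrl->lock, flags);
	}
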
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
index dc90df9e13a2..f679efd5110e 100644
--- a/drivers/nvme/host/constants.c
+++ b/drivers/nvme/host/constants.c
@@ -46,6 +46,7 @@ static const char * const nvme_admin_ops[] = {
 	[nvme_admin_virtual_mgmt] = "Virtual Management",
 	[nvme_admin_nvme_mi_send] = "NVMe Send MI",
 	[nvme_admin_nvme_mi_recv] = "NVMe Receive MI",
+	[nvme_admin_cross_ctrl_reset] = "Cross Controller Reset",
 	[nvme_admin_dbbuf] = "Doorbell Buffer Config",
 	[nvme_admin_format_nvm] = "Format NVM",
 	[nvme_admin_security_send] = "Security Send",
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 3e1e02822dd4..13e0775d56b4 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -554,6 +554,134 @@ void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
 
+static struct nvme_ctrl *nvme_find_ctrl_ccr(struct nvme_ctrl *ictrl,
+					    u32 min_cntlid)
+{
+	struct nvme_subsystem *subsys = ictrl->subsys;
+	struct nvme_ctrl *sctrl;
+	unsigned long flags;
+
+	mutex_lock(&nvme_subsystems_lock);
+	list_for_each_entry(sctrl, &subsys->ctrls, subsys_entry) {
+		if (sctrl->cntlid < min_cntlid)
+			continue;
+
+		if (atomic_dec_if_positive(&sctrl->ccr_limit) < 0)
+			continue;
+
+		spin_lock_irqsave(&sctrl->lock, flags);
+		if (sctrl->state != NVME_CTRL_LIVE) {
+			spin_unlock_irqrestore(&sctrl->lock, flags);
+			atomic_inc(&sctrl->ccr_limit);
+			continue;
+		}
+
+		/*
+		 * We have a locked, LIVE candidate source controller, but
+		 * nothing guarantees sctrl will not be deleted once
+		 * sctrl->lock is released. Take a reference on both sctrl
+		 * and its admin_q so they stay around until we are done.
+		 */
+		WARN_ON_ONCE(!blk_get_queue(sctrl->admin_q));
+		nvme_get_ctrl(sctrl);
+		spin_unlock_irqrestore(&sctrl->lock, flags);
+		goto found;
+	}
+	sctrl = NULL;
+found:
+	mutex_unlock(&nvme_subsystems_lock);
+	return sctrl;
+}
+
+static void nvme_put_ctrl_ccr(struct nvme_ctrl *sctrl)
+{
+	atomic_inc(&sctrl->ccr_limit);
+	blk_put_queue(sctrl->admin_q);
+	nvme_put_ctrl(sctrl);
+}
+
+static int nvme_issue_wait_ccr(struct nvme_ctrl *sctrl, struct nvme_ctrl *ictrl)
+{
+	struct nvme_ccr_entry ccr = { };
+	union nvme_result res = { 0 };
+	struct nvme_command c = { };
+	unsigned long flags, tmo;
+	int ret = 0;
+	u32 result;
+
+	init_completion(&ccr.complete);
+	ccr.ictrl = ictrl;
+
+	spin_lock_irqsave(&sctrl->lock, flags);
+	list_add_tail(&ccr.list, &sctrl->ccr_list);
+	spin_unlock_irqrestore(&sctrl->lock, flags);
+
+	c.ccr.opcode = nvme_admin_cross_ctrl_reset;
+	c.ccr.ciu = ictrl->ciu;
+	c.ccr.icid = cpu_to_le16(ictrl->cntlid);
+	c.ccr.cirn = cpu_to_le64(ictrl->cirn);
+	ret = __nvme_submit_sync_cmd(sctrl->admin_q, &c, &res,
+				     NULL, 0, NVME_QID_ANY, 0);
+	if (ret)
+		goto out;
+
+	result = le32_to_cpu(res.u32);
+	if (result & 0x01) /* Immediate Reset Successful */
+		goto out;
+
+	tmo = msecs_to_jiffies(max(ictrl->cqt, ictrl->kato * 1000));
+	if (!wait_for_completion_timeout(&ccr.complete, tmo)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	if (ccr.ccrs != NVME_CCR_STATUS_SUCCESS)
+		ret = -EREMOTEIO;
+out:
+	spin_lock_irqsave(&sctrl->lock, flags);
+	list_del(&ccr.list);
+	spin_unlock_irqrestore(&sctrl->lock, flags);
+	return ret;
+}
+
+unsigned long nvme_fence_ctrl(struct nvme_ctrl *ictrl)
+{
+	unsigned long deadline, now, timeout;
+	struct nvme_ctrl *sctrl;
+	u32 min_cntlid = 0;
+	int ret;
+
+	timeout = nvme_fence_timeout_ms(ictrl);
+	dev_info(ictrl->device, "attempting CCR, timeout %lums\n", timeout);
+
+	now = jiffies;
+	deadline = now + msecs_to_jiffies(timeout);
+	while (time_before(now, deadline)) {
+		sctrl = nvme_find_ctrl_ccr(ictrl, min_cntlid);
+		if (!sctrl) {
+			/* no usable source controller, fall back to time-based recovery */
+			return deadline - now;
+		}
+
+		ret = nvme_issue_wait_ccr(sctrl, ictrl);
+		if (!ret) {
+			dev_info(ictrl->device, "CCR succeeded using %s\n",
+				 dev_name(sctrl->device));
+			nvme_put_ctrl_ccr(sctrl);
+			return 0;
+		}
+
+		/* CCR failed, try the next controller */
+		min_cntlid = sctrl->cntlid + 1;
+		nvme_put_ctrl_ccr(sctrl);
+		now = jiffies;
+	}
+
+	dev_info(ictrl->device, "CCR window elapsed, controller considered fenced\n");
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_fence_ctrl);
+
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 		enum nvme_ctrl_state new_state)
 {
@@ -5119,6 +5247,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 
 	mutex_init(&ctrl->scan_lock);
 	INIT_LIST_HEAD(&ctrl->namespaces);
+	INIT_LIST_HEAD(&ctrl->ccr_list);
 	xa_init(&ctrl->cels);
 	ctrl->dev = dev;
 	ctrl->ops = ops;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 00866bbc66f3..fa18f580d76a 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -279,6 +279,13 @@ enum nvme_ctrl_flags {
 	NVME_CTRL_FROZEN = 6,
 };
 
+struct nvme_ccr_entry {
+	struct list_head list;
+	struct completion complete;
+	struct nvme_ctrl *ictrl;
+	u8 ccrs;
+};
+
 struct nvme_ctrl {
 	bool comp_seen;
 	bool identified;
@@ -296,6 +303,7 @@ struct nvme_ctrl {
 	struct blk_mq_tag_set *tagset;
 	struct blk_mq_tag_set *admin_tagset;
 	struct list_head namespaces;
+	struct list_head ccr_list;
 	struct mutex namespaces_lock;
 	struct srcu_struct srcu;
 	struct device ctrl_device;
@@ -814,6 +822,7 @@ blk_status_t nvme_host_path_error(struct request *req);
 bool nvme_cancel_request(struct request *req, void *data);
 void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
 void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
+unsigned long nvme_fence_ctrl(struct nvme_ctrl *ctrl);
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 		enum nvme_ctrl_state new_state);
 int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown);
--
2.52.0