Message-Id: <fd205548c8102ebbea3d09a7020b9b1fbc4beefb.1685350577.git.chunguang.xu@shopee.com>
Date: Mon, 29 May 2023 18:59:23 +0800
From: "brookxu.cn" <brookxu.cn@...il.com>
To: kbusch@...nel.org, axboe@...nel.dk, hch@....de, sagi@...mberg.me
Cc: linux-nvme@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: [RFC PATCH 1/4] nvme: unfreeze when exiting recovery or reset
From: Chunguang Xu <chunguang.xu@...pee.com>
Removing the controller will interrupt err_work/connect_work, leaving
the controller frozen and its IO queues quiesced. IOs issued by
scan_work are then blocked, and nvme_remove_namespaces() hangs while
flushing scan_work. To fix this, unfreeze the controller and unquiesce
the IO queues when exiting error recovery or reset.
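Roughly, the hang looks like the following call chain (an illustrative
sketch based on the description above, not an actual hung-task trace;
nvme_scan_work() is the work handler behind ctrl->scan_work):

  nvme_remove_namespaces()
    flush_work(&ctrl->scan_work)      /* waits for scan_work to finish */
      nvme_scan_work()
        ... submits admin/IO commands ...
                                      /* never complete: queues are
                                         still frozen and quiesced */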
Signed-off-by: Chunguang Xu <chunguang.xu@...pee.com>
---
drivers/nvme/host/tcp.c | 24 ++++++++++++++++--------
1 file changed, 16 insertions(+), 8 deletions(-)
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index bf0230442d57..cfebcae7fc9b 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2031,12 +2031,24 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
nvme_tcp_destroy_io_queues(ctrl, remove);
}
+static inline void nvme_ctrl_reconnect_exit(struct nvme_ctrl *ctrl)
+{
+ /* fast fail all pending requests */
+ blk_mq_unquiesce_queue(ctrl->admin_q);
+
+ if (ctrl->queue_count > 1) {
+ nvme_unquiesce_io_queues(ctrl);
+ nvme_unfreeze(ctrl);
+ }
+}
+
static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
{
/* If we are resetting/deleting then do nothing */
if (ctrl->state != NVME_CTRL_CONNECTING) {
WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
ctrl->state == NVME_CTRL_LIVE);
+ nvme_ctrl_reconnect_exit(ctrl);
return;
}
@@ -2107,13 +2119,7 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
return 0;
destroy_io:
- if (ctrl->queue_count > 1) {
- nvme_quiesce_io_queues(ctrl);
- nvme_sync_io_queues(ctrl);
- nvme_tcp_stop_io_queues(ctrl);
- nvme_cancel_tagset(ctrl);
- nvme_tcp_destroy_io_queues(ctrl, new);
- }
+ nvme_tcp_teardown_io_queues(ctrl, new);
destroy_admin:
nvme_quiesce_admin_queue(ctrl);
blk_sync_queue(ctrl->admin_q);
@@ -2166,6 +2172,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
/* state change failure is ok if we started ctrl delete */
WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
ctrl->state != NVME_CTRL_DELETING_NOIO);
+ nvme_ctrl_reconnect_exit(ctrl);
return;
}
@@ -2197,6 +2204,7 @@ static void nvme_reset_ctrl_work(struct work_struct *work)
/* state change failure is ok if we started ctrl delete */
WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
ctrl->state != NVME_CTRL_DELETING_NOIO);
+ nvme_ctrl_reconnect_exit(ctrl);
return;
}
@@ -2213,7 +2221,7 @@ static void nvme_reset_ctrl_work(struct work_struct *work)
static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
{
flush_work(&to_tcp_ctrl(ctrl)->err_work);
- cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
+ flush_delayed_work(&to_tcp_ctrl(ctrl)->connect_work);
}
static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
--
2.25.1