Message-Id: <20230504091259.29100-4-dwagner@suse.de>
Date: Thu, 4 May 2023 11:12:53 +0200
From: Daniel Wagner <dwagner@...e.de>
To: linux-nvme@...ts.infradead.org
Cc: linux-kernel@...r.kernel.org, Chaitanya Kulkarni <kch@...dia.com>,
Sagi Grimberg <sagi@...mberg.me>,
Hannes Reinecke <hare@...e.de>,
James Smart <jsmart2021@...il.com>,
Daniel Wagner <dwagner@...e.de>
Subject: [RFC v3 3/9] nvme-tcp: move error and connect work to nvme_ctrl
Move the err_work and connect_work items, which are common to all
fabrics transports, into struct nvme_ctrl so that we are able to use
them in fabrics.c later.
Signed-off-by: Daniel Wagner <dwagner@...e.de>
---
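Note (not part of the commit message): once err_work and connect_work
live in struct nvme_ctrl, a transport-independent helper in fabrics.c
can queue them without the to_tcp_ctrl() indirection. The sketch below
is only an illustration of where the series is heading; the name
nvmf_error_recovery() and its placement in fabrics.c are assumptions,
and the body simply mirrors nvme_tcp_error_recovery() as modified by
this patch:

    /*
     * Hypothetical fabrics.c helper; name and placement are
     * assumptions, the body matches the TCP transport's error
     * recovery path after this patch.
     */
    static void nvmf_error_recovery(struct nvme_ctrl *ctrl)
    {
            if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
                    return;

            dev_warn(ctrl->device, "starting error recovery\n");
            queue_work(nvme_reset_wq, &ctrl->err_work);
    }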
drivers/nvme/host/nvme.h | 3 +++
drivers/nvme/host/tcp.c | 24 ++++++++++--------------
2 files changed, 13 insertions(+), 14 deletions(-)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index bf46f122e9e1..5aa30b00dd17 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -339,6 +339,9 @@ struct nvme_ctrl {
struct work_struct ana_work;
#endif
+ struct work_struct err_work;
+ struct delayed_work connect_work;
+
#ifdef CONFIG_NVME_AUTH
struct work_struct dhchap_auth_work;
struct mutex dhchap_auth_mutex;
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 273c1f2760a4..74ccc84d244a 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -166,8 +166,6 @@ struct nvme_tcp_ctrl {
struct sockaddr_storage src_addr;
struct nvme_ctrl ctrl;
- struct work_struct err_work;
- struct delayed_work connect_work;
struct nvme_tcp_request async_req;
u32 io_queues[HCTX_MAX_TYPES];
};
@@ -527,7 +525,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
return;
dev_warn(ctrl->device, "starting error recovery\n");
- queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
+ queue_work(nvme_reset_wq, &ctrl->err_work);
}
static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
@@ -2025,7 +2023,7 @@ static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
if (nvmf_should_reconnect(ctrl)) {
dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
ctrl->opts->reconnect_delay);
- queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
+ queue_delayed_work(nvme_wq, &ctrl->connect_work,
ctrl->opts->reconnect_delay * HZ);
} else {
dev_info(ctrl->device, "Removing controller...\n");
@@ -2107,9 +2105,8 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
{
- struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
- struct nvme_tcp_ctrl, connect_work);
- struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
+ struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
+ struct nvme_ctrl, connect_work);
++ctrl->nr_reconnects;
@@ -2131,9 +2128,8 @@ static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
static void nvme_tcp_error_recovery_work(struct work_struct *work)
{
- struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
- struct nvme_tcp_ctrl, err_work);
- struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
+ struct nvme_ctrl *ctrl = container_of(work,
+ struct nvme_ctrl, err_work);
nvme_stop_keep_alive(ctrl);
flush_work(&ctrl->async_event_work);
@@ -2194,8 +2190,8 @@ static void nvme_reset_ctrl_work(struct work_struct *work)
static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
{
- flush_work(&to_tcp_ctrl(ctrl)->err_work);
- cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
+ flush_work(&ctrl->err_work);
+ cancel_delayed_work_sync(&ctrl->connect_work);
}
static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
@@ -2581,9 +2577,9 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
ctrl->ctrl.sqsize = opts->queue_size - 1;
ctrl->ctrl.kato = opts->kato;
- INIT_DELAYED_WORK(&ctrl->connect_work,
+ INIT_DELAYED_WORK(&ctrl->ctrl.connect_work,
nvme_tcp_reconnect_ctrl_work);
- INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
+ INIT_WORK(&ctrl->ctrl.err_work, nvme_tcp_error_recovery_work);
INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
if (!(opts->mask & NVMF_OPT_TRSVCID)) {
--
2.40.0