Message-ID: <20231218153105.12717-17-dwagner@suse.de>
Date: Mon, 18 Dec 2023 16:31:04 +0100
From: Daniel Wagner <dwagner@...e.de>
To: linux-nvme@...ts.infradead.org
Cc: linux-kernel@...r.kernel.org,
Christoph Hellwig <hch@....de>,
Sagi Grimberg <sagi@...mberg.me>,
Keith Busch <kbusch@...nel.org>,
James Smart <james.smart@...adcom.com>,
Hannes Reinecke <hare@...e.de>,
Daniel Wagner <dwagner@...e.de>
Subject: [PATCH v3 16/16] nvmet-fc: take ref count on tgtport before delete assoc
We have to ensure that the tgtport is not going away
before we have removed all the associations. Thus take a
reference on the tgtport before scheduling the delete
association work and drop it once the work has run.
Signed-off-by: Daniel Wagner <dwagner@...e.de>
---
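
[ Note, not part of the commit: below is a minimal userspace sketch of
  the "take a reference before deferring work, drop it when the work
  has run" pattern this patch applies. struct port, port_get()/port_put()
  and delete_assoc_work() are made-up stand-ins for illustration only,
  not the kernel API; the real code uses kref, nvmet_fc_tgtport_get/put()
  and nvmet_wq. ]

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct port {
	atomic_int ref;
};

static struct port *port_get(struct port *p)
{
	atomic_fetch_add(&p->ref, 1);
	return p;
}

static void port_put(struct port *p)
{
	/* Last reference dropped: the object may now be freed. */
	if (atomic_fetch_sub(&p->ref, 1) == 1) {
		printf("port freed\n");
		free(p);
	}
}

/* Stands in for the deferred association delete work. */
static void delete_assoc_work(struct port *p)
{
	printf("association deleted, port still valid\n");
	port_put(p);		/* pairs with the get taken at schedule time */
}

static void schedule_delete_assoc(struct port *p)
{
	port_get(p);		/* keep the port alive until the work has run */
	delete_assoc_work(p);	/* a real implementation would queue this */
}

int main(void)
{
	struct port *p = calloc(1, sizeof(*p));

	atomic_init(&p->ref, 1);	/* owner's reference */
	schedule_delete_assoc(p);
	port_put(p);			/* owner drops its reference */
	return 0;
}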
drivers/nvme/target/fc.c | 31 +++++++++++++++++++++++--------
1 file changed, 23 insertions(+), 8 deletions(-)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 30ba4ede333f..455d35ef97eb 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1092,13 +1092,28 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
}
static void
-nvmet_fc_delete_assoc(struct work_struct *work)
+nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+ nvmet_fc_delete_target_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
+}
+
+static void
+nvmet_fc_delete_assoc_work(struct work_struct *work)
{
struct nvmet_fc_tgt_assoc *assoc =
container_of(work, struct nvmet_fc_tgt_assoc, del_work);
+ struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
- nvmet_fc_delete_target_assoc(assoc);
- nvmet_fc_tgt_a_put(assoc);
+ nvmet_fc_delete_assoc(assoc);
+ nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+ nvmet_fc_tgtport_get(assoc->tgtport);
+ queue_work(nvmet_wq, &assoc->del_work);
}
static struct nvmet_fc_tgt_assoc *
@@ -1129,7 +1144,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
assoc->a_id = idx;
INIT_LIST_HEAD(&assoc->a_list);
kref_init(&assoc->ref);
- INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
+ INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work);
atomic_set(&assoc->terminating, 0);
while (needrandom) {
@@ -1489,7 +1504,7 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (!nvmet_fc_tgt_a_get(assoc))
continue;
- queue_work(nvmet_wq, &assoc->del_work);
+ nvmet_fc_schedule_delete_assoc(assoc);
nvmet_fc_tgt_a_put(assoc);
}
rcu_read_unlock();
@@ -1542,7 +1557,7 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
continue;
assoc->hostport->invalid = 1;
noassoc = false;
- queue_work(nvmet_wq, &assoc->del_work);
+ nvmet_fc_schedule_delete_assoc(assoc);
nvmet_fc_tgt_a_put(assoc);
}
spin_unlock_irqrestore(&tgtport->lock, flags);
@@ -1587,7 +1602,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
nvmet_fc_tgtport_put(tgtport);
if (found_ctrl) {
- queue_work(nvmet_wq, &assoc->del_work);
+ nvmet_fc_schedule_delete_assoc(assoc);
nvmet_fc_tgt_a_put(assoc);
return;
}
@@ -1894,7 +1909,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
nvmet_fc_xmt_ls_rsp(tgtport, oldls);
}
- queue_work(nvmet_wq, &assoc->del_work);
+ nvmet_fc_schedule_delete_assoc(assoc);
nvmet_fc_tgt_a_put(assoc);
return false;
--
2.43.0