Message-Id: <20230504091259.29100-3-dwagner@suse.de>
Date: Thu, 4 May 2023 11:12:52 +0200
From: Daniel Wagner <dwagner@...e.de>
To: linux-nvme@...ts.infradead.org
Cc: linux-kernel@...r.kernel.org, Chaitanya Kulkarni <kch@...dia.com>,
Sagi Grimberg <sagi@...mberg.me>,
Hannes Reinecke <hare@...e.de>,
James Smart <jsmart2021@...il.com>,
Daniel Wagner <dwagner@...e.de>
Subject: [RFC v3 2/9] nvme-rdma: factor rdma specific queue init code out
In preparation for moving common code from the fabrics drivers into
fabrics.c, move the RDMA specific queue initialization code into a
separate function.
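
To illustrate the direction, a common helper in fabrics.c could later
drive queue setup and only call back into the transport for the
transport specific pieces. The sketch below is purely illustrative;
the ops structure and callback names are assumptions and not part of
this patch:

	struct nvme_ctrl;

	/* hypothetical per-transport callbacks a fabrics helper could use */
	struct nvme_fabrics_queue_ops {
		int  (*alloc_queue)(struct nvme_ctrl *ctrl, int qid);
		int  (*init_queue)(struct nvme_ctrl *ctrl, int qid);
		void (*deinit_queue)(struct nvme_ctrl *ctrl, int qid);
		void (*free_queue)(struct nvme_ctrl *ctrl, int qid);
	};

	static int nvmf_setup_queue(struct nvme_ctrl *ctrl,
				    const struct nvme_fabrics_queue_ops *ops,
				    int qid)
	{
		int ret;

		ret = ops->alloc_queue(ctrl, qid);
		if (ret)
			return ret;

		/*
		 * Transport specific post-allocation work, e.g. the RDMA
		 * async event SQE and max_hw_sectors setup factored out
		 * into nvme_rdma_init_queue() by this patch.
		 */
		ret = ops->init_queue(ctrl, qid);
		if (ret)
			ops->free_queue(ctrl, qid);
		return ret;
	}

With nvme_rdma_init_queue()/nvme_rdma_deinit_queue() split out, the
RDMA driver could plug into hooks of that shape without further
restructuring.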
Signed-off-by: Daniel Wagner <dwagner@...e.de>
---
drivers/nvme/host/rdma.c | 65 ++++++++++++++++++++++++++++------------
1 file changed, 46 insertions(+), 19 deletions(-)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 92e5d0ccf3a9..a78c66278b19 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -826,15 +826,16 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
nvme_rdma_free_queue(&ctrl->ctrl, 0);
}
-static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
- bool new)
+static int nvme_rdma_init_queue(struct nvme_ctrl *nctrl, int qid)
{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
bool pi_capable = false;
int error;
- error = nvme_rdma_alloc_queue(&ctrl->ctrl, 0);
- if (error)
- return error;
+ if (qid != 0)
+ /* only admin queue needs additional work. */
+ return 0;
+
ctrl->device = ctrl->queues[0].device;
ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev);
@@ -854,6 +855,43 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
*/
error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
sizeof(struct nvme_command), DMA_TO_DEVICE);
+ if (error)
+ return error;
+
+ ctrl->ctrl.max_segments = ctrl->max_fr_pages;
+ ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
+ if (pi_capable)
+ ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages;
+ else
+ ctrl->ctrl.max_integrity_segments = 0;
+
+ return 0;
+}
+
+static void nvme_rdma_deinit_queue(struct nvme_ctrl *nctrl, int qid)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+
+ if (qid != 0)
+ return;
+
+ if (ctrl->async_event_sqe.data) {
+ nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+ ctrl->async_event_sqe.data = NULL;
+ }
+}
+
+static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
+ bool new)
+{
+ int error;
+
+ error = nvme_rdma_alloc_queue(&ctrl->ctrl, 0);
+ if (error)
+ return error;
+
+ error = nvme_rdma_init_queue(&ctrl->ctrl, 0);
if (error)
goto out_free_queue;
@@ -863,7 +901,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
sizeof(struct nvme_rdma_request) +
NVME_RDMA_DATA_SGL_SIZE);
if (error)
- goto out_free_async_qe;
+ goto out_deinit_admin_queue;
}
@@ -875,13 +913,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
if (error)
goto out_stop_queue;
- ctrl->ctrl.max_segments = ctrl->max_fr_pages;
- ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
- if (pi_capable)
- ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages;
- else
- ctrl->ctrl.max_integrity_segments = 0;
-
nvme_unquiesce_admin_queue(&ctrl->ctrl);
error = nvme_init_ctrl_finish(&ctrl->ctrl, false);
@@ -899,12 +930,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
out_remove_admin_tag_set:
if (new)
nvme_remove_admin_tag_set(&ctrl->ctrl);
-out_free_async_qe:
- if (ctrl->async_event_sqe.data) {
- nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
- sizeof(struct nvme_command), DMA_TO_DEVICE);
- ctrl->async_event_sqe.data = NULL;
- }
+out_deinit_admin_queue:
+ nvme_rdma_deinit_queue(&ctrl->ctrl, 0);
out_free_queue:
nvme_rdma_free_queue(&ctrl->ctrl, 0);
return error;
--
2.40.0