Message-Id: <20240717075208.87324-3-jacky_gam_2001@163.com>
Date: Wed, 17 Jul 2024 15:52:08 +0800
From: Ping Gan <jacky_gam_2001@....com>
To: hare@...e.de,
sagi@...mberg.me,
hch@....de,
kch@...dia.com,
linux-nvme@...ts.infradead.org,
linux-kernel@...r.kernel.org
Cc: ping.gan@...l.com,
Ping Gan <jacky_gam_2001@....com>
Subject: [PATCH 2/2] nvmet-rdma: add unbound workqueue support
Add a module parameter, use_unbound_wq, to let nvmet-rdma poll RDMA CQ
completions on an unbound workqueue (IB_POLL_UNBOUND_WORKQUEUE) instead
of the default bound workqueue (IB_POLL_WORKQUEUE). With an unbound
workqueue the completion work is not pinned to the CPU of the completion
vector, so the scheduler is free to place it on any allowed CPU. The
parameter is read-only (0444) and therefore must be set at module load
time.
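A minimal usage sketch (not part of the patch; it assumes the target
RDMA transport is built as the nvmet_rdma module and loaded manually):

  # select the unbound workqueue when loading the module
  modprobe nvmet_rdma use_unbound_wq=1

  # the 0444 permission exposes the value read-only via sysfs
  cat /sys/module/nvmet_rdma/parameters/use_unbound_wq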
Signed-off-by: Ping Gan <jacky_gam_2001@....com>
---
drivers/nvme/target/rdma.c | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 1eff8ca6a5f1..bfd7106316bc 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -155,6 +155,10 @@ static int nvmet_rdma_srq_size = 1024;
 module_param_cb(srq_size, &srq_size_ops, &nvmet_rdma_srq_size, 0644);
 MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, should >= 256 (default: 1024)");
 
+static bool use_unbound_wq;
+module_param(use_unbound_wq, bool, 0444);
+MODULE_PARM_DESC(use_unbound_wq, "use unbound workqueue to handle IO requests (default: false)");
+
 static DEFINE_IDA(nvmet_rdma_queue_ida);
 static LIST_HEAD(nvmet_rdma_queue_list);
 static DEFINE_MUTEX(nvmet_rdma_queue_mutex);
@@ -1259,7 +1263,11 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
 	 */
 	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
 
-	queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1,
+	if (use_unbound_wq)
+		queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1,
+			queue->comp_vector, IB_POLL_UNBOUND_WORKQUEUE);
+	else
+		queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1,
 				   queue->comp_vector, IB_POLL_WORKQUEUE);
 	if (IS_ERR(queue->cq)) {
 		ret = PTR_ERR(queue->cq);
--
2.26.2