[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20230413132941.2489795-1-fengli@smartx.com>
Date: Thu, 13 Apr 2023 21:29:41 +0800
From: Li Feng <fengli@...rtx.com>
To: Keith Busch <kbusch@...nel.org>, Jens Axboe <axboe@...com>,
Christoph Hellwig <hch@....de>,
Sagi Grimberg <sagi@...mberg.me>,
linux-nvme@...ts.infradead.org (open list:NVM EXPRESS DRIVER),
linux-kernel@...r.kernel.org (open list)
Cc: lifeng1519@...il.com, Li Feng <fengli@...rtx.com>
Subject: [PATCH v2] nvme/tcp: Add support to set the tcp worker cpu affinity
The default worker affinity policy uses all online CPUs, i.e. 0 to N-1.
However, some CPUs may be busy with other workloads, which can degrade
nvme-tcp performance.
This patch adds a module parameter to set the cpu affinity for the nvme-tcp
socket worker threads. The parameter is a comma separated list of CPU
numbers. The list is parsed and the resulting cpumask is used to set the
affinity of the socket worker threads. If the list is empty or the
parsing fails, the default affinity is used.
Signed-off-by: Li Feng <fengli@...rtx.com>
---
V2 - Fix missing static reported by lkp
drivers/nvme/host/tcp.c | 54 ++++++++++++++++++++++++++++++++++++++++-
1 file changed, 53 insertions(+), 1 deletion(-)
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 49c9e7bc9116..47748de5159b 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -31,6 +31,18 @@ static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
+/*
+ * Support for specifying the CPU affinity for the nvme-tcp socket worker
+ * threads. This is a comma-separated list of CPU numbers. The list is
+ * parsed and the resulting cpumask is used to set the affinity of the
+ * socket worker threads. If the list is empty or the parsing fails, the
+ * default affinity (all online CPUs) is used.
+ */
+static char *cpu_affinity_list;
+module_param(cpu_affinity_list, charp, 0644);
+MODULE_PARM_DESC(cpu_affinity_list, "nvme tcp socket worker cpu affinity list");
+
+/* Effective worker affinity; derived from cpu_affinity_list at queue setup. */
+static struct cpumask cpu_affinity_mask;
+
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* lockdep can detect a circular dependency of the form
* sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
@@ -1483,6 +1495,41 @@ static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
ctrl->io_queues[HCTX_TYPE_POLL];
}
+/*
+ * Parse @buf as a CPU list (see bitmap_parselist()) and update
+ * cpu_affinity_mask with the subset of listed CPUs that are currently
+ * online.  If no listed CPU is online, cpu_affinity_mask is left
+ * unchanged.  Returns 0 on success or a negative errno on parse or
+ * allocation failure.
+ *
+ * NOTE(review): cpu_affinity_mask is written here and read in
+ * nvme_tcp_set_queue_io_cpu() without any locking, so concurrent queue
+ * setup may observe a partially updated mask — confirm whether this
+ * needs serialization.
+ */
+static int update_cpu_affinity(const char *buf)
+{
+	cpumask_var_t new_value;
+	cpumask_var_t dst_value;
+	int err = 0;
+
+	if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
+		return -ENOMEM;
+
+	err = bitmap_parselist(buf, cpumask_bits(new_value), nr_cpumask_bits);
+	if (err)
+		goto free_new_cpumask;
+
+	if (!zalloc_cpumask_var(&dst_value, GFP_KERNEL)) {
+		err = -ENOMEM;
+		goto free_new_cpumask;
+	}
+
+	/*
+	 * Restrict the requested mask to online CPUs.  If the intersection
+	 * is empty, keep the previous cpu_affinity_mask untouched.
+	 */
+	if (cpumask_and(dst_value, new_value, cpu_online_mask))
+		cpumask_copy(&cpu_affinity_mask, dst_value);
+
+	free_cpumask_var(dst_value);
+
+free_new_cpumask:
+	free_cpumask_var(new_value);
+	if (err)
+		pr_err("failed to update cpu affinity mask, bad affinity list [%s], err %d\n",
+			buf, err);
+	return err;
+}
+
+
static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
{
struct nvme_tcp_ctrl *ctrl = queue->ctrl;
@@ -1496,7 +1543,12 @@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
else if (nvme_tcp_poll_queue(queue))
n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
ctrl->io_queues[HCTX_TYPE_READ] - 1;
-	queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+
+	/*
+	 * Re-parse the module parameter on every queue setup so runtime
+	 * (0644) changes take effect; fall back to all online CPUs when
+	 * the list is unset or fails to parse.
+	 */
+	if (!cpu_affinity_list || update_cpu_affinity(cpu_affinity_list) != 0)
+		cpumask_copy(&cpu_affinity_mask, cpu_online_mask);
+
+	queue->io_cpu = cpumask_next_wrap(n - 1, &cpu_affinity_mask, -1, false);
}
static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
--
2.40.0
Powered by blists - more mailing lists