Message-ID: <VI1PR05MB4431B6A2DF97E31C9103A364992A2@VI1PR05MB4431.eurprd05.prod.outlook.com>
Date: Wed, 13 Mar 2024 21:07:19 +0000
From: Anton Gavriliuk <Anton.Gavriliuk@....ua>
To: Li Feng <fengli@...rtx.com>, Keith Busch <kbusch@...nel.org>, Jens Axboe
	<axboe@...nel.dk>, Christoph Hellwig <hch@....de>, Sagi Grimberg
	<sagi@...mberg.me>, "open list:NVM EXPRESS DRIVER"
	<linux-nvme@...ts.infradead.org>, open list <linux-kernel@...r.kernel.org>
Subject: RE: [PATCH v2 2/2] nvme/tcp: Add wq_unbound modparam for nvme_tcp_wq

Thanks, it works.

Will it be added to the 6.9 mainline by default?

Anton

-----Original Message-----
From: Li Feng <fengli@...rtx.com>
Sent: Wednesday, March 13, 2024 2:38 PM
To: Keith Busch <kbusch@...nel.org>; Jens Axboe <axboe@...nel.dk>; Christoph Hellwig <hch@....de>; Sagi Grimberg <sagi@...mberg.me>; open list:NVM EXPRESS DRIVER <linux-nvme@...ts.infradead.org>; open list <linux-kernel@...r.kernel.org>
Cc: Anton Gavriliuk <Anton.Gavriliuk@....ua>; Li Feng <fengli@...rtx.com>
Subject: [PATCH v2 2/2] nvme/tcp: Add wq_unbound modparam for nvme_tcp_wq

By default, nvme_tcp_wq uses all CPUs to process tasks. Sometimes it is necessary to set CPU affinity to improve performance.

This adds a new module parameter, wq_unbound. If it is set to true, users can configure CPU affinity through /sys/devices/virtual/workqueue/nvme_tcp_wq/cpumask.
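
For illustration only (not part of the patch): on a system where CPUs 0-3 should handle the nvme-tcp I/O work, usage could look roughly like the lines below. The hexadecimal mask value is an assumption based on the global /sys/devices/virtual/workqueue/cpumask attribute, which takes a hex CPU mask.

  modprobe nvme_tcp wq_unbound=Y
  echo f > /sys/devices/virtual/workqueue/nvme_tcp_wq/cpumask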

Signed-off-by: Li Feng <fengli@...rtx.com>
---
 drivers/nvme/host/tcp.c | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 2ec1186db0a3..34a882b2ec53 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -36,6 +36,14 @@ static int so_priority;
 module_param(so_priority, int, 0644);
 MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");

+/*
+ * Use the unbound workqueue for nvme_tcp_wq, then we can set the cpu affinity
+ * from sysfs.
+ */
+static bool wq_unbound;
+module_param(wq_unbound, bool, 0644);
+MODULE_PARM_DESC(wq_unbound, "Use unbound workqueue for nvme-tcp IO context (default false)");
+
 /*
  * TLS handshake timeout
  */
@@ -1551,7 +1559,10 @@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
        else if (nvme_tcp_poll_queue(queue))
                n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
                                ctrl->io_queues[HCTX_TYPE_READ] - 1;
-       queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+       if (wq_unbound)
+               queue->io_cpu = WORK_CPU_UNBOUND;
+       else
+               queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
 }

 static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
@@ -2790,6 +2801,8 @@ static struct nvmf_transport_ops nvme_tcp_transport = {

 static int __init nvme_tcp_init_module(void)
 {
+       unsigned int wq_flags = WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_SYSFS;
+
        BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
        BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
        BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
@@ -2799,8 +2812,10 @@ static int __init nvme_tcp_init_module(void)
        BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
        BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);

-       nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
-                       WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_SYSFS, 0);
+       if (wq_unbound)
+               wq_flags |= WQ_UNBOUND;
+
+       nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq", wq_flags, 0);
        if (!nvme_tcp_wq)
                return -ENOMEM;

--
2.44.0
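
For reference, a minimal self-contained sketch (not from the patch) of the same pattern the hunks above use: a bool module parameter toggles WQ_UNBOUND on a WQ_SYSFS workqueue, and work is queued either on a fixed CPU or with WORK_CPU_UNBOUND. The names demo_wq and demo_work, and the pinned CPU, are illustrative assumptions.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

static bool wq_unbound;
module_param(wq_unbound, bool, 0644);
MODULE_PARM_DESC(wq_unbound, "Use an unbound workqueue (default false)");

static struct workqueue_struct *demo_wq;

static void demo_work_fn(struct work_struct *work)
{
	/* raw_ variant: an unbound worker may migrate, so this is only a hint. */
	pr_info("demo work ran on CPU %d\n", raw_smp_processor_id());
}

static DECLARE_WORK(demo_work, demo_work_fn);

static int __init demo_init(void)
{
	unsigned int wq_flags = WQ_MEM_RECLAIM | WQ_SYSFS;

	if (wq_unbound)
		wq_flags |= WQ_UNBOUND;	/* exposes .../workqueue/demo_wq/cpumask */

	demo_wq = alloc_workqueue("demo_wq", wq_flags, 0);
	if (!demo_wq)
		return -ENOMEM;

	/* Bound case: pin to CPU 0; unbound case: let the workqueue place it. */
	queue_work_on(wq_unbound ? WORK_CPU_UNBOUND : 0, demo_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);	/* drains any pending demo_work first */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");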
