Message-Id: <20210211231803.25463-1-melanieplageman@gmail.com>
Date:   Thu, 11 Feb 2021 23:18:03 +0000
From:   Melanie Plageman <melanieplageman@...il.com>
To:     linux-scsi@...r.kernel.org
Cc:     andres@...razel.de, kys@...rosoft.com, haiyangz@...rosoft.com,
        sthemmin@...rosoft.com, wei.liu@...nel.org, jejb@...ux.ibm.com,
        martin.petersen@...cle.com, linux-hyperv@...r.kernel.org,
        linux-kernel@...r.kernel.org,
        "Melanie Plageman (Microsoft)" <melanieplageman@...il.com>
Subject: [PATCH v3] scsi: storvsc: Parameterize number of hardware queues

From: "Melanie Plageman (Microsoft)" <melanieplageman@...il.com>

Add the ability to set the number of hardware queues with a new module
parameter, storvsc_max_hw_queues. The default remains the number of
present CPUs. This is useful in some environments (e.g. Microsoft
Azure) where decreasing the number of hardware queues has been shown to
improve performance.
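
The parameter can be set at module load time or, because it is created
with S_IWUSR, changed at runtime through sysfs. A usage sketch
(assuming the driver is built as the hv_storvsc module; the value 4 is
only an example):

    modprobe hv_storvsc storvsc_max_hw_queues=4

    echo 4 > /sys/module/hv_storvsc/parameters/storvsc_max_hw_queues

Note that a runtime write only affects devices probed after the write,
since nr_hw_queues is assigned in storvsc_probe().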

Signed-off-by: Melanie Plageman (Microsoft) <melanieplageman@...il.com>
---
 drivers/scsi/storvsc_drv.c | 28 ++++++++++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)

diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 2e4fa77445fd..a64e6664c915 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -378,10 +378,14 @@ static u32 max_outstanding_req_per_channel;
 static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth);
 
 static int storvsc_vcpus_per_sub_channel = 4;
+static int storvsc_max_hw_queues = -1;
 
 module_param(storvsc_ringbuffer_size, int, S_IRUGO);
 MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
 
+module_param(storvsc_max_hw_queues, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(storvsc_max_hw_queues, "Maximum number of hardware queues");
+
 module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO);
 MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels");
 
@@ -1897,6 +1901,7 @@ static int storvsc_probe(struct hv_device *device,
 {
 	int ret;
 	int num_cpus = num_online_cpus();
+	int num_present_cpus = num_present_cpus();
 	struct Scsi_Host *host;
 	struct hv_host_device *host_dev;
 	bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
@@ -2004,8 +2009,19 @@ static int storvsc_probe(struct hv_device *device,
 	 * For non-IDE disks, the host supports multiple channels.
 	 * Set the number of HW queues we are supporting.
 	 */
-	if (!dev_is_ide)
-		host->nr_hw_queues = num_present_cpus();
+	if (!dev_is_ide) {
+		if (storvsc_max_hw_queues == -1)
+			host->nr_hw_queues = num_present_cpus;
+		else if (storvsc_max_hw_queues > num_present_cpus ||
+			 storvsc_max_hw_queues == 0 ||
+			 storvsc_max_hw_queues < -1) {
+			storvsc_log(device, STORVSC_LOGGING_WARN,
+				"Resetting invalid storvsc_max_hw_queues value to default.\n");
+			host->nr_hw_queues = num_present_cpus;
+			storvsc_max_hw_queues = -1;
+		} else
+			host->nr_hw_queues = storvsc_max_hw_queues;
+	}
 
 	/*
 	 * Set the error handler work queue.
@@ -2169,6 +2185,14 @@ static int __init storvsc_drv_init(void)
 		vmscsi_size_delta,
 		sizeof(u64)));
 
+	if (storvsc_max_hw_queues > num_present_cpus() ||
+		storvsc_max_hw_queues == 0 ||
+		storvsc_max_hw_queues < -1) {
+		pr_warn("Setting storvsc_max_hw_queues to -1. %d is invalid.\n",
+			storvsc_max_hw_queues);
+		storvsc_max_hw_queues = -1;
+	}
+
 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
 	fc_transport_template = fc_attach_transport(&fc_transport_functions);
 	if (!fc_transport_template)
-- 
2.20.1
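
A note on the validation: out-of-range values are checked in two
places. storvsc_drv_init() resets an invalid value to the default (-1)
at module load, and storvsc_probe() repeats the check, presumably to
catch values written through sysfs after load (the parameter is
S_IWUSR). For example (module name hv_storvsc assumed; the warning text
is taken from the pr_warn() in the patch, though dmesg adds its own
prefixes):

    modprobe hv_storvsc storvsc_max_hw_queues=0
    dmesg | grep storvsc_max_hw_queues
    # Setting storvsc_max_hw_queues to -1. 0 is invalid.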
