Message-Id: <20190829181311.7562-52-sashal@kernel.org>
Date: Thu, 29 Aug 2019 14:12:47 -0400
From: Sasha Levin <sashal@...nel.org>
To: linux-kernel@...r.kernel.org, stable@...r.kernel.org
Cc: James Smart <jsmart2021@...il.com>,
Dick Kennedy <dick.kennedy@...adcom.com>,
Ming Lei <ming.lei@...hat.com>,
"Ewan D . Milne" <emilne@...hat.com>,
"Martin K . Petersen" <martin.petersen@...cle.com>,
Sasha Levin <sashal@...nel.org>, linux-scsi@...r.kernel.org
Subject: [PATCH AUTOSEL 5.2 52/76] scsi: lpfc: Mitigate high memory pre-allocation by SCSI-MQ
From: James Smart <jsmart2021@...il.com>
[ Upstream commit 77ffd3465ba837e9dc714e17b014e77b2eae765a ]
When SCSI-MQ is enabled, the SCSI-MQ layers pre-allocate MQ resources based
on the shost values set by the driver. Newer versions of the driver attempt
to set nr_hw_queues to the CPU count, so the multipliers become excessive,
with a single shost's SCSI-MQ pre-allocation reaching into the multiple-GByte
range. NPIV, which creates additional shosts, only multiplies this overhead.
On lower-memory systems this can exhaust system memory very quickly,
resulting in a system crash, or in failures in the driver or elsewhere due
to low-memory conditions.
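
As a rough illustration (the figures here are assumed examples, not values
taken from the driver or the block layer), blk-mq pre-allocates a request
pool per hardware queue, so the per-shost footprint scales roughly as
nr_hw_queues * can_queue * per-request allocation:

    128 queues * 1024 requests * ~2 KB  ~=  256 MB for a single shost

and every additional NPIV shost repeats that pre-allocation. A stand-alone
sketch of the clamp this patch introduces, using assumed node and queue
counts, follows the diff below.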
After testing several scenarios, the situation can be mitigated by limiting
the value set in shost->nr_hw_queues to 4. Although the shost value is
lowered, the driver still has per-CPU hardware queues of its own that allow
per-CPU parallelization. Testing revealed that even with this smallish
nr_hw_queues value for SCSI-MQ, performance levels remained near maximum
with the within-driver affinitization.

A module parameter was created to make the value used for nr_hw_queues
tunable.
Signed-off-by: Dick Kennedy <dick.kennedy@...adcom.com>
Signed-off-by: James Smart <jsmart2021@...il.com>
Reviewed-by: Ming Lei <ming.lei@...hat.com>
Reviewed-by: Ewan D. Milne <emilne@...hat.com>
Signed-off-by: Martin K. Petersen <martin.petersen@...cle.com>
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
drivers/scsi/lpfc/lpfc.h | 1 +
drivers/scsi/lpfc/lpfc_attr.c | 15 +++++++++++++++
drivers/scsi/lpfc/lpfc_init.c | 10 ++++++----
drivers/scsi/lpfc/lpfc_sli4.h | 5 +++++
4 files changed, 27 insertions(+), 4 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index aafcffaa25f71..4604e1bc334c0 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -822,6 +822,7 @@ struct lpfc_hba {
uint32_t cfg_cq_poll_threshold;
uint32_t cfg_cq_max_proc_limit;
uint32_t cfg_fcp_cpu_map;
+ uint32_t cfg_fcp_mq_threshold;
uint32_t cfg_hdw_queue;
uint32_t cfg_irq_chann;
uint32_t cfg_suppress_rsp;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index d4c65e2109e2f..353da12d797ba 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -5640,6 +5640,19 @@ LPFC_ATTR_RW(nvme_oas, 0, 0, 1,
LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
"Embed NVME Command in WQE");
+/*
+ * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
+ * the driver will advertise it supports to the SCSI layer.
+ *
+ * 0 = Set nr_hw_queues by the number of CPUs or HW queues.
+ * 1,128 = Manually specify the maximum nr_hw_queue value to be set,
+ *
+ * Value range is [0,128]. Default value is 8.
+ */
+LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
+ LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
+ "Set the number of SCSI Queues advertised");
+
/*
* lpfc_hdw_queue: Set the number of Hardware Queues the driver
* will advertise it supports to the NVME and SCSI layers. This also
@@ -5961,6 +5974,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_cq_poll_threshold,
&dev_attr_lpfc_cq_max_proc_limit,
&dev_attr_lpfc_fcp_cpu_map,
+ &dev_attr_lpfc_fcp_mq_threshold,
&dev_attr_lpfc_hdw_queue,
&dev_attr_lpfc_irq_chann,
&dev_attr_lpfc_suppress_rsp,
@@ -7042,6 +7056,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
/* Initialize first burst. Target vs Initiator are different. */
lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
+ lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
lpfc_irq_chann_init(phba, lpfc_irq_chann);
lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index eaaef682de251..2fd8f15f99975 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4308,10 +4308,12 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->max_cmd_len = 16;
if (phba->sli_rev == LPFC_SLI_REV4) {
- if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
- shost->nr_hw_queues = phba->cfg_hdw_queue;
- else
- shost->nr_hw_queues = phba->sli4_hba.num_present_cpu;
+ if (!phba->cfg_fcp_mq_threshold ||
+ phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
+ phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
+
+ shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
+ phba->cfg_fcp_mq_threshold);
shost->dma_boundary =
phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 8e4fd1a98023c..986594ec40e2a 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -44,6 +44,11 @@
#define LPFC_HBA_HDWQ_MAX 128
#define LPFC_HBA_HDWQ_DEF 0
+/* FCP MQ queue count limiting */
+#define LPFC_FCP_MQ_THRESHOLD_MIN 0
+#define LPFC_FCP_MQ_THRESHOLD_MAX 128
+#define LPFC_FCP_MQ_THRESHOLD_DEF 8
+
/* Common buffer size to accomidate SCSI and NVME IO buffers */
#define LPFC_COMMON_IO_BUF_SZ 768
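
The clamp added to lpfc_create_port() above can be exercised on its own. The
following is a minimal user-space sketch of that logic, assuming an 8-node
NUMA system and 64 per-CPU driver hardware queues (num_possible_nodes() and
min_t() are replaced with plain C); it illustrates the patch's behaviour and
is not driver code:

	#include <stdio.h>

	/*
	 * Sketch of the nr_hw_queues clamp from lpfc_create_port(),
	 * rewritten for user space. The node and queue counts are
	 * assumed example values, not anything read from hardware.
	 */
	static unsigned int clamp_nr_hw_queues(unsigned int fcp_mq_threshold,
					       unsigned int hdw_queue,
					       unsigned int possible_nodes)
	{
		/* 0 or an over-large threshold falls back to the HW queue count */
		if (!fcp_mq_threshold || fcp_mq_threshold > hdw_queue)
			fcp_mq_threshold = hdw_queue;

		/* advertise at most 2 queues per NUMA node, capped by the threshold */
		return (2 * possible_nodes < fcp_mq_threshold) ?
			2 * possible_nodes : fcp_mq_threshold;
	}

	int main(void)
	{
		/* assumed example: 8 NUMA nodes, 64 driver hardware queues */
		printf("default cap 8 -> nr_hw_queues = %u\n",
		       clamp_nr_hw_queues(8, 64, 8));
		printf("cap 16        -> nr_hw_queues = %u\n",
		       clamp_nr_hw_queues(16, 64, 8));
		printf("cap 0 (off)   -> nr_hw_queues = %u\n",
		       clamp_nr_hw_queues(0, 64, 8));
		return 0;
	}

Because the new attribute is declared with LPFC_ATTR_R, the cap should be
settable only at load time, e.g. "modprobe lpfc lpfc_fcp_mq_threshold=16" or
"lpfc.lpfc_fcp_mq_threshold=16" on the kernel command line, and read back
through sysfs afterwards.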
--
2.20.1