Message-ID: <20251016062250.1461903-1-michal.swiatkowski@linux.intel.com>
Date: Thu, 16 Oct 2025 08:22:50 +0200
From: Michal Swiatkowski <michal.swiatkowski@...ux.intel.com>
To: intel-wired-lan@...ts.osuosl.org
Cc: netdev@...r.kernel.org,
	Michal Swiatkowski <michal.swiatkowski@...ux.intel.com>,
	Jacob Keller <jacob.e.keller@...el.com>
Subject: [PATCH iwl-next v1] ice: lower default irq/queue counts on high-core systems

On some high-core systems, loading the ice driver with default values
can lead to queue/IRQ exhaustion, leaving no resources available for
SR-IOV.

In most cases there is no performance benefit to more than 64 queues,
so limit the default to 64. The number of queues can still be raised
up to num_online_cpus() with ethtool, as shown below.
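
For example, assuming an interface named eth0 (name illustrative),
the queue count can be restored to the online CPU count with:

  ethtool -L eth0 combined "$(nproc)"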

This change only affects the default queue count on systems with more
than 64 cores.
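
As a rough illustration, with both flow director and RDMA enabled the
two per-CPU terms in ice_get_default_msix_amount() are now each capped
at 64, so together they contribute at most 64 + 64 = 128 MSI-X vectors
instead of 2 * num_online_cpus() (e.g. 512 vectors on a 256-core
system).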

Reviewed-by: Jacob Keller <jacob.e.keller@...el.com>
Signed-off-by: Michal Swiatkowski <michal.swiatkowski@...ux.intel.com>
---
 drivers/net/ethernet/intel/ice/ice.h     | 20 ++++++++++++++++++++
 drivers/net/ethernet/intel/ice/ice_irq.c |  6 ++++--
 drivers/net/ethernet/intel/ice/ice_lib.c |  8 ++++----
 3 files changed, 28 insertions(+), 6 deletions(-)
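
A minimal user-space sketch of the capping behavior (illustrative
only, not part of the patch; sysconf() stands in for the kernel's
num_online_cpus(), and the 64 cap mirrors ice_capped_num_cpus()):

  #include <stdio.h>
  #include <unistd.h>

  static unsigned int capped_num_cpus(void)
  {
  	const unsigned int default_cpus = 64;	/* same cap as the driver */
  	long online = sysconf(_SC_NPROCESSORS_ONLN);
  	unsigned int cpus = online > 0 ? (unsigned int)online : 1;

  	return cpus < default_cpus ? cpus : default_cpus;
  }

  int main(void)
  {
  	printf("default per-PF queue count: %u\n", capped_num_cpus());
  	return 0;
  }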

diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 3d4d8b88631b..354ec2950ff3 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -1133,4 +1133,24 @@ static inline struct ice_hw *ice_get_primary_hw(struct ice_pf *pf)
 	else
 		return &pf->adapter->ctrl_pf->hw;
 }
+
+/**
+ * ice_capped_num_cpus - normalize the number of CPUs to a reasonable limit
+ *
+ * This function returns the number of online CPUs, but caps it at a suitable
+ * default to prevent excessive resource allocation on systems with very high
+ * CPU counts.
+ *
+ * Note: the suitable default is currently 64, as reflected in the default_cpus
+ * constant. In most cases there is not much benefit to more than 64 queues,
+ * and 64 is a power of two.
+ *
+ * Return: number of online CPUs, capped at the suitable default.
+ */
+static inline u16 ice_capped_num_cpus(void)
+{
+	const int default_cpus = 64;
+
+	return min(num_online_cpus(), default_cpus);
+}
 #endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c
index 30801fd375f0..df4d847ca858 100644
--- a/drivers/net/ethernet/intel/ice/ice_irq.c
+++ b/drivers/net/ethernet/intel/ice/ice_irq.c
@@ -106,9 +106,11 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf,
 #define ICE_RDMA_AEQ_MSIX 1
 static int ice_get_default_msix_amount(struct ice_pf *pf)
 {
-	return ICE_MIN_LAN_OICR_MSIX + num_online_cpus() +
+	u16 cpus = ice_capped_num_cpus();
+
+	return ICE_MIN_LAN_OICR_MSIX + cpus +
 	       (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? ICE_FDIR_MSIX : 0) +
-	       (ice_is_rdma_ena(pf) ? num_online_cpus() + ICE_RDMA_AEQ_MSIX : 0);
+	       (ice_is_rdma_ena(pf) ? cpus + ICE_RDMA_AEQ_MSIX : 0);
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index bac481e8140d..3c5f8a4b6c6d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -159,12 +159,12 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
 
 static u16 ice_get_rxq_count(struct ice_pf *pf)
 {
-	return min(ice_get_avail_rxq_count(pf), num_online_cpus());
+	return min(ice_get_avail_rxq_count(pf), ice_capped_num_cpus());
 }
 
 static u16 ice_get_txq_count(struct ice_pf *pf)
 {
-	return min(ice_get_avail_txq_count(pf), num_online_cpus());
+	return min(ice_get_avail_txq_count(pf), ice_capped_num_cpus());
 }
 
 /**
@@ -907,13 +907,13 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
 		if (vsi->type == ICE_VSI_CHNL)
 			vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size);
 		else
-			vsi->rss_size = min_t(u16, num_online_cpus(),
+			vsi->rss_size = min_t(u16, ice_capped_num_cpus(),
 					      max_rss_size);
 		vsi->rss_lut_type = ICE_LUT_PF;
 		break;
 	case ICE_VSI_SF:
 		vsi->rss_table_size = ICE_LUT_VSI_SIZE;
-		vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
+		vsi->rss_size = min_t(u16, ice_capped_num_cpus(), max_rss_size);
 		vsi->rss_lut_type = ICE_LUT_VSI;
 		break;
 	case ICE_VSI_VF:
-- 
2.49.0

