Message-ID: <20260109210647.3849008-4-anthony.l.nguyen@intel.com>
Date: Fri, 9 Jan 2026 13:06:40 -0800
From: Tony Nguyen <anthony.l.nguyen@...el.com>
To: davem@...emloft.net,
kuba@...nel.org,
pabeni@...hat.com,
edumazet@...gle.com,
andrew+netdev@...n.ch,
netdev@...r.kernel.org
Cc: Michal Swiatkowski <michal.swiatkowski@...ux.intel.com>,
anthony.l.nguyen@...el.com,
Aleksandr Loktionov <aleksandr.loktionov@...el.com>,
Rafal Romanowski <rafal.romanowski@...el.com>
Subject: [PATCH net-next 3/5] ice: use netif_get_num_default_rss_queues()
From: Michal Swiatkowski <michal.swiatkowski@...ux.intel.com>
On some high core count systems (like AMD EPYC Bergamo or Intel Clearwater
Forest), loading the ice driver with default values can exhaust the
available queues/IRQs, leaving no additional resources for SR-IOV. In most
cases there is no performance benefit to using more queues than half the
number of online CPUs. Limit the default to that value by using the
generic netif_get_num_default_rss_queues() helper.
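For illustration only, a minimal userspace sketch of the sizing this
relies on; this is not the in-kernel implementation, which derives the
count from the CPU topology so that SMT siblings do not get their own
queue:

/* Hypothetical approximation, not kernel code: with SMT enabled,
 * netif_get_num_default_rss_queues() comes out to roughly one queue per
 * physical core, i.e. about half of num_online_cpus().
 */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static long approx_default_rss_queues(bool smt_enabled)
{
	long cpus = sysconf(_SC_NPROCESSORS_ONLN);	/* ~num_online_cpus() */

	return smt_enabled ? cpus / 2 : cpus;
}

int main(void)
{
	printf("approx default RSS queues: %ld\n",
	       approx_default_rss_queues(true));
	return 0;
}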
The number of queues can still be raised up to num_online_cpus() using
ethtool:
$ ethtool -L ethX combined $(nproc)
This change affects only the default queue count.
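Purely as an illustrative sketch (not driver code), the rough per-PF
default MSI-X demand before and after this change on a hypothetical
256-thread system with SMT and flow director enabled; the
ICE_MIN_LAN_OICR_MSIX and ICE_FDIR_MSIX values below are assumed small
constants, only the shape of the calculation matters:

#include <stdio.h>

int main(void)
{
	int online_cpus = 256;			/* num_online_cpus() */
	int default_rss = online_cpus / 2;	/* netif_get_num_default_rss_queues(), SMT on */
	int oicr = 1;				/* assumed ICE_MIN_LAN_OICR_MSIX */
	int fdir = 2;				/* assumed ICE_FDIR_MSIX */
	int rdma_aeq = 1;			/* ICE_RDMA_AEQ_MSIX, see the ice_irq.c hunk below */

	int before = oicr + online_cpus + fdir + online_cpus + rdma_aeq;
	int after  = oicr + default_rss + fdir + default_rss + rdma_aeq;

	printf("default MSI-X per PF: before=%d, after=%d\n", before, after);
	return 0;
}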
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@...el.com>
Signed-off-by: Michal Swiatkowski <michal.swiatkowski@...ux.intel.com>
Tested-by: Rafal Romanowski <rafal.romanowski@...el.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@...el.com>
---
drivers/net/ethernet/intel/ice/ice_irq.c | 5 +++--
drivers/net/ethernet/intel/ice/ice_lib.c | 12 ++++++++----
2 files changed, 11 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c
index 30801fd375f0..1d9b2d646474 100644
--- a/drivers/net/ethernet/intel/ice/ice_irq.c
+++ b/drivers/net/ethernet/intel/ice/ice_irq.c
@@ -106,9 +106,10 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf,
#define ICE_RDMA_AEQ_MSIX 1
static int ice_get_default_msix_amount(struct ice_pf *pf)
{
- return ICE_MIN_LAN_OICR_MSIX + num_online_cpus() +
+ return ICE_MIN_LAN_OICR_MSIX + netif_get_num_default_rss_queues() +
(test_bit(ICE_FLAG_FD_ENA, pf->flags) ? ICE_FDIR_MSIX : 0) +
- (ice_is_rdma_ena(pf) ? num_online_cpus() + ICE_RDMA_AEQ_MSIX : 0);
+ (ice_is_rdma_ena(pf) ? netif_get_num_default_rss_queues() +
+ ICE_RDMA_AEQ_MSIX : 0);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 15621707fbf8..44f3c2bab308 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -159,12 +159,14 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
static u16 ice_get_rxq_count(struct ice_pf *pf)
{
- return min(ice_get_avail_rxq_count(pf), num_online_cpus());
+ return min(ice_get_avail_rxq_count(pf),
+ netif_get_num_default_rss_queues());
}
static u16 ice_get_txq_count(struct ice_pf *pf)
{
- return min(ice_get_avail_txq_count(pf), num_online_cpus());
+ return min(ice_get_avail_txq_count(pf),
+ netif_get_num_default_rss_queues());
}
/**
@@ -907,13 +909,15 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_CHNL)
vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size);
else
- vsi->rss_size = min_t(u16, num_online_cpus(),
+ vsi->rss_size = min_t(u16,
+ netif_get_num_default_rss_queues(),
max_rss_size);
vsi->rss_lut_type = ICE_LUT_PF;
break;
case ICE_VSI_SF:
vsi->rss_table_size = ICE_LUT_VSI_SIZE;
- vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
+ vsi->rss_size = min_t(u16, netif_get_num_default_rss_queues(),
+ max_rss_size);
vsi->rss_lut_type = ICE_LUT_VSI;
break;
case ICE_VSI_VF:
--
2.47.1