[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20241012151304.GK77519@kernel.org>
Date: Sat, 12 Oct 2024 16:13:04 +0100
From: Simon Horman <horms@...nel.org>
To: Michal Swiatkowski <michal.swiatkowski@...ux.intel.com>
Cc: intel-wired-lan@...ts.osuosl.org, netdev@...r.kernel.org,
pawel.chmielewski@...el.com, sridhar.samudrala@...el.com,
jacob.e.keller@...el.com, pio.raczynski@...il.com,
konrad.knitter@...el.com, marcin.szycik@...el.com,
wojciech.drewek@...el.com, nex.sw.ncis.nat.hpm.dev@...el.com,
przemyslaw.kitszel@...el.com, jiri@...nulli.us,
David Laight <David.Laight@...lab.com>
Subject: Re: [iwl-next v4 3/8] ice: get rid of num_lan_msix field
+ David Laight
On Mon, Sep 30, 2024 at 02:03:57PM +0200, Michal Swiatkowski wrote:
> Remove the field to allow having more queues than MSI-X vectors on a
> VSI. By default the numbers will be the same, but if there are not
> enough MSI-X vectors available, the VSI can run with as little as one MSI-X.
>
> Reviewed-by: Wojciech Drewek <wojciech.drewek@...el.com>
> Signed-off-by: Michal Swiatkowski <michal.swiatkowski@...ux.intel.com>
> ---
> drivers/net/ethernet/intel/ice/ice.h | 1 -
> drivers/net/ethernet/intel/ice/ice_base.c | 10 +++-----
> drivers/net/ethernet/intel/ice/ice_ethtool.c | 8 +++---
> drivers/net/ethernet/intel/ice/ice_irq.c | 11 +++------
> drivers/net/ethernet/intel/ice/ice_lib.c | 26 +++++++++++---------
> 5 files changed, 27 insertions(+), 29 deletions(-)
>
> diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
> index cf824d041d5a..1e23aec2634f 100644
> --- a/drivers/net/ethernet/intel/ice/ice.h
> +++ b/drivers/net/ethernet/intel/ice/ice.h
> @@ -622,7 +622,6 @@ struct ice_pf {
> u16 max_pf_txqs; /* Total Tx queues PF wide */
> u16 max_pf_rxqs; /* Total Rx queues PF wide */
> struct ice_pf_msix msix;
> - u16 num_lan_msix; /* Total MSIX vectors for base driver */
> u16 num_lan_tx; /* num LAN Tx queues setup */
> u16 num_lan_rx; /* num LAN Rx queues setup */
> u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
...
> diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
> index 85a3b2326e7b..e5c56ec8bbda 100644
> --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
> +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
> @@ -3811,8 +3811,8 @@ ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
> */
> static int ice_get_max_txq(struct ice_pf *pf)
> {
> - return min3(pf->num_lan_msix, (u16)num_online_cpus(),
> - (u16)pf->hw.func_caps.common_cap.num_txq);
> + return min_t(u16, num_online_cpus(),
> + pf->hw.func_caps.common_cap.num_txq);
It is unclear why min_t() is used here or elsewhere in this patch
instead of min() as it seems that all the entities being compared
are unsigned. Are you concerned about overflowing u16? If so, perhaps
clamp, or some error handling, is a better approach.
I am concerned that the casting that min_t() brings will hide
any problems that may exist.
> }
>
> /**
> @@ -3821,8 +3821,8 @@ static int ice_get_max_txq(struct ice_pf *pf)
> */
> static int ice_get_max_rxq(struct ice_pf *pf)
> {
> - return min3(pf->num_lan_msix, (u16)num_online_cpus(),
> - (u16)pf->hw.func_caps.common_cap.num_rxq);
> + return min_t(u16, num_online_cpus(),
> + pf->hw.func_caps.common_cap.num_rxq);
> }
>
> /**
...
> diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
> index d4e74f96a8ad..26cfb4b67972 100644
> --- a/drivers/net/ethernet/intel/ice/ice_lib.c
> +++ b/drivers/net/ethernet/intel/ice/ice_lib.c
> @@ -157,6 +157,16 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
> }
> }
>
> +static u16 ice_get_rxq_count(struct ice_pf *pf)
> +{
> + return min_t(u16, ice_get_avail_rxq_count(pf), num_online_cpus());
> +}
> +
> +static u16 ice_get_txq_count(struct ice_pf *pf)
> +{
> + return min_t(u16, ice_get_avail_txq_count(pf), num_online_cpus());
> +}
> +
> /**
> * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
> * @vsi: the VSI being configured
> @@ -178,9 +188,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
> vsi->alloc_txq = vsi->req_txq;
> vsi->num_txq = vsi->req_txq;
> } else {
> - vsi->alloc_txq = min3(pf->num_lan_msix,
> - ice_get_avail_txq_count(pf),
> - (u16)num_online_cpus());
> + vsi->alloc_txq = ice_get_txq_count(pf);
> }
>
> pf->num_lan_tx = vsi->alloc_txq;
> @@ -193,17 +201,14 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
> vsi->alloc_rxq = vsi->req_rxq;
> vsi->num_rxq = vsi->req_rxq;
> } else {
> - vsi->alloc_rxq = min3(pf->num_lan_msix,
> - ice_get_avail_rxq_count(pf),
> - (u16)num_online_cpus());
> + vsi->alloc_rxq = ice_get_rxq_count(pf);
> }
> }
>
> pf->num_lan_rx = vsi->alloc_rxq;
>
> - vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
> - max_t(int, vsi->alloc_rxq,
> - vsi->alloc_txq));
> + vsi->num_q_vectors = max_t(int, vsi->alloc_rxq,
> + vsi->alloc_txq);
> break;
> case ICE_VSI_SF:
> vsi->alloc_txq = 1;
> @@ -1173,12 +1178,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
> static void
> ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
> {
> - struct ice_pf *pf = vsi->back;
> u16 qcount, qmap;
> u8 offset = 0;
> int pow;
>
> - qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
> + qcount = vsi->num_rxq;
>
> pow = order_base_2(qcount);
> qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
> --
> 2.42.0
>
>
Powered by blists - more mailing lists