Message-ID: <Zsci0BshPQSXm8kl@boxer>
Date: Thu, 22 Aug 2024 13:36:48 +0200
From: Maciej Fijalkowski <maciej.fijalkowski@...el.com>
To: Larysa Zaremba <larysa.zaremba@...el.com>
CC: <intel-wired-lan@...ts.osuosl.org>, Tony Nguyen <anthony.l.nguyen@...el.com>,
	"David S. Miller" <davem@...emloft.net>, Jacob Keller <jacob.e.keller@...el.com>,
	Eric Dumazet <edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>,
	Paolo Abeni <pabeni@...hat.com>, Alexei Starovoitov <ast@...nel.org>,
	Daniel Borkmann <daniel@...earbox.net>, Jesper Dangaard Brouer <hawk@...nel.org>,
	John Fastabend <john.fastabend@...il.com>, <netdev@...r.kernel.org>,
	<linux-kernel@...r.kernel.org>, <bpf@...r.kernel.org>, <magnus.karlsson@...el.com>,
	Michal Kubiak <michal.kubiak@...el.com>, Wojciech Drewek <wojciech.drewek@...el.com>,
	Amritha Nambiar <amritha.nambiar@...el.com>, Chandan Kumar Rout <chandanx.rout@...el.com>
Subject: Re: [PATCH iwl-net v3 3/6] ice: check for XDP rings instead of bpf
program when unconfiguring
On Mon, Aug 19, 2024 at 12:05:40PM +0200, Larysa Zaremba wrote:
> If a VSI rebuild is pending, .ndo_bpf() can attach/detach the XDP program
> on the VSI without applying a new ring configuration. When unconfiguring
> the VSI, we can therefore encounter a state in which there is an XDP
> program but no XDP rings to destroy, or there are XDP rings that need to
> be destroyed but no XDP program to indicate their presence.
>
> When unconfiguring, rely on the presence of XDP rings rather than the XDP
> program, as they better represent the current state that has to be
> destroyed.
>
> Reviewed-by: Wojciech Drewek <wojciech.drewek@...el.com>
> Reviewed-by: Jacob Keller <jacob.e.keller@...el.com>
> Tested-by: Chandan Kumar Rout <chandanx.rout@...el.com>
> Signed-off-by: Larysa Zaremba <larysa.zaremba@...el.com>
Acked-by: Maciej Fijalkowski <maciej.fijalkowski@...el.com>
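
For readers following along, a minimal standalone sketch of the failure
mode (not driver code: struct toy_vsi and both helpers below are made up
for illustration, assuming ice_is_xdp_ena_vsi() boils down to a
vsi->xdp_prog check). It shows why the rings pointer, not the program
pointer, reflects what actually has to be torn down once a pending
rebuild has let the two get out of sync:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_vsi {
	void *xdp_prog;		/* set/cleared by .ndo_bpf() */
	void *xdp_rings;	/* allocated/freed with the ring config */
};

/* what the old checks keyed on */
static bool prog_says_teardown(const struct toy_vsi *vsi)
{
	return vsi->xdp_prog != NULL;
}

/* what the patch switches to */
static bool rings_say_teardown(const struct toy_vsi *vsi)
{
	return vsi->xdp_rings != NULL;
}

int main(void)
{
	/* rebuild pending: .ndo_bpf() detached the program, but the XDP
	 * rings were never freed and still need to be destroyed
	 */
	struct toy_vsi vsi = { .xdp_prog = NULL, .xdp_rings = (void *)1 };

	printf("program check would destroy rings: %d\n",
	       prog_says_teardown(&vsi));	/* 0 - rings would leak */
	printf("rings check would destroy rings: %d\n",
	       rings_say_teardown(&vsi));	/* 1 - correct */
	return 0;
}

The inverse state (program attached, rings already gone) trips the old
check the other way, attempting teardown of rings that do not exist,
which is why the hunks below switch every teardown-path test to
vsi->xdp_rings.
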
> ---
> drivers/net/ethernet/intel/ice/ice_lib.c | 4 ++--
> drivers/net/ethernet/intel/ice/ice_main.c | 4 ++--
> drivers/net/ethernet/intel/ice/ice_xsk.c | 6 +++---
> 3 files changed, 7 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
> index a8721ecdf2cd..b72338974a60 100644
> --- a/drivers/net/ethernet/intel/ice/ice_lib.c
> +++ b/drivers/net/ethernet/intel/ice/ice_lib.c
> @@ -2419,7 +2419,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
> dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
> vsi->vsi_num, err);
>
> - if (ice_is_xdp_ena_vsi(vsi))
> + if (vsi->xdp_rings)
> /* return value check can be skipped here, it always returns
> * 0 if reset is in progress
> */
> @@ -2521,7 +2521,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
> for (q = 0; q < q_vector->num_ring_tx; q++) {
> ice_write_itr(&q_vector->tx, 0);
> wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
> - if (ice_is_xdp_ena_vsi(vsi)) {
> + if (vsi->xdp_rings) {
> u32 xdp_txq = txq + vsi->num_xdp_txq;
>
> wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
> diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
> index e92f43850671..a718763d2370 100644
> --- a/drivers/net/ethernet/intel/ice/ice_main.c
> +++ b/drivers/net/ethernet/intel/ice/ice_main.c
> @@ -7228,7 +7228,7 @@ int ice_down(struct ice_vsi *vsi)
> if (tx_err)
> netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
> vsi->vsi_num, tx_err);
> - if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
> + if (!tx_err && vsi->xdp_rings) {
> tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
> if (tx_err)
> netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
> @@ -7245,7 +7245,7 @@ int ice_down(struct ice_vsi *vsi)
> ice_for_each_txq(vsi, i)
> ice_clean_tx_ring(vsi->tx_rings[i]);
>
> - if (ice_is_xdp_ena_vsi(vsi))
> + if (vsi->xdp_rings)
> ice_for_each_xdp_txq(vsi, i)
> ice_clean_tx_ring(vsi->xdp_rings[i]);
>
> diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
> index a659951fa987..8693509efbe7 100644
> --- a/drivers/net/ethernet/intel/ice/ice_xsk.c
> +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
> @@ -39,7 +39,7 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
> sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
> memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
> sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
> - if (ice_is_xdp_ena_vsi(vsi))
> + if (vsi->xdp_rings)
> memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
> sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
> }
> @@ -52,7 +52,7 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
> static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
> {
> ice_clean_tx_ring(vsi->tx_rings[q_idx]);
> - if (ice_is_xdp_ena_vsi(vsi))
> + if (vsi->xdp_rings)
> ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
> ice_clean_rx_ring(vsi->rx_rings[q_idx]);
> }
> @@ -194,7 +194,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
> err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
> if (!fail)
> fail = err;
> - if (ice_is_xdp_ena_vsi(vsi)) {
> + if (vsi->xdp_rings) {
> struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
>
> memset(&txq_meta, 0, sizeof(txq_meta));
> --
> 2.43.0
>