Message-ID: <20240724164840.2536605-4-larysa.zaremba@intel.com>
Date: Wed, 24 Jul 2024 18:48:34 +0200
From: Larysa Zaremba <larysa.zaremba@...el.com>
To: intel-wired-lan@...ts.osuosl.org
Cc: Larysa Zaremba <larysa.zaremba@...el.com>,
Tony Nguyen <anthony.l.nguyen@...el.com>,
"David S. Miller" <davem@...emloft.net>,
Jacob Keller <jacob.e.keller@...el.com>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
Maciej Fijalkowski <maciej.fijalkowski@...el.com>,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org,
bpf@...r.kernel.org,
magnus.karlsson@...el.com,
Michal Kubiak <michal.kubiak@...el.com>,
Wojciech Drewek <wojciech.drewek@...el.com>,
Amritha Nambiar <amritha.nambiar@...el.com>
Subject: [PATCH iwl-net v2 3/6] ice: check for XDP rings instead of bpf program when unconfiguring
If a VSI rebuild is pending, .ndo_bpf() can attach/detach the XDP
program on the VSI without applying a new ring configuration. When
unconfiguring the VSI, we can therefore encounter a state in which there
is an XDP program but no XDP rings to destroy, or in which there are XDP
rings that need to be destroyed but no XDP program to indicate their
presence.

When unconfiguring, rely on the presence of XDP rings rather than the
XDP program, as they better represent the current state that has to be
destroyed.

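For reference, and as an illustration only (not part of the diff):
ice_is_xdp_ena_vsi() boils down to a check of the attached program
pointer, roughly:

	/* sketch of the helper the teardown paths stop relying on;
	 * see ice.h for the exact definition
	 */
	static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
	{
		return !!vsi->xdp_prog;
	}

With a rebuild pending, vsi->xdp_prog and vsi->xdp_rings can disagree,
which is why the hunks below key off vsi->xdp_rings, the pointer that
reflects what was actually allocated.
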
Reviewed-by: Wojciech Drewek <wojciech.drewek@...el.com>
Signed-off-by: Larysa Zaremba <larysa.zaremba@...el.com>
---
drivers/net/ethernet/intel/ice/ice_lib.c | 4 ++--
drivers/net/ethernet/intel/ice/ice_main.c | 4 ++--
drivers/net/ethernet/intel/ice/ice_xsk.c | 6 +++---
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index bbef5ec67cae..f9852f1a136e 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2419,7 +2419,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
vsi->vsi_num, err);

- if (ice_is_xdp_ena_vsi(vsi))
+ if (vsi->xdp_rings)
/* return value check can be skipped here, it always returns
* 0 if reset is in progress
*/
ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
@@ -2521,7 +2521,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
for (q = 0; q < q_vector->num_ring_tx; q++) {
ice_write_itr(&q_vector->tx, 0);
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
- if (ice_is_xdp_ena_vsi(vsi)) {
+ if (vsi->xdp_rings) {
u32 xdp_txq = txq + vsi->num_xdp_txq;

wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index e50526b491fc..d7cc641643f8 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -7244,7 +7244,7 @@ int ice_down(struct ice_vsi *vsi)
if (tx_err)
netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
vsi->vsi_num, tx_err);
- if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
+ if (!tx_err && vsi->xdp_rings) {
tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
if (tx_err)
netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
@@ -7261,7 +7261,7 @@ int ice_down(struct ice_vsi *vsi)
ice_for_each_txq(vsi, i)
ice_clean_tx_ring(vsi->tx_rings[i]);

- if (ice_is_xdp_ena_vsi(vsi))
+ if (vsi->xdp_rings)
ice_for_each_xdp_txq(vsi, i)
ice_clean_tx_ring(vsi->xdp_rings[i]);
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 2c1a843ba200..5dd50a2866cc 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -39,7 +39,7 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
- if (ice_is_xdp_ena_vsi(vsi))
+ if (vsi->xdp_rings)
memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
}
@@ -52,7 +52,7 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
ice_clean_tx_ring(vsi->tx_rings[q_idx]);
- if (ice_is_xdp_ena_vsi(vsi)) {
+ if (vsi->xdp_rings) {
synchronize_rcu();
ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
}
@@ -189,7 +189,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
if (err)
return err;
- if (ice_is_xdp_ena_vsi(vsi)) {
+ if (vsi->xdp_rings) {
struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];

memset(&txq_meta, 0, sizeof(txq_meta));
--
2.43.0