Message-ID: <20250113063927.4017173-10-michael.chan@broadcom.com>
Date: Sun, 12 Jan 2025 22:39:26 -0800
From: Michael Chan <michael.chan@...adcom.com>
To: davem@...emloft.net
Cc: netdev@...r.kernel.org,
edumazet@...gle.com,
kuba@...nel.org,
pabeni@...hat.com,
andrew+netdev@...n.ch,
pavan.chebbi@...adcom.com,
andrew.gospodarek@...adcom.com,
somnath.kotur@...adcom.com,
Ajit Khaparde <ajit.khaparde@...adcom.com>,
David Wei <dw@...idwei.uk>
Subject: [PATCH net-next 09/10] bnxt_en: Extend queue stop/start for Tx rings

From: Somnath Kotur <somnath.kotur@...adcom.com>

In order to use queue_stop/queue_start to support the new Steering
Tags, we need to free the TX ring and TX completion ring if it is a
combined channel with TX/RX sharing the same NAPI. Otherwise, TX
completions will not have the updated Steering Tag. With that change,
we can now add napi_disable() and napi_enable() during queue_stop()/
queue_start(). This guarantees that NAPI stops processing completion
entries in case there are additional pending entries in the
completion rings after queue_stop().

There could be some NQEs sitting unprocessed while NAPI is disabled,
leaving the NQ unarmed. Explicitly re-arm the NQ after napi_enable()
in queue_start() so that NAPI will resume properly.

Reviewed-by: Ajit Khaparde <ajit.khaparde@...adcom.com>
Signed-off-by: Somnath Kotur <somnath.kotur@...adcom.com>
Reviewed-by: Michael Chan <michael.chan@...adcom.com>
---
Cc: David Wei <dw@...idwei.uk>
Discussion about adding napi_disable()/napi_enable():
https://lore.kernel.org/netdev/5336d624-8d8b-40a6-b732-b020e4a119a2@davidwei.uk/#t
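
For context, below is a condensed sketch of the stop/start ordering
this patch establishes for a combined channel. It is illustrative
only, not the literal driver code: the helper names are taken from
the diff below, the wrapper function name is made up for the sketch,
and error handling is omitted.

/* Illustrative only: condenses the bnxt_queue_stop() and
 * bnxt_queue_start() paths added by this patch.
 */
static void sketch_stop_start_order(struct bnxt *bp, int idx)
{
	struct bnxt_cp_ring_info *cpr = &bp->bnapi[idx]->cp_ring;

	/* queue_stop(): free the RX rings, then the TX ring and TX
	 * completion ring when TX/RX share the same NAPI, so that new
	 * TX completions will carry the updated Steering Tag.
	 */
	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		bnxt_tx_queue_stop(bp, idx);
	napi_disable(&bp->bnapi[idx]->napi);
	/* NQEs may now sit unprocessed, leaving the NQ unarmed. */

	/* queue_start(): reallocate the rings, re-enable NAPI, then
	 * explicitly re-arm the NQ so the pending NQEs are processed.
	 */
	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		bnxt_tx_queue_start(bp, idx);
	napi_enable(&bp->bnapi[idx]->napi);
	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
}

The bnxt_db_nq_arm() call after napi_enable() is the key addition:
without it, NQEs that arrived while NAPI was disabled would leave the
NQ unarmed and NAPI would never be rescheduled.
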
---
drivers/net/ethernet/broadcom/bnxt/bnxt.c | 99 ++++++++++++++++++++++-
1 file changed, 98 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index fe350d0ba99c..eddb4de959c6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -7341,6 +7341,22 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
return 0;
}

+static void bnxt_hwrm_tx_ring_free(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ bool close_path)
+{
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
+ INVALID_HW_RING_ID;
+ hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
+ cmpl_ring_id);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
bool close_path)
@@ -11247,6 +11263,69 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
return 0;
}

+static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
+{
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+ struct bnxt_napi *bnapi;
+ int i;
+
+ bnapi = bp->bnapi[idx];
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
+ synchronize_net();
+
+ if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
+ txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+ if (txq) {
+ __netif_tx_lock_bh(txq);
+ netif_tx_stop_queue(txq);
+ __netif_tx_unlock_bh(txq);
+ }
+ }
+ bnxt_hwrm_tx_ring_free(bp, txr, true);
+ bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
+ bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
+ bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
+ }
+}
+
+static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
+{
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+ struct bnxt_napi *bnapi;
+ int rc, i;
+
+ bnapi = bp->bnapi[idx];
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
+ if (rc)
+ return rc;
+
+ rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
+ if (rc) {
+ bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
+ return rc;
+ }
+ txr->tx_prod = 0;
+ txr->tx_cons = 0;
+ txr->tx_hw_cons = 0;
+
+ WRITE_ONCE(txr->dev_state, 0);
+ synchronize_net();
+
+ if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
+ continue;
+
+ txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+ if (txq)
+ netif_tx_start_queue(txq);
+ }
+
+ return 0;
+}
+
static void bnxt_free_irq(struct bnxt *bp)
{
struct bnxt_irq *irq;
@@ -15647,6 +15726,16 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
cpr = &rxr->bnapi->cp_ring;
cpr->sw_stats->rx.rx_resets++;

+ if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
+ rc = bnxt_tx_queue_start(bp, idx);
+ if (rc)
+ netdev_warn(bp->dev,
+ "tx queue restart failed: rc=%d\n", rc);
+ }
+
+ napi_enable(&rxr->bnapi->napi);
+ bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
+
for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
vnic = &bp->vnic_info[i];
@@ -15675,6 +15764,7 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
struct bnxt *bp = netdev_priv(dev);
struct bnxt_rx_ring_info *rxr;
struct bnxt_vnic_info *vnic;
+ struct bnxt_napi *bnapi;
int i;

for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
@@ -15686,15 +15776,22 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
/* Make sure NAPI sees that the VNIC is disabled */
synchronize_net();
rxr = &bp->rx_ring[idx];
- cancel_work_sync(&rxr->bnapi->cp_ring.dim.work);
+ bnapi = rxr->bnapi;
+ cancel_work_sync(&bnapi->cp_ring.dim.work);
bnxt_hwrm_rx_ring_free(bp, rxr, false);
bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
page_pool_disable_direct_recycling(rxr->page_pool);
if (bnxt_separate_head_pool())
page_pool_disable_direct_recycling(rxr->head_pool);

+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+ bnxt_tx_queue_stop(bp, idx);
+
+ napi_disable(&bnapi->napi);
+
bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
+ bnxt_db_nq(bp, &bnapi->cp_ring.cp_db, bnapi->cp_ring.cp_raw_cons);

memcpy(qmem, rxr, sizeof(*rxr));
bnxt_init_rx_ring_struct(bp, qmem);
--
2.30.1