Message-Id: <20230725074148.2936402-1-wei.fang@nxp.com>
Date: Tue, 25 Jul 2023 15:41:48 +0800
From: Wei Fang <wei.fang@nxp.com>
To: davem@davemloft.net,
	edumazet@google.com,
	kuba@kernel.org,
	shenwei.wang@nxp.com,
	xiaoning.wang@nxp.com,
	pabeni@redhat.com,
	netdev@vger.kernel.org
Cc: linux-imx@nxp.com,
	linux-kernel@vger.kernel.org
Subject: [PATCH net] net: fec: tx processing does not call XDP APIs if budget is 0

According to the clarification [1] in the latest napi.rst, the Tx
processing cannot call any XDP (or page pool) APIs if the "budget"
is 0: NAPI being called with a budget of 0 (such as from netpoll)
indicates that we may be in an IRQ context, where the page pool
cannot be used.

[1] https://lore.kernel.org/all/20230720161323.2025379-1-kuba@kernel.org/

Fixes: 20f797399035 ("net: fec: recycle pages for transmitted XDP frames")
Signed-off-by: Wei Fang <wei.fang@nxp.com>
Suggested-by: Jakub Kicinski <kuba@kernel.org>
---
 drivers/net/ethernet/freescale/fec_main.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

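Note for reviewers: below is a minimal, hypothetical sketch of the NAPI
poll pattern this patch follows. example_poll(), example_clean_tx(),
example_clean_rx() and struct example_priv are made-up names for
illustration, not FEC code. The point is that Tx completion, which may
return pages to the page pool, must be skipped when the poll is invoked
with budget == 0:

static int example_poll(struct napi_struct *napi, int budget)
{
	/* Hypothetical driver private struct embedding the napi_struct. */
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int done = 0;

	/* Tx completion may hand pages back to the page pool, so it
	 * must not run for budget == 0 (e.g. netpoll), where we may be
	 * in IRQ context.
	 */
	if (budget)
		example_clean_tx(priv);

	done = example_clean_rx(priv, budget);
	if (done < budget)
		napi_complete_done(napi, done);

	return done;
}
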
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 073d61619336..66b5cbdb43b9 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1372,7 +1372,7 @@ fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
 }
 
 static void
-fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
+fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 {
 	struct fec_enet_private *fep;
 	struct xdp_frame *xdpf;
@@ -1416,6 +1416,14 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 			if (!skb)
 				goto tx_buf_done;
 		} else {
+			/* Tx processing cannot call any XDP (or page pool) APIs if
+			 * the "budget" is 0. NAPI being called with a budget of 0
+			 * (such as from netpoll) indicates we may be in an IRQ
+			 * context, where the page pool cannot be used.
+			 */
+			if (unlikely(!budget))
+				break;
+
 			xdpf = txq->tx_buf[index].xdp;
 			if (bdp->cbd_bufaddr)
 				dma_unmap_single(&fep->pdev->dev,
@@ -1508,14 +1516,14 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 		writel(0, txq->bd.reg_desc_active);
 }
 
-static void fec_enet_tx(struct net_device *ndev)
+static void fec_enet_tx(struct net_device *ndev, int budget)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	int i;
 
 	/* Make sure that AVB queues are processed first. */
 	for (i = fep->num_tx_queues - 1; i >= 0; i--)
-		fec_enet_tx_queue(ndev, i);
+		fec_enet_tx_queue(ndev, i, budget);
 }
 
 static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
@@ -1858,7 +1866,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
 
 	do {
 		done += fec_enet_rx(ndev, budget - done);
-		fec_enet_tx(ndev);
+		fec_enet_tx(ndev, budget);
 	} while ((done < budget) && fec_enet_collect_events(fep));
 
 	if (done < budget) {

--
2.25.1