Message-Id: <20260121033357.3261464-8-wei.fang@nxp.com>
Date: Wed, 21 Jan 2026 11:33:49 +0800
From: Wei Fang <wei.fang@....com>
To: shenwei.wang@....com,
xiaoning.wang@....com,
frank.li@....com,
andrew+netdev@...n.ch,
davem@...emloft.net,
edumazet@...gle.com,
kuba@...nel.org,
pabeni@...hat.com,
ast@...nel.org,
daniel@...earbox.net,
hawk@...nel.org,
john.fastabend@...il.com,
sdf@...ichev.me
Cc: netdev@...r.kernel.org,
linux-kernel@...r.kernel.org,
imx@...ts.linux.dev,
bpf@...r.kernel.org
Subject: [PATCH v3 net-next 07/15] net: fec: add tx_qid parameter to fec_enet_xdp_tx_xmit()
Remove the fec_enet_xdp_get_tx_queue() call from fec_enet_xdp_tx_xmit()
and pass a tx_qid parameter to it instead, so that the TX queue ID for
XDP_TX frames is determined in fec_enet_rx_queue_xdp() and only needs
to be calculated once per NAPI poll rather than once per frame. Since
the FEC usually has the same number of RX and TX queues, the RX queue
ID can be used directly as the TX queue ID; only in the exceptional
case where it is out of range is fec_enet_xdp_get_tx_queue() used to
derive the TX queue ID from the current CPU.
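For reference, a minimal stand-alone sketch of the intended mapping;
the helper names and example values below are illustrative only and
are not the driver code itself:

	#include <stdio.h>

	/* Fallback mapping, mirroring fec_enet_xdp_get_tx_queue():
	 * map an index (e.g. the current CPU) onto a valid TX queue.
	 */
	static int xdp_get_tx_queue(int num_tx_queues, int index)
	{
		if (index < 0)
			return 0;

		return index % num_tx_queues;
	}

	/* Pick the TX queue for an XDP_TX frame received on rx_queue. */
	static int pick_tx_qid(int rx_queue, int num_tx_queues, int cpu)
	{
		/* Common case: RX and TX queue counts match, so the
		 * RX queue ID can be reused as the TX queue ID.
		 */
		if (rx_queue < num_tx_queues)
			return rx_queue;

		/* Exceptional case: fall back to a CPU-based mapping. */
		return xdp_get_tx_queue(num_tx_queues, cpu);
	}

	int main(void)
	{
		/* RX queue 2 with 3 TX queues: reuse the RX queue ID. */
		printf("tx_qid = %d\n", pick_tx_qid(2, 3, 5));
		/* RX queue 2 with only 1 TX queue: 5 % 1 = 0 via fallback. */
		printf("tx_qid = %d\n", pick_tx_qid(2, 1, 5));
		return 0;
	}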
Signed-off-by: Wei Fang <wei.fang@....com>
---
drivers/net/ethernet/freescale/fec_main.c | 38 +++++++++++------------
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 86447a7cb963..b60c736da027 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -79,7 +79,7 @@ static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_set(struct net_device *ndev);
static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
int cpu, struct xdp_buff *xdp,
- u32 dma_sync_len);
+ u32 dma_sync_len, int queue);
#define DRIVER_NAME "fec"
@@ -1889,6 +1889,15 @@ static void fec_xdp_drop(struct fec_enet_priv_rx_q *rxq,
page_pool_put_page(rxq->page_pool, page, sync, true);
}
+static int
+fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
+{
+ if (unlikely(index < 0))
+ return 0;
+
+ return (index % fep->num_tx_queues);
+}
+
static int fec_enet_rx_queue_xdp(struct fec_enet_private *fep, int queue,
int budget, struct bpf_prog *prog)
{
@@ -1902,6 +1911,7 @@ static int fec_enet_rx_queue_xdp(struct fec_enet_private *fep, int queue,
struct sk_buff *skb;
u16 status, pkt_len;
struct xdp_buff xdp;
+ int tx_qid = queue;
struct page *page;
u32 xdp_res = 0;
dma_addr_t dma;
@@ -1916,6 +1926,9 @@ static int fec_enet_rx_queue_xdp(struct fec_enet_private *fep, int queue,
flush_cache_all();
#endif
+ if (unlikely(tx_qid >= fep->num_tx_queues))
+ tx_qid = fec_enet_xdp_get_tx_queue(fep, cpu);
+
xdp_init_buff(&xdp, PAGE_SIZE << fep->pagepool_order, &rxq->xdp_rxq);
while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
@@ -1988,7 +2001,7 @@ static int fec_enet_rx_queue_xdp(struct fec_enet_private *fep, int queue,
break;
case XDP_TX:
rxq->stats[RX_XDP_TX]++;
- err = fec_enet_xdp_tx_xmit(fep, cpu, &xdp, sync);
+ err = fec_enet_xdp_tx_xmit(fep, cpu, &xdp, sync, tx_qid);
if (unlikely(err)) {
rxq->stats[RX_XDP_TX_ERRORS]++;
fec_xdp_drop(rxq, &xdp, sync);
@@ -3938,15 +3951,6 @@ static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
}
}
-static int
-fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
-{
- if (unlikely(index < 0))
- return 0;
-
- return (index % fep->num_tx_queues);
-}
-
static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
struct fec_enet_priv_tx_q *txq,
void *frame, u32 dma_sync_len,
@@ -4040,15 +4044,11 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
int cpu, struct xdp_buff *xdp,
- u32 dma_sync_len)
+ u32 dma_sync_len, int queue)
{
- struct fec_enet_priv_tx_q *txq;
- struct netdev_queue *nq;
- int queue, ret;
-
- queue = fec_enet_xdp_get_tx_queue(fep, cpu);
- txq = fep->tx_queue[queue];
- nq = netdev_get_tx_queue(fep->netdev, queue);
+ struct netdev_queue *nq = netdev_get_tx_queue(fep->netdev, queue);
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue];
+ int ret;
__netif_tx_lock(nq, cpu);
--
2.34.1