Message-ID: <20250612094523.1615719-1-m-malladi@ti.com>
Date: Thu, 12 Jun 2025 15:15:23 +0530
From: Meghana Malladi <m-malladi@...com>
To: <namcao@...utronix.de>, <m-malladi@...com>, <john.fastabend@...il.com>,
<hawk@...nel.org>, <daniel@...earbox.net>, <ast@...nel.org>,
<pabeni@...hat.com>, <kuba@...nel.org>, <edumazet@...gle.com>,
<davem@...emloft.net>, <andrew+netdev@...n.ch>
CC: <bpf@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<netdev@...r.kernel.org>, <linux-arm-kernel@...ts.infradead.org>,
<srk@...com>, Vignesh Raghavendra <vigneshr@...com>,
Roger Quadros <rogerq@...nel.org>, <danishanwar@...com>
Subject: [PATCH net] net: ti: icssg-prueth: Fix packet handling for XDP_TX
While transmitting XDP frames for XDP_TX, the page_pool is used to
get the DMA buffers (already mapped to the pages), which need to be
freed/recycled once the transmission is complete. The driver need not
do this explicitly; it is handled more gracefully by the XDP core
when the XDP frame is returned: __xdp_return() frees the XDP memory
based on its memory type, and page_pool memory is one of the types it
handles. This change fixes the transmit queue timeout seen while
running XDP_TX.
logs:
[ 309.069682] icssg-prueth icssg1-eth eth2: NETDEV WATCHDOG: CPU: 0: transmit queue 0 timed out 45860 ms
[ 313.933780] icssg-prueth icssg1-eth eth2: NETDEV WATCHDOG: CPU: 0: transmit queue 0 timed out 50724 ms
[ 319.053656] icssg-prueth icssg1-eth eth2: NETDEV WATCHDOG: CPU: 0: transmit queue 0 timed out 55844 ms
...
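
For reference, below is a minimal sketch (not part of this patch) of
how the TX completion side is expected to hand an XDP_TX frame back to
the XDP core. The helper name emac_tx_complete_one() is hypothetical
and used only for illustration; the swdata layout and
prueth_xmit_free() match the driver code touched by this patch:

static void emac_tx_complete_one(struct prueth_tx_chn *tx_chn,
				 struct cppi5_host_desc_t *desc)
{
	struct prueth_swdata *swdata = cppi5_hdesc_get_swdata(desc);

	if (swdata->type == PRUETH_SWDATA_XDPF)
		/* xdp_return_frame() ends up in __xdp_return(), which
		 * checks the frame's memory type; MEM_TYPE_PAGE_POOL
		 * pages are recycled back to their page_pool there, so
		 * the driver does not need its own
		 * page_pool_recycle_direct() call.
		 */
		xdp_return_frame(swdata->data.xdpf);

	prueth_xmit_free(tx_chn, desc);
}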
Signed-off-by: Meghana Malladi <m-malladi@...com>
---
 drivers/net/ethernet/ti/icssg/icssg_common.c | 19 ++-----------------
 1 file changed, 2 insertions(+), 17 deletions(-)

diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index 5b8fdb882172..12f25cec6255 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -98,20 +98,11 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
 {
 	struct cppi5_host_desc_t *first_desc, *next_desc;
 	dma_addr_t buf_dma, next_desc_dma;
-	struct prueth_swdata *swdata;
-	struct page *page;
 	u32 buf_dma_len;
 
 	first_desc = desc;
 	next_desc = first_desc;
 
-	swdata = cppi5_hdesc_get_swdata(desc);
-	if (swdata->type == PRUETH_SWDATA_PAGE) {
-		page = swdata->data.page;
-		page_pool_recycle_direct(page->pp, swdata->data.page);
-		goto free_desc;
-	}
-
 	cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
 	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
 
@@ -135,7 +126,6 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
 		k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
 	}
 
-free_desc:
 	k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
 }
 EXPORT_SYMBOL_GPL(prueth_xmit_free);
@@ -612,13 +602,8 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
 	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
 	cppi5_hdesc_attach_buf(first_desc, buf_dma, xdpf->len, buf_dma, xdpf->len);
 	swdata = cppi5_hdesc_get_swdata(first_desc);
-	if (page) {
-		swdata->type = PRUETH_SWDATA_PAGE;
-		swdata->data.page = page;
-	} else {
-		swdata->type = PRUETH_SWDATA_XDPF;
-		swdata->data.xdpf = xdpf;
-	}
+	swdata->type = PRUETH_SWDATA_XDPF;
+	swdata->data.xdpf = xdpf;
 
 	/* Report BQL before sending the packet */
 	netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);

base-commit: 5d6d67c4cb10a4b4d3ae35758d5eeed6239afdc8
--
2.43.0