Message-ID: <20250820222308.994949-1-shenwei.wang@nxp.com>
Date: Wed, 20 Aug 2025 17:23:08 -0500
From: Shenwei Wang <shenwei.wang@....com>
To: Wei Fang <wei.fang@....com>,
Andrew Lunn <andrew+netdev@...n.ch>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>
Cc: Shenwei Wang <shenwei.wang@....com>,
Clark Wang <xiaoning.wang@....com>,
Stanislav Fomichev <sdf@...ichev.me>,
imx@...ts.linux.dev,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-imx@....com
Subject: [PATCH net-next] net: fec: add jumbo frame support

Certain i.MX SoCs, such as the i.MX8QM and i.MX8QXP, feature enhanced
FEC hardware that supports Ethernet jumbo frames with packet sizes of
up to 16 KB.
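
For reference, the advertised maximum MTU follows from the 16 KB
hardware limit via the constants introduced below (ETH_HLEN = 14,
ETH_FCS_LEN = 4):

    /* Largest on-wire buffer, rounded down for the worst-case
     * 64-byte alignment (MAX_JUMBO_BUF_SIZE below).
     */
    max_buf = round_down(16384 - 64, 64);              /* = 16320 */
    ndev->max_mtu = max_buf - ETH_HLEN - ETH_FCS_LEN;  /* = 16302 */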

When jumbo frames are enabled, the TX FIFO may not be large enough to
hold an entire frame. To accommodate this, the FIFO is configured to
operate in cut-through mode, which allows transmission to begin once
the FIFO fill level reaches a programmed threshold.
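
Concretely, the TX watermark selection in fec_restart() reduces to
the following (see the hunk below; 0xF selects a partial-FIFO
threshold instead of the store-and-forward watermark):

    if (fep->quirks & FEC_QUIRK_JUMBO_FRAME)
            writel(0xF, fep->hwp + FEC_X_WMRK);   /* cut-through */
    else
            writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
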
Signed-off-by: Shenwei Wang <shenwei.wang@....com>
---
drivers/net/ethernet/freescale/fec.h | 6 ++
drivers/net/ethernet/freescale/fec_main.c | 90 ++++++++++++++++++++---
2 files changed, 87 insertions(+), 9 deletions(-)
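
For reviewers, a worked example of the new buffer sizing in
fec_change_mtu() (assumes 4 KiB pages; FEC_ENET_XDP_HEADROOM and the
skb_shared_info reservation are the driver's existing constants):

    /* MTU 9000: 9000 + 14 + 4 = 9018 bytes -> get_order() = 2,
     * i.e. 16 KiB compound pages from the page pool.
     */
    order = get_order(9000 + ETH_HLEN + ETH_FCS_LEN);      /* 2 */
    rx_frame_size = (PAGE_SIZE << order)                   /* 16384 */
                    - FEC_ENET_XDP_HEADROOM
                    - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

Any frame longer than rx_frame_size is dropped in fec_enet_rx_queue()
and accounted as rx_dropped. At runtime the larger MTU is selected
with, e.g., "ip link set dev eth0 mtu 9000".
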
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 5c8fdcef759b..6802773c5f34 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -513,6 +513,9 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_HAS_MDIO_C45 BIT(24)
+/* Jumbo Frame support */
+#define FEC_QUIRK_JUMBO_FRAME BIT(25)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
@@ -619,6 +622,9 @@ struct fec_enet_private {
unsigned int total_tx_ring_size;
unsigned int total_rx_ring_size;
+ unsigned int max_buf_size;
+ unsigned int pagepool_order;
+ unsigned int rx_frame_size;
struct platform_device *pdev;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1383918f8a3f..6031f958973e 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -167,7 +167,8 @@ static const struct fec_devinfo fec_imx8qm_info = {
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
- FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45,
+ FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45 |
+ FEC_QUIRK_JUMBO_FRAME,
};
static const struct fec_devinfo fec_s32v234_info = {
@@ -233,6 +234,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
* 2048 byte skbufs are allocated. However, alignment requirements
* varies between FEC variants. Worst case is 64, so round down by 64.
*/
+#define MAX_JUMBO_BUF_SIZE (round_down(16384 - 64, 64))
#define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64))
#define PKT_MINBUF_SIZE 64
@@ -481,6 +483,11 @@ fec_enet_create_page_pool(struct fec_enet_private *fep,
};
int err;
+ if (fep->pagepool_order != 0) {
+ pp_params.order = fep->pagepool_order;
+ pp_params.max_len = fep->rx_frame_size;
+ }
+
rxq->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rxq->page_pool)) {
err = PTR_ERR(rxq->page_pool);
@@ -1083,7 +1090,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
for (i = 0; i < fep->num_rx_queues; i++) {
rxq = fep->rx_queue[i];
writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
- writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
+ writel(fep->max_buf_size, fep->hwp + FEC_R_BUFF_SIZE(i));
/* enable DMA1/2 */
if (i)
@@ -1145,9 +1152,10 @@ static void
fec_restart(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- u32 rcntl = OPT_FRAME_SIZE | FEC_RCR_MII;
+ u32 rcntl = FEC_RCR_MII;
u32 ecntl = FEC_ECR_ETHEREN;
+ rcntl |= (fep->max_buf_size << 16);
if (fep->bufdesc_ex)
fec_ptp_save_state(fep);
@@ -1191,7 +1199,7 @@ fec_restart(struct net_device *ndev)
else
val &= ~FEC_RACC_OPTIONS;
writel(val, fep->hwp + FEC_RACC);
- writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
+ writel(fep->max_buf_size, fep->hwp + FEC_FTRL);
}
#endif
@@ -1278,8 +1286,16 @@ fec_restart(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_ENET_MAC) {
/* enable ENET endian swap */
ecntl |= FEC_ECR_BYTESWP;
- /* enable ENET store and forward mode */
- writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
+
+ /* When Jumbo Frame is enabled, the FIFO may not be large enough
+ * to hold an entire frame. In this case, configure the interface
+ * to operate in cut-through mode, triggered by the FIFO threshold.
+ * Otherwise, enable the ENET store-and-forward mode.
+ */
+ if (fep->quirks & FEC_QUIRK_JUMBO_FRAME)
+ writel(0xF, fep->hwp + FEC_X_WMRK);
+ else
+ writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
}
if (fep->bufdesc_ex)
@@ -1780,7 +1796,7 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
* These get messed up if we get called due to a busy condition.
*/
bdp = rxq->bd.cur;
- xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
+ xdp_init_buff(&xdp, (PAGE_SIZE << fep->pagepool_order), &rxq->xdp_rxq);
while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
@@ -1829,6 +1845,11 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
goto rx_processing_done;
}
+ if (pkt_len > fep->rx_frame_size) {
+ ndev->stats.rx_dropped++;
+ goto rx_processing_done;
+ }
+
dma_sync_single_for_cpu(&fep->pdev->dev,
fec32_to_cpu(cbd_bufaddr),
pkt_len,
@@ -1850,7 +1871,7 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
* include that when passing upstream as it messes up
* bridging applications.
*/
- skb = build_skb(page_address(page), PAGE_SIZE);
+ skb = build_skb(page_address(page), (PAGE_SIZE << fep->pagepool_order));
if (unlikely(!skb)) {
page_pool_recycle_direct(rxq->page_pool, page);
ndev->stats.rx_dropped++;
@@ -4020,6 +4041,47 @@ static int fec_hwtstamp_set(struct net_device *ndev,
return fec_ptp_set(ndev, config, extack);
}
+static int fec_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int order, done;
+ bool running;
+
+ order = get_order(new_mtu + ETH_HLEN + ETH_FCS_LEN);
+ if (fep->pagepool_order == order) {
+ WRITE_ONCE(ndev->mtu, new_mtu);
+ return 0;
+ }
+
+ fep->pagepool_order = order;
+ fep->rx_frame_size = (PAGE_SIZE << order) - FEC_ENET_XDP_HEADROOM
+ - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ running = netif_running(ndev);
+
+ if (running) {
+ napi_disable(&fep->napi);
+ netif_tx_disable(ndev);
+
+ read_poll_timeout(fec_enet_rx_napi, done, (done == 0),
+ 10, 1000, false, &fep->napi, 10);
+
+ fec_stop(ndev);
+ fec_enet_free_buffers(ndev);
+ }
+
+ WRITE_ONCE(ndev->mtu, new_mtu);
+
+ if (running) {
+ fec_enet_alloc_buffers(ndev);
+ fec_restart(ndev);
+ napi_enable(&fep->napi);
+ netif_tx_start_all_queues(ndev);
+ }
+
+ return 0;
+}
+
static const struct net_device_ops fec_netdev_ops = {
.ndo_open = fec_enet_open,
.ndo_stop = fec_enet_close,
@@ -4029,6 +4091,7 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
+ .ndo_change_mtu = fec_change_mtu,
.ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_features = fec_set_features,
.ndo_bpf = fec_enet_bpf,
@@ -4559,7 +4622,16 @@ fec_probe(struct platform_device *pdev)
fec_enet_clk_enable(ndev, false);
pinctrl_pm_select_sleep_state(&pdev->dev);
- ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN;
+ fep->pagepool_order = 0;
+ fep->rx_frame_size = FEC_ENET_RX_FRSIZE;
+ fep->max_buf_size = PKT_MAXBUF_SIZE;
+
+ if (fep->quirks & FEC_QUIRK_JUMBO_FRAME)
+ fep->max_buf_size = MAX_JUMBO_BUF_SIZE;
+ else
+ fep->max_buf_size = PKT_MAXBUF_SIZE;
+
+ ndev->max_mtu = fep->max_buf_size - ETH_HLEN - ETH_FCS_LEN;
ret = register_netdev(ndev);
if (ret)
--
2.43.0