Message-Id: <20241118004509.200828-1-qingtao.cao@digi.com>
Date: Mon, 18 Nov 2024 10:45:09 +1000
From: Qingtao Cao <qingtao.cao.au@...il.com>
To:
Cc: Qingtao Cao <qingtao.cao@...i.com>,
Sebastian Hesselbarth <sebastian.hesselbarth@...il.com>,
Andrew Lunn <andrew+netdev@...n.ch>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH net-next v1 1/1] net: mv643xx_eth: disable IP tx checksum with jumbo frames for Armada 310

The Ethernet controller found in the Armada 310 doesn't support TCP/IP
checksumming of frames larger than its TX checksum offload limit.

When the path MTU is larger than this limit, skb_warn_bad_offload() will
emit a warning backtrace.

Disable TX checksum offload (NETIF_F_IP_CSUM) when the MTU is set to a
value larger than this limit. NETIF_F_TSO will automatically be disabled
as a result, and the IP stack will calculate jumbo frames' checksums
instead.

Signed-off-by: Qingtao Cao <qingtao.cao@...i.com>
---
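Note: the automatic TSO drop relies on the feature-fixup pass in the
networking core. A paraphrased sketch of that pass (modeled on
netdev_fix_features() in net/core/dev.c; the exact code varies across
kernel versions):

	/* Paraphrased core logic: once .ndo_fix_features() has cleared
	 * NETIF_F_IP_CSUM, drop TSO as well, since TSO cannot work
	 * without TX checksum offload.
	 */
	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IP_CSUM)) {
		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO;
		features &= ~NETIF_F_TSO_ECN;
	}

netdev_update_features() runs .ndo_fix_features() followed by this core
fixup before applying the result, which is why mv643xx_eth_change_mtu()
below only needs to call netdev_update_features() after the MTU changes.
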
drivers/net/ethernet/marvell/mv643xx_eth.c | 23 +++++++++++++++++++++-
1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 9e80899546d9..808877dd3549 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2558,6 +2558,23 @@ static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return ret;
}
+static netdev_features_t mv643xx_eth_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ if (mp->shared->tx_csum_limit &&
+ dev->mtu > mp->shared->tx_csum_limit) {
+ /* Kernel disables TSO when there is no TX checksum offload */
+ features &= ~NETIF_F_IP_CSUM;
+ netdev_info(dev,
+ "Disable IP TX checksum and TSO offload for MTU > %dB\n",
+ mp->shared->tx_csum_limit);
+ }
+
+ return features;
+}
+
static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
@@ -2566,8 +2583,10 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
mv643xx_eth_recalc_skb_size(mp);
tx_set_rate(mp, 1000000000, 16777216);
- if (!netif_running(dev))
+ if (!netif_running(dev)) {
+ netdev_update_features(dev);
return 0;
+ }
/*
* Stop and then re-open the interface. This will allocate RX
@@ -2581,6 +2600,7 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
"fatal error on re-opening device after MTU change\n");
}
+ netdev_update_features(dev);
return 0;
}
@@ -3079,6 +3099,7 @@ static const struct net_device_ops mv643xx_eth_netdev_ops = {
.ndo_set_mac_address = mv643xx_eth_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_eth_ioctl = mv643xx_eth_ioctl,
+ .ndo_fix_features = mv643xx_eth_fix_features,
.ndo_change_mtu = mv643xx_eth_change_mtu,
.ndo_set_features = mv643xx_eth_set_features,
.ndo_tx_timeout = mv643xx_eth_tx_timeout,
--
2.34.1