[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <15943b59b1638515770b7ab841b0d741dc314c3a.1644930125.git.lorenzo@kernel.org>
Date: Tue, 15 Feb 2022 14:08:11 +0100
From: Lorenzo Bianconi <lorenzo@...nel.org>
To: bpf@...r.kernel.org, netdev@...r.kernel.org
Cc: davem@...emloft.net, kuba@...nel.org, ast@...nel.org,
daniel@...earbox.net, brouer@...hat.com, toke@...hat.com,
pabeni@...hat.com, echaudro@...hat.com,
lorenzo.bianconi@...hat.com, toshiaki.makita1@...il.com,
andrii@...nel.org
Subject: [PATCH v2 bpf-next 3/3] veth: allow jumbo frames in xdp mode
Allow increasing the MTU over the page boundary on veth devices
if the attached XDP program declares support for XDP fragments.
Keep NETIF_F_ALL_TSO enabled when the device is running in XDP mode.
Signed-off-by: Lorenzo Bianconi <lorenzo@...nel.org>
---
drivers/net/veth.c | 26 +++++++++++---------------
1 file changed, 11 insertions(+), 15 deletions(-)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index a45aaaecc21f..2e048f957bc6 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -292,8 +292,6 @@ static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
/* return true if the specified skb has chances of GRO aggregation
* Don't strive for accuracy, but try to avoid GRO overhead in the most
* common scenarios.
- * When XDP is enabled, all traffic is considered eligible, as the xmit
- * device has TSO off.
* When TSO is enabled on the xmit device, we are likely interested only
* in UDP aggregation, explicitly check for that if the skb is suspected
* - the sock_wfree destructor is used by UDP, ICMP and XDP sockets -
@@ -334,7 +332,8 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
* Don't bother with napi/GRO if the skb can't be aggregated
*/
use_napi = rcu_access_pointer(rq->napi) &&
- veth_skb_is_eligible_for_gro(dev, rcv, skb);
+ (rcu_access_pointer(rq->xdp_prog) ||
+ veth_skb_is_eligible_for_gro(dev, rcv, skb));
}
skb_tx_timestamp(skb);
@@ -1508,7 +1507,6 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
struct veth_priv *priv = netdev_priv(dev);
struct bpf_prog *old_prog;
struct net_device *peer;
- unsigned int max_mtu;
int err;
old_prog = priv->_xdp_prog;
@@ -1516,6 +1514,8 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
peer = rtnl_dereference(priv->peer);
if (prog) {
+ unsigned int max_mtu;
+
if (!peer) {
NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
err = -ENOTCONN;
@@ -1525,9 +1525,9 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
peer->hard_header_len -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- if (peer->mtu > max_mtu) {
- NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
- err = -ERANGE;
+ if (!prog->aux->xdp_has_frags && peer->mtu > max_mtu) {
+ NL_SET_ERR_MSG_MOD(extack, "prog does not support XDP frags");
+ err = -EOPNOTSUPP;
goto err;
}
@@ -1545,10 +1545,8 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
}
}
- if (!old_prog) {
- peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
- peer->max_mtu = max_mtu;
- }
+ if (!old_prog)
+ peer->hw_features &= ~NETIF_F_GSO_FRAGLIST;
}
if (old_prog) {
@@ -1556,10 +1554,8 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (dev->flags & IFF_UP)
veth_disable_xdp(dev);
- if (peer) {
- peer->hw_features |= NETIF_F_GSO_SOFTWARE;
- peer->max_mtu = ETH_MAX_MTU;
- }
+ if (peer)
+ peer->hw_features |= NETIF_F_GSO_FRAGLIST;
}
bpf_prog_put(old_prog);
}
--
2.35.1
Powered by blists - more mailing lists