Message-Id: <930b1ad3d84f7ca5a41ba75571f9146a932c5394.1646755129.git.lorenzo@kernel.org>
Date: Tue, 8 Mar 2022 17:06:00 +0100
From: Lorenzo Bianconi <lorenzo@...nel.org>
To: bpf@...r.kernel.org, netdev@...r.kernel.org
Cc: davem@...emloft.net, kuba@...nel.org, ast@...nel.org,
daniel@...earbox.net, brouer@...hat.com, toke@...hat.com,
pabeni@...hat.com, echaudro@...hat.com,
lorenzo.bianconi@...hat.com, toshiaki.makita1@...il.com,
andrii@...nel.org
Subject: [PATCH v4 bpf-next 3/3] veth: allow jumbo frames in xdp mode

Allow increasing the MTU over page boundaries on veth devices
if the attached xdp program declares support for xdp fragments.
Keep NETIF_F_ALL_TSO enabled when the device is running in xdp
mode, dropping only NETIF_F_GSO_FRAGLIST from the peer device's
hw_features.

Signed-off-by: Lorenzo Bianconi <lorenzo@...nel.org>
---
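
A note for context, not part of the commit message: the
prog->aux->xdp_has_frags bit checked below is set at load time when
the bpf program declares multi-buffer support. With libbpf this is
done by placing the program in the "xdp.frags" section, which loads
it with BPF_F_XDP_HAS_FRAGS. A minimal sketch of such a program
(the program and file names are illustrative):

  // SPDX-License-Identifier: GPL-2.0
  /* xdp_pass_mb.bpf.c */
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  /* The "xdp.frags" section name makes libbpf set
   * BPF_F_XDP_HAS_FRAGS at load time, which the kernel exposes
   * to drivers as prog->aux->xdp_has_frags.
   */
  SEC("xdp.frags")
  int xdp_pass_mb(struct xdp_md *ctx)
  {
          return XDP_PASS;
  }

  char _license[] SEC("license") = "GPL";
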
 drivers/net/veth.c | 28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)
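
For reference, a rough worked example of the resulting limits,
assuming x86_64 with 4 KiB pages (VETH_XDP_HEADROOM == 256,
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) == 320,
peer->hard_header_len == ETH_HLEN == 14, MAX_SKB_FRAGS == 17; the
numbers are illustrative):

  max_mtu = SKB_WITH_OVERHEAD(4096 - 256) - 14
          = (4096 - 256 - 320) - 14
          = 3506

and, when the attached program sets xdp_has_frags:

  max_mtu = 3506 + 17 * 4096 = 73138
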
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 47b21b1d2fd9..c5a2dc2b2e4b 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -293,8 +293,7 @@ static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
 /* return true if the specified skb has chances of GRO aggregation
  * Don't strive for accuracy, but try to avoid GRO overhead in the most
  * common scenarios.
- * When XDP is enabled, all traffic is considered eligible, as the xmit
- * device has TSO off.
+ * When XDP is enabled, all traffic is considered eligible.
  * When TSO is enabled on the xmit device, we are likely interested only
  * in UDP aggregation, explicitly check for that if the skb is suspected
  * - the sock_wfree destructor is used by UDP, ICMP and XDP sockets -
@@ -302,11 +301,13 @@ static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
  */
 static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
 					 const struct net_device *rcv,
+					 const struct veth_rq *rq,
 					 const struct sk_buff *skb)
 {
-	return !(dev->features & NETIF_F_ALL_TSO) ||
-		(skb->destructor == sock_wfree &&
-		 rcv->features & (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD));
+	return rcu_access_pointer(rq->xdp_prog) ||
+	       !(dev->features & NETIF_F_ALL_TSO) ||
+	       (skb->destructor == sock_wfree &&
+		rcv->features & (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD));
 }
 
 static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -335,7 +336,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
 		 * Don't bother with napi/GRO if the skb can't be aggregated
 		 */
 		use_napi = rcu_access_pointer(rq->napi) &&
-			   veth_skb_is_eligible_for_gro(dev, rcv, skb);
+			   veth_skb_is_eligible_for_gro(dev, rcv, rq, skb);
 	}
 
 	skb_tx_timestamp(skb);
@@ -1525,9 +1526,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 			goto err;
 		}
 
-		max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
-			  peer->hard_header_len -
-			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		max_mtu = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) -
+			  peer->hard_header_len;
+		/* Allow increasing the max_mtu if the program supports
+		 * XDP fragments.
+		 */
+		if (prog->aux->xdp_has_frags)
+			max_mtu += PAGE_SIZE * MAX_SKB_FRAGS;
+
 		if (peer->mtu > max_mtu) {
 			NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
 			err = -ERANGE;
@@ -1549,7 +1555,7 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 		}
 
 		if (!old_prog) {
-			peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
+			peer->hw_features &= ~NETIF_F_GSO_FRAGLIST;
 			peer->max_mtu = max_mtu;
 		}
 	}
@@ -1560,7 +1566,7 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 		veth_disable_xdp(dev);
 
 		if (peer) {
-			peer->hw_features |= NETIF_F_GSO_SOFTWARE;
+			peer->hw_features |= NETIF_F_GSO_FRAGLIST;
 			peer->max_mtu = ETH_MAX_MTU;
 		}
 	}
--
2.35.1