Message-ID: <cd6225a065ce8f2e2fe052c2e02ecf78e7ee9d4f.1750176076.git.pabeni@redhat.com>
Date: Tue, 17 Jun 2025 18:12:13 +0200
From: Paolo Abeni <pabeni@...hat.com>
To: netdev@...r.kernel.org
Cc: Willem de Bruijn <willemdebruijn.kernel@...il.com>,
Jason Wang <jasowang@...hat.com>,
Andrew Lunn <andrew+netdev@...n.ch>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
"Michael S. Tsirkin" <mst@...hat.com>,
Xuan Zhuo <xuanzhuo@...ux.alibaba.com>,
Eugenio Pérez <eperezma@...hat.com>,
Yuri Benditovich <yuri.benditovich@...nix.com>,
Akihiko Odaki <akihiko.odaki@...nix.com>
Subject: [PATCH v4 net-next 6/8] virtio_net: enable gso over UDP tunnel support.

If the related virtio features are set, enable transmission and
reception of GSO over UDP tunnel packets.

Most of the work is done by the previously introduced helpers; we only
need to determine the UDP tunnel features inside the virtio_net_hdr and
update the virtio net hdr size accordingly.
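
For clarity, the probe-time negotiation added by this patch boils down to
the sketch below (a simplified, untested excerpt; only the feature bits and
fields touched by this patch are shown, and the pre-existing hdr_len
branches and error handling are omitted):

    /* advertise UDP tunnel GSO to the stack when the host supports it */
    if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO)) {
            dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
            dev->hw_enc_features = dev->hw_features;
    }

    /* the extended header is needed as soon as either direction
     * negotiated the tunnel offload
     */
    if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO) ||
        virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO))
            vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash_tunnel);

    /* track rx and tx tunnel offload state separately */
    vi->rx_tnl = virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO);
    vi->rx_tnl_csum = virtio_has_feature(vdev,
                                         VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM);
    vi->tx_tnl = virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO);
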
Signed-off-by: Paolo Abeni <pabeni@...hat.com>
---
v3 -> v4:
- tnl header fields at constant offset, update the parsing accordingly
- keep separate status for tnl offload tx and tnl offload rx
- drop redundant comment
v2 -> v3:
- drop the VIRTIO_HAS_EXTENDED_FEATURES conditionals
v1 -> v2:
- test for UDP_TUNNEL_GSO* only on builds with extended features support
- comment indentation cleanup
- rebased on top of virtio helpers changes
- dump more information in case of bad offloads
---
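
As a reading aid (not part of the patch itself), the data-path use of the
new per-direction state reduces to roughly the following, mirroring the
xmit_skb() and virtnet_receive_done() hunks below; helper names and
signatures are taken from the diff, and the surrounding code is elided:

    /* tx path: the tunnel-aware helper fills the extended header */
    if (virtio_net_hdr_tnl_from_skb(skb, hdr, vi->tx_tnl,
                                    virtio_is_little_endian(vi->vdev), 0))
            return -EPROTO;

    /* rx path: validate the checksum hints first, then parse the
     * (possibly tunnelled) GSO metadata into the skb
     */
    if (virtio_net_handle_csum_offload(skb, &hdr->hdr, vi->rx_tnl_csum))
            goto frame_err;

    if (virtio_net_hdr_tnl_to_skb(skb, &hdr->tnl_hdr, vi->rx_tnl,
                                  vi->rx_tnl_csum,
                                  virtio_is_little_endian(vi->vdev)))
            goto frame_err;
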
drivers/net/virtio_net.c | 85 ++++++++++++++++++++++++++++++----------
1 file changed, 64 insertions(+), 21 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index e6c0a9660bb4..edf0197526f5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -78,15 +78,19 @@ static const unsigned long guest_offloads[] = {
VIRTIO_NET_F_GUEST_CSUM,
VIRTIO_NET_F_GUEST_USO4,
VIRTIO_NET_F_GUEST_USO6,
- VIRTIO_NET_F_GUEST_HDRLEN
+ VIRTIO_NET_F_GUEST_HDRLEN,
+ VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_MAPPED,
+ VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM_MAPPED,
};
#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
- (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
- (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
- (1ULL << VIRTIO_NET_F_GUEST_UFO) | \
- (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
- (1ULL << VIRTIO_NET_F_GUEST_USO6))
+ (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
+ (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
+ (1ULL << VIRTIO_NET_F_GUEST_UFO) | \
+ (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
+ (1ULL << VIRTIO_NET_F_GUEST_USO6) | \
+ (1ULL << VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_MAPPED) | \
+ (1ULL << VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM_MAPPED))
struct virtnet_stat_desc {
char desc[ETH_GSTRING_LEN];
@@ -439,6 +443,13 @@ struct virtnet_info {
/* Work struct for delayed refilling if we run low on memory. */
struct delayed_work refill;
+ /* UDP tunnel support */
+ bool tx_tnl;
+
+ bool rx_tnl;
+
+ bool rx_tnl_csum;
+
/* Is delayed refill enabled? */
bool refill_enabled;
@@ -498,6 +509,7 @@ struct virtio_net_common_hdr {
struct virtio_net_hdr hdr;
struct virtio_net_hdr_mrg_rxbuf mrg_hdr;
struct virtio_net_hdr_v1_hash hash_v1_hdr;
+ struct virtio_net_hdr_v1_hash_tunnel tnl_hdr;
};
};
@@ -2531,14 +2543,21 @@ static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *
if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
- if (flags & VIRTIO_NET_HDR_F_DATA_VALID)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ hdr->hdr.flags = flags;
+ if (virtio_net_handle_csum_offload(skb, &hdr->hdr, vi->rx_tnl_csum)) {
+ net_warn_ratelimited("%s: bad csum: flags: %x, gso_type: %x rx_tnl_csum %d\n",
+ dev->name, hdr->hdr.flags,
+ hdr->hdr.gso_type, vi->rx_tnl_csum);
+ goto frame_err;
+ }
- if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
- virtio_is_little_endian(vi->vdev))) {
- net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
+ if (virtio_net_hdr_tnl_to_skb(skb, &hdr->tnl_hdr, vi->rx_tnl,
+ vi->rx_tnl_csum,
+ virtio_is_little_endian(vi->vdev))) {
+ net_warn_ratelimited("%s: bad gso: type: %x, size: %u, flags %x tunnel %d tnl csum %d\n",
dev->name, hdr->hdr.gso_type,
- hdr->hdr.gso_size);
+ hdr->hdr.gso_size, hdr->hdr.flags,
+ vi->rx_tnl, vi->rx_tnl_csum);
goto frame_err;
}
@@ -3250,9 +3269,9 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb, bool orphan)
{
- struct virtio_net_hdr_mrg_rxbuf *hdr;
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
struct virtnet_info *vi = sq->vq->vdev->priv;
+ struct virtio_net_hdr_v1_hash_tunnel *hdr;
int num_sg;
unsigned hdr_len = vi->hdr_len;
bool can_push;
@@ -3265,17 +3284,17 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb, bool orphan)
/* Even if we can, don't push here yet as this would skew
* csum_start offset below. */
if (can_push)
- hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
+ hdr = (struct virtio_net_hdr_v1_hash_tunnel *)(skb->data -
+ hdr_len);
else
- hdr = &skb_vnet_common_hdr(skb)->mrg_hdr;
+ hdr = &skb_vnet_common_hdr(skb)->tnl_hdr;
- if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
- virtio_is_little_endian(vi->vdev), false,
- 0))
+ if (virtio_net_hdr_tnl_from_skb(skb, hdr, vi->tx_tnl,
+ virtio_is_little_endian(vi->vdev), 0))
return -EPROTO;
if (vi->mergeable_rx_bufs)
- hdr->num_buffers = 0;
+ hdr->hash_hdr.hdr.num_buffers = 0;
sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
if (can_push) {
@@ -6764,10 +6783,20 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
dev->hw_features |= NETIF_F_GSO_UDP_L4;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO)) {
+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ dev->hw_enc_features = dev->hw_features;
+ }
+ if (dev->hw_features & NETIF_F_GSO_UDP_TUNNEL &&
+ virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO_CSUM)) {
+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ dev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ }
+
dev->features |= NETIF_F_GSO_ROBUST;
if (gso)
- dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
+ dev->features |= dev->hw_features;
/* (!csum && gso) case will be fixed by register_netdev() */
}
@@ -6860,7 +6889,10 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->xdp_metadata_ops = &virtnet_xdp_metadata_ops;
}
- if (vi->has_rss_hash_report)
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO) ||
+ virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO))
+ vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash_tunnel);
+ else if (vi->has_rss_hash_report)
vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
@@ -6868,6 +6900,13 @@ static int virtnet_probe(struct virtio_device *vdev)
else
vi->hdr_len = sizeof(struct virtio_net_hdr);
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM))
+ vi->rx_tnl_csum = true;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO))
+ vi->rx_tnl = true;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO))
+ vi->tx_tnl = true;
+
if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
vi->any_header_sg = true;
@@ -7178,6 +7217,10 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = {
VIRTNET_FEATURES,
+ VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO,
+ VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM,
+ VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO,
+ VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO_CSUM,
};
static unsigned int features_legacy[] = {
--
2.49.0