Message-Id: <20230626120301.380-2-hengqi@linux.alibaba.com>
Date: Mon, 26 Jun 2023 20:03:00 +0800
From: Heng Qi <hengqi@...ux.alibaba.com>
To: netdev@...r.kernel.org,
bpf@...r.kernel.org
Cc: "Michael S . Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
Xuan Zhuo <xuanzhuo@...ux.alibaba.com>,
"David S . Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>
Subject: [PATCH net-next v3 1/2] virtio-net: support coexistence of XDP and GUEST_CSUM

Re-probe the csum related fields on the XDP path so that XDP and the
RX hw checksum capability (_F_GUEST_CSUM) can coexist. This brings two
benefits:
1. The RX hw checksum capability can still be used while XDP is loaded.
2. Packets marked VIRTIO_NET_HDR_F_NEEDS_CSUM are no longer dropped
   when XDP is loaded in the vm-vm scenario.
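
For reference (abridged here, not a verbatim quote of
include/uapi/linux/virtio_net.h), the checksum metadata being re-probed
lives in the per-packet virtio-net header. When _F_NEEDS_CSUM is set the
packet carries only a partial checksum described by csum_start/csum_offset,
which an XDP program may invalidate by rewriting the packet:

    #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* csum_start/csum_offset valid,
                                           * csum itself is still partial */
    #define VIRTIO_NET_HDR_F_DATA_VALID 2 /* device already verified csum */

    struct virtio_net_hdr {
            __u8 flags;
            __u8 gso_type;
            __virtio16 hdr_len;
            __virtio16 gso_size;
            __virtio16 csum_start;  /* where checksumming starts */
            __virtio16 csum_offset; /* csum field offset from csum_start */
    };
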
Signed-off-by: Heng Qi <hengqi@...ux.alibaba.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
---
v2->v3:
- Use skb_checksum_setup() instead of virtnet_flow_dissect_udp_tcp();
  the two are essentially equivalent.
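
  To illustrate the equivalence (a simplified sketch, not part of the
  patch; TCP/IPv4 case only, from my reading of net/core/skbuff.c):
  with recalculate == true, skb_checksum_setup() both re-probes the
  partial-csum metadata and refreshes the pseudo-header checksum,
  roughly:

      unsigned int off = ip_hdrlen(skb);

      /* Re-probe: CHECKSUM_PARTIAL with csum_start/csum_offset
       * pointing at the TCP checksum field.
       */
      skb_partial_csum_set(skb, off, offsetof(struct tcphdr, check));

      /* Recalculate: seed the checksum field with the pseudo-header
       * csum so the stack or the egress path can complete it.
       */
      tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                                               ip_hdr(skb)->daddr,
                                               skb->len - off,
                                               IPPROTO_TCP, 0);

      /* The real helper also handles UDP and IPv6, and pulls the
       * needed headers into the linear area first.
       */
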
drivers/net/virtio_net.c | 86 ++++++++++++++++++++++++++++++++++------
1 file changed, 73 insertions(+), 13 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5a7f7a76b920..0a715e0fbc97 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1568,6 +1568,44 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
}
+static int virtnet_set_csum_after_xdp(struct virtnet_info *vi,
+ struct sk_buff *skb,
+ __u8 flags)
+{
+ int err = 0;
+
+ /* When an XDP program is loaded, for example in the same-host
+ * vm-vm scenario, packets marked VIRTIO_NET_HDR_F_NEEDS_CSUM may
+ * arrive carrying only a partial checksum. Such packets are safe
+ * from the vm's point of view, but XDP may modify them; for the
+ * upper layer to forward them successfully we re-probe the
+ * necessary checksum information: skb->csum_{start, offset} and
+ * the pseudo-header csum, using skb_checksum_setup().
+ *
+ * This benefits us:
+ * 1. XDP can be loaded when there's _F_GUEST_CSUM.
+ * 2. The device verifies the checksum of packets, which is
+ * especially beneficial for large packets.
+ * 3. In the same-host vm-vm scenario, packets marked as
+ * VIRTIO_NET_HDR_F_NEEDS_CSUM are no longer dropped after being
+ * processed by XDP.
+ */
+ if (flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ /* We don't parse SCTP because virtio-net currently doesn't
+ * support CRC checksum offloading for SCTP.
+ */
+ err = skb_checksum_setup(skb, true);
+ } else if (flags & VIRTIO_NET_HDR_F_DATA_VALID) {
+ /* Keep this benefit: XDP guarantees that packets marked
+ * VIRTIO_NET_HDR_F_DATA_VALID still have a correct csum after
+ * they are processed.
+ */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+
+ return err;
+}
+
static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
void *buf, unsigned int len, void **ctx,
unsigned int *xdp_xmit,
@@ -1576,6 +1614,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
struct net_device *dev = vi->dev;
struct sk_buff *skb;
struct virtio_net_hdr_mrg_rxbuf *hdr;
+ __u8 flags;
if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
@@ -1584,6 +1623,13 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
return;
}
+ /* Save the header flags before XDP processes the data.
+ * It is OK to use this for both mergeable and small modes,
+ * because that matches what the current code does.
+ */
+ if (unlikely(vi->xdp_enabled))
+ flags = ((struct virtio_net_hdr_mrg_rxbuf *)buf)->hdr.flags;
+
if (vi->mergeable_rx_bufs)
skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
stats);
@@ -1595,23 +1641,37 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
if (unlikely(!skb))
return;
- hdr = skb_vnet_hdr(skb);
- if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
- virtio_skb_set_hash((const struct virtio_net_hdr_v1_hash *)hdr, skb);
-
- if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (unlikely(vi->xdp_enabled)) {
+ /* This must be done before re-probing and calculating the
+ * pseudo-header checksum.
+ */
+ skb->protocol = eth_type_trans(skb, dev);
+ skb_reset_network_header(skb);
+ if (virtnet_set_csum_after_xdp(vi, skb, flags) < 0) {
+ pr_debug("%s: errors occurred in setting partial csum",
+ dev->name);
+ goto frame_err;
+ }
+ } else {
+ hdr = skb_vnet_hdr(skb);
+ if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
+ virtio_skb_set_hash((const struct virtio_net_hdr_v1_hash *)hdr, skb);
+
+ if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
+ virtio_is_little_endian(vi->vdev))) {
+ net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
+ dev->name, hdr->hdr.gso_type,
+ hdr->hdr.gso_size);
+ goto frame_err;
+ }
- if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
- virtio_is_little_endian(vi->vdev))) {
- net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
- dev->name, hdr->hdr.gso_type,
- hdr->hdr.gso_size);
- goto frame_err;
+ skb->protocol = eth_type_trans(skb, dev);
}
skb_record_rx_queue(skb, vq2rxq(rq->vq));
- skb->protocol = eth_type_trans(skb, dev);
pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
ntohs(skb->protocol), skb->len, skb->pkt_type);
--
2.19.1.6.gb485710b