Message-Id: <20230103064012.108029-5-hengqi@linux.alibaba.com>
Date: Tue, 3 Jan 2023 14:40:07 +0800
From: Heng Qi <hengqi@...ux.alibaba.com>
To: netdev@...r.kernel.org, bpf@...r.kernel.org
Cc: Jason Wang <jasowang@...hat.com>,
"Michael S . Tsirkin" <mst@...hat.com>,
Paolo Abeni <pabeni@...hat.com>,
Jakub Kicinski <kuba@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
"David S . Miller" <davem@...emloft.net>,
Daniel Borkmann <daniel@...earbox.net>,
Alexei Starovoitov <ast@...nel.org>,
Eric Dumazet <edumazet@...gle.com>,
Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
Subject: [PATCH v3 4/9] virtio-net: build xdp_buff with multi buffers

Support XDP for multi-buffer packets in mergeable mode.

Use the first buffer as the linear part of the xdp_buff, and the rest
of the buffers as non-linear fragments in the struct skb_shared_info
located in the tailroom of the xdp_buff.

Let 'truesize' return to its literal meaning: when XDP is set, it
includes the length of the headroom and tailroom.
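
To illustrate the accounting, here is a small standalone sketch of the
arithmetic. The numbers are only an example and are not taken from the
patch: they assume a 256-byte VIRTIO_XDP_HEADROOM, a struct
skb_shared_info of roughly 320 bytes, 64-byte SKB_DATA_ALIGN()
rounding, and an EWMA-estimated buffer length of 1536.

#include <stdio.h>

/* Assumed, typical x86_64 values; see the note above. */
#define CACHE_LINE		64
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define VIRTIO_XDP_HEADROOM	256
#define SHINFO_SIZE		320	/* approx sizeof(struct skb_shared_info) */

int main(void)
{
	unsigned int headroom = VIRTIO_XDP_HEADROOM;
	unsigned int tailroom = SHINFO_SIZE;	/* reserved only when XDP is set */
	unsigned int room = ALIGN_UP(headroom + tailroom, CACHE_LINE);
	unsigned int len = 1536;		/* EWMA-estimated buffer length */

	/* The buffer context now carries the literal truesize ... */
	unsigned int truesize = len + room;

	/* ... so the receive-side length checks become len > truesize - room. */
	printf("room=%u truesize=%u usable=%u\n", room, truesize, truesize - room);
	return 0;
}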
Signed-off-by: Heng Qi <hengqi@...ux.alibaba.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
---
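
A usage-level note for reviewers, not part of the patch: below is a
minimal sketch of the kind of frags-aware XDP program this receive path
is meant to serve. The program and section names are illustrative; it
relies only on the existing bpf_xdp_get_buff_len() and
bpf_xdp_load_bytes() helpers, since bytes that land in the
skb_shared_info frags cannot be reached through ctx->data/ctx->data_end.

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Loaded with BPF_F_XDP_HAS_FRAGS via the "xdp.frags" section name. */
SEC("xdp.frags")
int xdp_mb_sketch(struct xdp_md *ctx)
{
	__u8 hdr[64];
	__u64 total = bpf_xdp_get_buff_len(ctx);	/* linear part + all frags */

	if (total < sizeof(hdr))
		return XDP_PASS;

	/* The copy may cross the linear/frag boundary transparently. */
	if (bpf_xdp_load_bytes(ctx, 0, hdr, sizeof(hdr)) < 0)
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";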
drivers/net/virtio_net.c | 108 ++++++++++++++++++++++++++++++++++++---
1 file changed, 100 insertions(+), 8 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6fc5302ca5ff..699e376b8f8b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -931,6 +931,91 @@ static struct sk_buff *receive_big(struct net_device *dev,
 	return NULL;
 }
 
+/* TODO: build xdp in big mode */
+static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
+				      struct virtnet_info *vi,
+				      struct receive_queue *rq,
+				      struct xdp_buff *xdp,
+				      void *buf,
+				      unsigned int len,
+				      unsigned int frame_sz,
+				      u16 *num_buf,
+				      unsigned int *xdp_frags_truesize,
+				      struct virtnet_rq_stats *stats)
+{
+	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
+	unsigned int headroom, tailroom, room;
+	unsigned int truesize, cur_frag_size;
+	struct skb_shared_info *shinfo;
+	unsigned int xdp_frags_truesz = 0;
+	struct page *page;
+	skb_frag_t *frag;
+	int offset;
+	void *ctx;
+
+	xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
+	xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
+			 VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
+
+	if (*num_buf > 1) {
+		/* If we want to build multi-buffer xdp, we need
+		 * to set the XDP_FLAGS_HAS_FRAGS bit in the
+		 * xdp_buff flags.
+		 */
+		if (!xdp_buff_has_frags(xdp))
+			xdp_buff_set_frags_flag(xdp);
+
+		shinfo = xdp_get_shared_info_from_buff(xdp);
+		shinfo->nr_frags = 0;
+		shinfo->xdp_frags_size = 0;
+	}
+
+	if ((*num_buf - 1) > MAX_SKB_FRAGS)
+		return -EINVAL;
+
+	while ((--*num_buf) >= 1) {
+		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
+		if (unlikely(!buf)) {
+			pr_debug("%s: rx error: %d buffers out of %d missing\n",
+				 dev->name, *num_buf,
+				 virtio16_to_cpu(vi->vdev, hdr->num_buffers));
+			dev->stats.rx_length_errors++;
+			return -EINVAL;
+		}
+
+		stats->bytes += len;
+		page = virt_to_head_page(buf);
+		offset = buf - page_address(page);
+
+		truesize = mergeable_ctx_to_truesize(ctx);
+		headroom = mergeable_ctx_to_headroom(ctx);
+		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
+		room = SKB_DATA_ALIGN(headroom + tailroom);
+
+		cur_frag_size = truesize;
+		xdp_frags_truesz += cur_frag_size;
+		if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
+			put_page(page);
+			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
+				 dev->name, len, (unsigned long)(truesize - room));
+			dev->stats.rx_length_errors++;
+			return -EINVAL;
+		}
+
+		frag = &shinfo->frags[shinfo->nr_frags++];
+		__skb_frag_set_page(frag, page);
+		skb_frag_off_set(frag, offset);
+		skb_frag_size_set(frag, len);
+		if (page_is_pfmemalloc(page))
+			xdp_buff_set_frag_pfmemalloc(xdp);
+
+		shinfo->xdp_frags_size += len;
+	}
+
+	*xdp_frags_truesize = xdp_frags_truesz;
+	return 0;
+}
+
 static struct sk_buff *receive_mergeable(struct net_device *dev,
 					 struct virtnet_info *vi,
 					 struct receive_queue *rq,
@@ -949,15 +1034,17 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
 	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
 	unsigned int metasize = 0;
+	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
+	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
 	unsigned int frame_sz;
 	int err;
 
 	head_skb = NULL;
 	stats->bytes += len - vi->hdr_len;
 
-	if (unlikely(len > truesize)) {
+	if (unlikely(len > truesize - room)) {
 		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
-			 dev->name, len, (unsigned long)ctx);
+			 dev->name, len, (unsigned long)(truesize - room));
 		dev->stats.rx_length_errors++;
 		goto err_skb;
 	}
@@ -983,10 +1070,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;
 
-		/* Buffers with headroom use PAGE_SIZE as alloc size,
-		 * see add_recvbuf_mergeable() + get_mergeable_buf_len()
+		/* Now the XDP core assumes the frag size is PAGE_SIZE, but
+		 * buffers with headroom may add a hole to the truesize, which
+		 * can make their length exceed PAGE_SIZE. So the hole
+		 * mechanism is disabled for xdp. See add_recvbuf_mergeable().
 		 */
-		frame_sz = headroom ? PAGE_SIZE : truesize;
+		frame_sz = truesize;
 
 		/* This happens when rx buffer size is underestimated
		 * or headroom is not enough because of the buffer
@@ -1139,9 +1228,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		page = virt_to_head_page(buf);
 
 		truesize = mergeable_ctx_to_truesize(ctx);
-		if (unlikely(len > truesize)) {
+		headroom = mergeable_ctx_to_headroom(ctx);
+		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
+		room = SKB_DATA_ALIGN(headroom + tailroom);
+		if (unlikely(len > truesize - room)) {
 			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
-				 dev->name, len, (unsigned long)ctx);
+				 dev->name, len, (unsigned long)(truesize - room));
 			dev->stats.rx_length_errors++;
 			goto err_skb;
 		}
@@ -1428,7 +1520,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
 	}
 
 	sg_init_one(rq->sg, buf, len);
-	ctx = mergeable_len_to_ctx(len, headroom);
+	ctx = mergeable_len_to_ctx(len + room, headroom);
 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
 	if (err < 0)
 		put_page(virt_to_head_page(buf));
--
2.19.1.6.gb485710b