Message-ID: <a5b743d1-37d1-1225-c1cb-62cd23d26aef@redhat.com>
Date: Fri, 31 Mar 2023 17:14:33 +0800
From: Jason Wang <jasowang@...hat.com>
To: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>, netdev@...r.kernel.org
Cc: "Michael S. Tsirkin" <mst@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
virtualization@...ts.linux-foundation.org, bpf@...r.kernel.org
Subject: Re: [PATCH net-next 2/8] virtio_net: mergeable xdp: introduce
mergeable_xdp_prepare
On 2023/3/28 20:04, Xuan Zhuo wrote:
> Separate the logic that prepares for XDP out of receive_mergeable.
>
> The purpose of this is to simplify the XDP execution logic.
>
> The main logic here is that when the headroom is insufficient, we need
> to allocate a new page and calculate the offset. It should be noted
> that if a new page is allocated, the variable page will refer to it.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
> ---
> drivers/net/virtio_net.c | 135 ++++++++++++++++++++++-----------------
> 1 file changed, 77 insertions(+), 58 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 4d2bf1ce0730..bb426958cdd4 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -1162,6 +1162,79 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
> return 0;
> }
>
> +static void *mergeable_xdp_prepare(struct virtnet_info *vi,
> + struct receive_queue *rq,
> + struct bpf_prog *xdp_prog,
> + void *ctx,
> + unsigned int *frame_sz,
> + int *num_buf,
> + struct page **page,
> + int offset,
> + unsigned int *len,
> + struct virtio_net_hdr_mrg_rxbuf *hdr)
> +{
> + unsigned int truesize = mergeable_ctx_to_truesize(ctx);
> + unsigned int headroom = mergeable_ctx_to_headroom(ctx);
> + struct page *xdp_page;
> + unsigned int xdp_room;
> +
> + /* Transient failure which in theory could occur if
> + * in-flight packets from before XDP was enabled reach
> + * the receive path after XDP is loaded.
> + */
> + if (unlikely(hdr->hdr.gso_type))
> + return NULL;
> +
> + /* Now XDP core assumes frag size is PAGE_SIZE, but buffers
> + * with headroom may add a hole in truesize, which
> + * makes their length exceed PAGE_SIZE. So we disabled the
> + * hole mechanism for xdp. See add_recvbuf_mergeable().
> + */
> + *frame_sz = truesize;
> +
> + /* This happens when the headroom is not enough because
> + * the buffer was prefilled before XDP was set.
> + * This should only happen for the first several packets.
> + * In fact, vq reset can be used here to help us clean up
> + * the prefilled buffers, but many existing devices do not
> + * support it, and we don't want to bother users who are
> + * using xdp normally.
> + */
> + if (!xdp_prog->aux->xdp_has_frags &&
> + (*num_buf > 1 || headroom < virtnet_get_headroom(vi))) {
> + /* linearize data for XDP */
> + xdp_page = xdp_linearize_page(rq, num_buf,
> + *page, offset,
> + VIRTIO_XDP_HEADROOM,
> + len);
> +
> + if (!xdp_page)
> + return NULL;
> + } else if (unlikely(headroom < virtnet_get_headroom(vi))) {
> + xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
> + sizeof(struct skb_shared_info));
> + if (*len + xdp_room > PAGE_SIZE)
> + return NULL;
> +
> + xdp_page = alloc_page(GFP_ATOMIC);
> + if (!xdp_page)
> + return NULL;
> +
> + memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
> + page_address(*page) + offset, *len);
> + } else {
> + return page_address(*page) + offset;
This makes the code a little harder to read than the original. Why not
do a verbatim move without introducing new logic? (Or introduce the new
logic on top?)
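
Something like the following (completely untested, and the exact
parameter list is just an example) would be a more verbatim move: the
helper returns the page to use and keeps the offset/frame_sz handling
identical to the old in-line block, so the only new logic is the
function boundary itself. The gso_type check and the
"frame_sz = truesize" initialization would move over in the same way:

static struct page *mergeable_xdp_prepare(struct virtnet_info *vi,
					  struct receive_queue *rq,
					  struct bpf_prog *xdp_prog,
					  struct page *page,
					  int *num_buf,
					  unsigned int *len,
					  int *offset,
					  unsigned int *frame_sz,
					  unsigned int headroom)
{
	struct page *xdp_page;
	unsigned int xdp_room;

	if (!xdp_prog->aux->xdp_has_frags &&
	    (*num_buf > 1 || headroom < virtnet_get_headroom(vi))) {
		/* linearize data for XDP */
		xdp_page = xdp_linearize_page(rq, num_buf, page, *offset,
					      VIRTIO_XDP_HEADROOM, len);
		*frame_sz = PAGE_SIZE;

		if (!xdp_page)
			return NULL;
		*offset = VIRTIO_XDP_HEADROOM;

		put_page(page);
	} else if (unlikely(headroom < virtnet_get_headroom(vi))) {
		xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
					  sizeof(struct skb_shared_info));
		if (*len + xdp_room > PAGE_SIZE)
			return NULL;

		xdp_page = alloc_page(GFP_ATOMIC);
		if (!xdp_page)
			return NULL;

		memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
		       page_address(page) + *offset, *len);
		*frame_sz = PAGE_SIZE;
		*offset = VIRTIO_XDP_HEADROOM;

		put_page(page);
	} else {
		xdp_page = page;
	}

	return xdp_page;
}

Then the caller keeps the old flow:

	xdp_page = mergeable_xdp_prepare(vi, rq, xdp_prog, page, &num_buf,
					 &len, &offset, &frame_sz, headroom);
	if (!xdp_page)
		goto err_xdp;
	page = xdp_page;
	data = page_address(xdp_page) + offset;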
Thanks
> + }
> +
> + *frame_sz = PAGE_SIZE;
> +
> + put_page(*page);
> +
> + *page = xdp_page;
> +
> + return page_address(xdp_page) + VIRTIO_XDP_HEADROOM;
> +}
> +
> static struct sk_buff *receive_mergeable(struct net_device *dev,
> struct virtnet_info *vi,
> struct receive_queue *rq,
> @@ -1181,7 +1254,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> unsigned int headroom = mergeable_ctx_to_headroom(ctx);
> unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
> unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
> - unsigned int frame_sz, xdp_room;
> + unsigned int frame_sz;
> int err;
>
> head_skb = NULL;
> @@ -1211,65 +1284,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> u32 act;
> int i;
>
> - /* Transient failure which in theory could occur if
> - * in-flight packets from before XDP was enabled reach
> - * the receive path after XDP is loaded.
> - */
> - if (unlikely(hdr->hdr.gso_type))
> + data = mergeable_xdp_prepare(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
> + offset, &len, hdr);
> + if (!data)
> goto err_xdp;
>
> - /* Now XDP core assumes frag size is PAGE_SIZE, but buffers
> - * with headroom may add hole in truesize, which
> - * make their length exceed PAGE_SIZE. So we disabled the
> - * hole mechanism for xdp. See add_recvbuf_mergeable().
> - */
> - frame_sz = truesize;
> -
> - /* This happens when headroom is not enough because
> - * of the buffer was prefilled before XDP is set.
> - * This should only happen for the first several packets.
> - * In fact, vq reset can be used here to help us clean up
> - * the prefilled buffers, but many existing devices do not
> - * support it, and we don't want to bother users who are
> - * using xdp normally.
> - */
> - if (!xdp_prog->aux->xdp_has_frags &&
> - (num_buf > 1 || headroom < virtnet_get_headroom(vi))) {
> - /* linearize data for XDP */
> - xdp_page = xdp_linearize_page(rq, &num_buf,
> - page, offset,
> - VIRTIO_XDP_HEADROOM,
> - &len);
> - frame_sz = PAGE_SIZE;
> -
> - if (!xdp_page)
> - goto err_xdp;
> - offset = VIRTIO_XDP_HEADROOM;
> -
> - put_page(page);
> - page = xdp_page;
> - } else if (unlikely(headroom < virtnet_get_headroom(vi))) {
> - xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
> - sizeof(struct skb_shared_info));
> - if (len + xdp_room > PAGE_SIZE)
> - goto err_xdp;
> -
> - xdp_page = alloc_page(GFP_ATOMIC);
> - if (!xdp_page)
> - goto err_xdp;
> -
> - memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
> - page_address(page) + offset, len);
> - frame_sz = PAGE_SIZE;
> - offset = VIRTIO_XDP_HEADROOM;
> -
> - put_page(page);
> - page = xdp_page;
> - } else {
> - xdp_page = page;
> - }
> -
> - data = page_address(xdp_page) + offset;
> err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
> &num_buf, &xdp_frags_truesz, stats);
> if (unlikely(err))