Message-ID: <4c049eea-e00e-0c5f-9b52-95925a178dd1@redhat.com>
Date: Wed, 14 Apr 2021 17:37:44 +0800
From: Jason Wang <jasowang@...hat.com>
To: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>, netdev@...r.kernel.org
Cc: "Michael S. Tsirkin" <mst@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
virtualization@...ts.linux-foundation.org
Subject: Re: [PATCH net-next v2] virtio-net: page_to_skb() use build_skb when
there's sufficient tailroom
On 2021/4/14 9:52 AM, Xuan Zhuo wrote:
> In page_to_skb(), if there is sufficient tailroom in the buffer to hold
> the skb_shared_info, we can use build_skb() to create the skb directly,
> with no need to allocate additional space. This also saves a frags slot,
> which is very friendly to GRO.
>
> Here, if the payload of the received packet is small (less than
> GOOD_COPY_LEN), we still copy it into the space obtained from
> napi_alloc_skb(), so that these pages can be reused.
>
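To summarize the two paths, an illustrative sketch using the patch's own
names (this is not the exact hunk, which is quoted further down):

	/* Illustrative sketch of the path selection in page_to_skb():
	 * build the skb around the page when the payload is large and
	 * the buffer has room for skb_shared_info, otherwise copy out
	 * so the page can be reused.
	 */
	shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
		skb = build_skb(p, truesize);	/* page becomes skb->head */
	} else {
		skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	}
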
> Testing machine:
> The four queues of the NIC are bound to cpu1.
>
> Test command:
> for ((i=0;i<5;++i)); do sockperf tp --ip 192.168.122.64 -m 1000 -t 150& done
>
> The size of the UDP packet is 1000 bytes, so with this patch there
> will always be enough tailroom to use build_skb(). The sent UDP packets
> are discarded because there is no port listening for them. With the
> machine's softirq handling at 100%, we observe the receive rate reported
> by sar -n DEV 1:
>
> no build_skb: 956864.00 rxpck/s
> build_skb: 1158465.00 rxpck/s
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
> Suggested-by: Jason Wang <jasowang@...hat.com>
> ---
>
> v2: conflict resolution
>
> drivers/net/virtio_net.c | 51 ++++++++++++++++++++++++++--------------
> 1 file changed, 33 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 101659cd4b87..d7142b508bd0 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -383,17 +383,11 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
> {
> struct sk_buff *skb;
> struct virtio_net_hdr_mrg_rxbuf *hdr;
> - unsigned int copy, hdr_len, hdr_padded_len;
> - char *p;
> + unsigned int copy, hdr_len, hdr_padded_len, tailroom, shinfo_size;
> + char *p, *hdr_p;
>
> p = page_address(page) + offset;
> -
> - /* copy small packet so we can reuse these pages for small data */
> - skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
> - if (unlikely(!skb))
> - return NULL;
> -
> - hdr = skb_vnet_hdr(skb);
> + hdr_p = p;
>
> hdr_len = vi->hdr_len;
> if (vi->mergeable_rx_bufs)
> @@ -401,14 +395,28 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
> else
> hdr_padded_len = sizeof(struct padded_vnet_hdr);
>
> - /* hdr_valid means no XDP, so we can copy the vnet header */
> - if (hdr_valid)
> - memcpy(hdr, p, hdr_len);
> + tailroom = truesize - len;
The math doesn't look correct in the XDP case, since the eBPF program can
choose to adjust the header and insert metadata, which can make truesize
less than len.
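
Since tailroom is unsigned, truesize - len would then wrap around. An
illustrative guard (not what the patch does) would be:

	/* Illustrative only: avoid unsigned underflow when XDP has
	 * grown the frame so that len exceeds truesize.
	 */
	tailroom = truesize > len ? truesize - len : 0;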
Note that in the case of XDP, we always reserve sufficient tailroom for
the shinfo; see add_recvbuf_mergeable():

	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;

Thanks
>
> len -= hdr_len;
> offset += hdr_padded_len;
> p += hdr_padded_len;
>
> + shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
> +
> + if (len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
> + skb = build_skb(p, truesize);
> + if (unlikely(!skb))
> + return NULL;
> +
> + skb_put(skb, len);
> + goto ok;
> + }
> +
> + /* copy small packet so we can reuse these pages for small data */
> + skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
> + if (unlikely(!skb))
> + return NULL;
> +
> /* Copy all frame if it fits skb->head, otherwise
> * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
> */
> @@ -418,11 +426,6 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
> copy = ETH_HLEN + metasize;
> skb_put_data(skb, p, copy);
>
> - if (metasize) {
> - __skb_pull(skb, metasize);
> - skb_metadata_set(skb, metasize);
> - }
> -
> len -= copy;
> offset += copy;
>
> @@ -431,7 +434,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
> skb_add_rx_frag(skb, 0, page, offset, len, truesize);
> else
> put_page(page);
> - return skb;
> + goto ok;
> }
>
> /*
> @@ -458,6 +461,18 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
> if (page)
> give_pages(rq, page);
>
> +ok:
> + /* hdr_valid means no XDP, so we can copy the vnet header */
> + if (hdr_valid) {
> + hdr = skb_vnet_hdr(skb);
> + memcpy(hdr, hdr_p, hdr_len);
> + }
> +
> + if (metasize) {
> + __skb_pull(skb, metasize);
> + skb_metadata_set(skb, metasize);
> + }
> +
> return skb;
> }
>
> --
> 2.31.0
>