Message-ID: <3f5b23d8-e008-1586-9c15-a4e29d67e5b3@linux.alibaba.com>
Date: Thu, 8 Dec 2022 16:23:10 +0800
From: Heng Qi <hengqi@...ux.alibaba.com>
To: Jason Wang <jasowang@...hat.com>
Cc: netdev@...r.kernel.org, bpf@...r.kernel.org,
"Michael S. Tsirkin" <mst@...hat.com>,
Paolo Abeni <pabeni@...hat.com>,
Jakub Kicinski <kuba@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
"David S. Miller" <davem@...emloft.net>,
Daniel Borkmann <daniel@...earbox.net>,
Alexei Starovoitov <ast@...nel.org>,
Eric Dumazet <edumazet@...gle.com>
Subject: Re: [RFC PATCH 4/9] virtio_net: remove xdp related info from
page_to_skb()
On 2022/12/6 1:36 PM, Jason Wang wrote:
> On Tue, Nov 22, 2022 at 3:44 PM Heng Qi <hengqi@...ux.alibaba.com> wrote:
>> To construct the multi-buffer xdp_buff cleanly, remove the XDP
>> processing that was previously interleaved with page_to_skb(); the XDP
>> logic and the logic for building an skb from an xdp_buff are now
>> separate and independent.
>>
>> Signed-off-by: Heng Qi <hengqi@...ux.alibaba.com>
>> Reviewed-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
> So I think the organization of this series needs some tweaking.
>
> If I'm not wrong, with the series organized like this, XDP support is
> actually broken mid-series, which breaks bisection and a lot of other
> things.
>
> We need to make sure each patch does not break anything, which probably
> requires
>
> 1) squashing the following patches, or
> 2) adding a new helper to do the XDP stuff after/before page_to_skb()
Your comments are helpful. I'm going to rework this patch to be
independent of "[PATCH 9/9] virtio_net: support multi-buffer xdp", so
that each patch in the series keeps XDP working.
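
As a rough sketch of option 2): the helper name and signature below are
placeholders I'm using for illustration, not a final interface; its body
simply rehomes the XDP-specific fixups that this patch removes from
page_to_skb():

	/* Apply the results of XDP processing to an skb that was built
	 * by page_to_skb(), so page_to_skb() itself stays XDP-free.
	 */
	static void virtnet_skb_xdp_fixup(struct sk_buff *skb,
					  unsigned int metasize)
	{
		/* The XDP program may have prepended metadata in front
		 * of the packet data; pull it off the head and record
		 * its length on the skb.
		 */
		if (metasize) {
			__skb_pull(skb, metasize);
			skb_metadata_set(skb, metasize);
		}
	}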
Thanks.
>
> Thanks
>
>> ---
>> drivers/net/virtio_net.c | 41 +++++++++-------------------------------
>> 1 file changed, 9 insertions(+), 32 deletions(-)
>>
>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>> index d3e8c63b9c4b..cd65f85d5075 100644
>> --- a/drivers/net/virtio_net.c
>> +++ b/drivers/net/virtio_net.c
>> @@ -439,9 +439,7 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
>> static struct sk_buff *page_to_skb(struct virtnet_info *vi,
>> struct receive_queue *rq,
>> struct page *page, unsigned int offset,
>> - unsigned int len, unsigned int truesize,
>> - bool hdr_valid, unsigned int metasize,
>> - unsigned int headroom)
>> + unsigned int len, unsigned int truesize)
>> {
>> struct sk_buff *skb;
>> struct virtio_net_hdr_mrg_rxbuf *hdr;
>> @@ -459,21 +457,11 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
>> else
>> hdr_padded_len = sizeof(struct padded_vnet_hdr);
>>
>> - /* If headroom is not 0, there is an offset between the beginning of the
>> - * data and the allocated space, otherwise the data and the allocated
>> - * space are aligned.
>> - *
>> - * Buffers with headroom use PAGE_SIZE as alloc size, see
>> - * add_recvbuf_mergeable() + get_mergeable_buf_len()
>> - */
>> - truesize = headroom ? PAGE_SIZE : truesize;
>> - tailroom = truesize - headroom;
>> - buf = p - headroom;
>> -
>> + buf = p;
>> len -= hdr_len;
>> offset += hdr_padded_len;
>> p += hdr_padded_len;
>> - tailroom -= hdr_padded_len + len;
>> + tailroom = truesize - hdr_padded_len - len;
>>
>> shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
>>
>> @@ -503,7 +491,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
>> if (len <= skb_tailroom(skb))
>> copy = len;
>> else
>> - copy = ETH_HLEN + metasize;
>> + copy = ETH_HLEN;
>> skb_put_data(skb, p, copy);
>>
>> len -= copy;
>> @@ -542,19 +530,11 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
>> give_pages(rq, page);
>>
>> ok:
>> - /* hdr_valid means no XDP, so we can copy the vnet header */
>> - if (hdr_valid) {
>> - hdr = skb_vnet_hdr(skb);
>> - memcpy(hdr, hdr_p, hdr_len);
>> - }
>> + hdr = skb_vnet_hdr(skb);
>> + memcpy(hdr, hdr_p, hdr_len);
>> if (page_to_free)
>> put_page(page_to_free);
>>
>> - if (metasize) {
>> - __skb_pull(skb, metasize);
>> - skb_metadata_set(skb, metasize);
>> - }
>> -
>> return skb;
>> }
>>
>> @@ -917,7 +897,7 @@ static struct sk_buff *receive_big(struct net_device *dev,
>> {
>> struct page *page = buf;
>> struct sk_buff *skb =
>> - page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0, 0);
>> + page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
>>
>> stats->bytes += len - vi->hdr_len;
>> if (unlikely(!skb))
>> @@ -1060,9 +1040,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
>> rcu_read_unlock();
>> put_page(page);
>> head_skb = page_to_skb(vi, rq, xdp_page, offset,
>> - len, PAGE_SIZE, false,
>> - metasize,
>> - headroom);
>> + len, PAGE_SIZE);
>> return head_skb;
>> }
>> break;
>> @@ -1116,8 +1094,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
>> rcu_read_unlock();
>>
>> skip_xdp:
>> - head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
>> - metasize, headroom);
>> + head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
>> curr_skb = head_skb;
>>
>> if (unlikely(!curr_skb))
>> --
>> 2.19.1.6.gb485710b
>>