Message-ID: <1682410175.9141502-3-xuanzhuo@linux.alibaba.com>
Date: Tue, 25 Apr 2023 16:09:35 +0800
From: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
To: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
Cc: netdev@...r.kernel.org, "Michael S. Tsirkin" <mst@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
virtualization@...ts.linux-foundation.org, bpf@...r.kernel.org,
Jason Wang <jasowang@...hat.com>
Subject: Re: [PATCH net-next v3 10/15] virtio_net: introduce receive_small_xdp()
On Tue, 25 Apr 2023 16:00:05 +0800, Xuan Zhuo <xuanzhuo@...ux.alibaba.com> wrote:
> On Tue, 25 Apr 2023 15:58:03 +0800, Jason Wang <jasowang@...hat.com> wrote:
> > On Sun, Apr 23, 2023 at 6:58 PM Xuan Zhuo <xuanzhuo@...ux.alibaba.com> wrote:
> > >
> > > The purpose of this patch is to simplify receive_small():
> > > separate all the XDP logic for the small-buffer path into its own function.
> > >
> > > Signed-off-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
> > > ---
> > > drivers/net/virtio_net.c | 165 ++++++++++++++++++++++++---------------
> > > 1 file changed, 100 insertions(+), 65 deletions(-)
> > >
> > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > index de5a579e8603..9b5fd2e0d27f 100644
> > > --- a/drivers/net/virtio_net.c
> > > +++ b/drivers/net/virtio_net.c
> > > @@ -931,6 +931,99 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
> > > return NULL;
> > > }
> > >
> > > +static struct sk_buff *receive_small_xdp(struct net_device *dev,
> > > + struct virtnet_info *vi,
> > > + struct receive_queue *rq,
> > > + struct bpf_prog *xdp_prog,
> > > + void *buf,
> > > + unsigned int xdp_headroom,
> > > + unsigned int len,
> > > + unsigned int *xdp_xmit,
> > > + struct virtnet_rq_stats *stats)
> > > +{
> > > + unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
> > > + unsigned int headroom = vi->hdr_len + header_offset;
> > > + struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
> > > + struct page *page = virt_to_head_page(buf);
> > > + struct page *xdp_page;
> > > + unsigned int buflen;
> > > + struct xdp_buff xdp;
> > > + struct sk_buff *skb;
> > > + unsigned int delta = 0;
> > > + unsigned int metasize = 0;
> > > + void *orig_data;
> > > + u32 act;
> > > +
> > > + if (unlikely(hdr->hdr.gso_type))
> > > + goto err_xdp;
> > > +
> > > + buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
> > > + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
> > > +
> > > + if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
> > > + int offset = buf - page_address(page) + header_offset;
> > > + unsigned int tlen = len + vi->hdr_len;
> > > + int num_buf = 1;
> > > +
> > > + xdp_headroom = virtnet_get_headroom(vi);
> > > + header_offset = VIRTNET_RX_PAD + xdp_headroom;
> > > + headroom = vi->hdr_len + header_offset;
> > > + buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
> > > + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
> > > + xdp_page = xdp_linearize_page(rq, &num_buf, page,
> > > + offset, header_offset,
> > > + &tlen);
> > > + if (!xdp_page)
> > > + goto err_xdp;
> > > +
> > > + buf = page_address(xdp_page);
> > > + put_page(page);
> > > + page = xdp_page;
> > > + }
> > > +
> > > + xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
> > > + xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
> > > + xdp_headroom, len, true);
> > > + orig_data = xdp.data;
> > > +
> > > + act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> > > +
> > > + switch (act) {
> > > + case XDP_PASS:
> > > + /* Recalculate length in case bpf program changed it */
> > > + delta = orig_data - xdp.data;
> > > + len = xdp.data_end - xdp.data;
> > > + metasize = xdp.data - xdp.data_meta;
> > > + break;
> > > +
> > > + case XDP_TX:
> > > + case XDP_REDIRECT:
> > > + goto xdp_xmit;
> > > +
> > > + default:
> > > + goto err_xdp;
> > > + }
> > > +
> > > + skb = build_skb(buf, buflen);
> > > + if (!skb)
> > > + goto err;
> > > +
> > > + skb_reserve(skb, headroom - delta);
> > > + skb_put(skb, len);
> > > + if (metasize)
> > > + skb_metadata_set(skb, metasize);
> > > +
> > > + return skb;
> > > +
> > > +err_xdp:
> > > + stats->xdp_drops++;
> > > +err:
> > > + stats->drops++;
> > > + put_page(page);
> > > +xdp_xmit:
> > > + return NULL;
> > > +}
> >
> > It looks like some of the comments on the previous version were not addressed?
> >
> > "
> > So we end up with some code duplication between receive_small() and
> > receive_small_xdp() on building skbs. Is this intended?
> > "
>
> I answered you under patch #13 of the previous version. This patch set
> optimizes this with the last two commits. This commit is not unchanged.
Sorry, typo.
"This commit is unchanged."
Thanks.
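
For reference, the direction those last two commits take could look roughly
like the following (a minimal sketch only; the helper name and exact shape
below are illustrative, not the final code): a small shared helper that both
receive_small() and receive_small_xdp() call to build the skb, so the
duplicated build_skb()/skb_reserve()/skb_put() sequence lives in one place.

static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
					 unsigned int headroom,
					 unsigned int len)
{
	struct sk_buff *skb;

	/* Wrap the existing page-backed receive buffer in an skb. */
	skb = build_skb(buf, buflen);
	if (unlikely(!skb))
		return NULL;

	/* Skip the vnet header plus any (XDP) headroom, then expose
	 * 'len' bytes of packet data.
	 */
	skb_reserve(skb, headroom);
	skb_put(skb, len);

	return skb;
}

The XDP path would then only add skb_metadata_set() on top of this, and the
non-XDP path only the vnet header copy.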
>
> Thanks.
>
>
> >
> > Thanks
> >
> > > +
> > > static struct sk_buff *receive_small(struct net_device *dev,
> > > struct virtnet_info *vi,
> > > struct receive_queue *rq,
> > > @@ -947,9 +1040,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
> > > SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
> > > struct page *page = virt_to_head_page(buf);
> > > - unsigned int delta = 0;
> > > - struct page *xdp_page;
> > > - unsigned int metasize = 0;
> > >
> > > len -= vi->hdr_len;
> > > stats->bytes += len;
> > > @@ -969,56 +1059,10 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > rcu_read_lock();
> > > xdp_prog = rcu_dereference(rq->xdp_prog);
> > > if (xdp_prog) {
> > > - struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
> > > - struct xdp_buff xdp;
> > > - void *orig_data;
> > > - u32 act;
> > > -
> > > - if (unlikely(hdr->hdr.gso_type))
> > > - goto err_xdp;
> > > -
> > > - if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
> > > - int offset = buf - page_address(page) + header_offset;
> > > - unsigned int tlen = len + vi->hdr_len;
> > > - int num_buf = 1;
> > > -
> > > - xdp_headroom = virtnet_get_headroom(vi);
> > > - header_offset = VIRTNET_RX_PAD + xdp_headroom;
> > > - headroom = vi->hdr_len + header_offset;
> > > - buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
> > > - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
> > > - xdp_page = xdp_linearize_page(rq, &num_buf, page,
> > > - offset, header_offset,
> > > - &tlen);
> > > - if (!xdp_page)
> > > - goto err_xdp;
> > > -
> > > - buf = page_address(xdp_page);
> > > - put_page(page);
> > > - page = xdp_page;
> > > - }
> > > -
> > > - xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
> > > - xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
> > > - xdp_headroom, len, true);
> > > - orig_data = xdp.data;
> > > -
> > > - act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> > > -
> > > - switch (act) {
> > > - case XDP_PASS:
> > > - /* Recalculate length in case bpf program changed it */
> > > - delta = orig_data - xdp.data;
> > > - len = xdp.data_end - xdp.data;
> > > - metasize = xdp.data - xdp.data_meta;
> > > - break;
> > > - case XDP_TX:
> > > - case XDP_REDIRECT:
> > > - rcu_read_unlock();
> > > - goto xdp_xmit;
> > > - default:
> > > - goto err_xdp;
> > > - }
> > > + skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf, xdp_headroom,
> > > + len, xdp_xmit, stats);
> > > + rcu_read_unlock();
> > > + return skb;
> > > }
> > > rcu_read_unlock();
> > >
> > > @@ -1026,25 +1070,16 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > skb = build_skb(buf, buflen);
> > > if (!skb)
> > > goto err;
> > > - skb_reserve(skb, headroom - delta);
> > > + skb_reserve(skb, headroom);
> > > skb_put(skb, len);
> > > - if (!xdp_prog) {
> > > - buf += header_offset;
> > > - memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
> > > - } /* keep zeroed vnet hdr since XDP is loaded */
> > > -
> > > - if (metasize)
> > > - skb_metadata_set(skb, metasize);
> > >
> > > + buf += header_offset;
> > > + memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
> > > return skb;
> > >
> > > -err_xdp:
> > > - rcu_read_unlock();
> > > - stats->xdp_drops++;
> > > err:
> > > stats->drops++;
> > > put_page(page);
> > > -xdp_xmit:
> > > return NULL;
> > > }
> > >
> > > --
> > > 2.32.0.3.g01195cf9f
> > >
> >