lists.openwall.net | lists / announce owl-users owl-dev john-users john-dev passwdqc-users yescrypt popa3d-users / oss-security kernel-hardening musl sabotage tlsify passwords / crypt-dev xvendor / Bugtraq Full-Disclosure linux-kernel linux-netdev linux-ext4 linux-hardening linux-cve-announce PHC | |
Open Source and information security mailing list archives
| ||
|
Message-ID: <1698028779.8400478-2-xuanzhuo@linux.alibaba.com> Date: Mon, 23 Oct 2023 10:39:39 +0800 From: Xuan Zhuo <xuanzhuo@...ux.alibaba.com> To: Jason Wang <jasowang@...hat.com> Cc: netdev@...r.kernel.org, "David S. Miller" <davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>, "Michael S. Tsirkin" <mst@...hat.com>, Alexei Starovoitov <ast@...nel.org>, Daniel Borkmann <daniel@...earbox.net>, Jesper Dangaard Brouer <hawk@...nel.org>, John Fastabend <john.fastabend@...il.com>, virtualization@...ts.linux-foundation.org, bpf@...r.kernel.org Subject: Re: [PATCH net-next v1 16/19] virtio_net: xsk: rx: introduce receive_xsk() to recv xsk buffer On Fri, 20 Oct 2023 14:57:06 +0800, Jason Wang <jasowang@...hat.com> wrote: > On Mon, Oct 16, 2023 at 8:01 PM Xuan Zhuo <xuanzhuo@...ux.alibaba.com> wrote: > > > > Implementing the logic of xsk rx. If this packet is not for XSK > > determined in XDP, then we need to copy once to generate a SKB. > > If it is for XSK, it is a zerocopy receive packet process. 
> > > > Signed-off-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com> > > --- > > drivers/net/virtio/main.c | 14 ++-- > > drivers/net/virtio/virtio_net.h | 4 ++ > > drivers/net/virtio/xsk.c | 120 ++++++++++++++++++++++++++++++++ > > drivers/net/virtio/xsk.h | 4 ++ > > 4 files changed, 137 insertions(+), 5 deletions(-) > > > > diff --git a/drivers/net/virtio/main.c b/drivers/net/virtio/main.c > > index 0e740447b142..003dd67ab707 100644 > > --- a/drivers/net/virtio/main.c > > +++ b/drivers/net/virtio/main.c > > @@ -822,10 +822,10 @@ static void put_xdp_frags(struct xdp_buff *xdp) > > } > > } > > > > -static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp, > > - struct net_device *dev, > > - unsigned int *xdp_xmit, > > - struct virtnet_rq_stats *stats) > > +int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp, > > + struct net_device *dev, > > + unsigned int *xdp_xmit, > > + struct virtnet_rq_stats *stats) > > { > > struct xdp_frame *xdpf; > > int err; > > @@ -1589,13 +1589,17 @@ static void receive_buf(struct virtnet_info *vi, struct virtnet_rq *rq, > > return; > > } > > > > - if (vi->mergeable_rx_bufs) > > + rcu_read_lock(); > > + if (rcu_dereference(rq->xsk.pool)) > > + skb = virtnet_receive_xsk(dev, vi, rq, buf, len, xdp_xmit, stats); > > + else if (vi->mergeable_rx_bufs) > > skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit, > > stats); > > else if (vi->big_packets) > > skb = receive_big(dev, vi, rq, buf, len, stats); > > else > > skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats); > > + rcu_read_unlock(); > > > > if (unlikely(!skb)) > > return; > > diff --git a/drivers/net/virtio/virtio_net.h b/drivers/net/virtio/virtio_net.h > > index 6e71622fca45..fd7f34703c9b 100644 > > --- a/drivers/net/virtio/virtio_net.h > > +++ b/drivers/net/virtio/virtio_net.h > > @@ -346,6 +346,10 @@ static inline bool virtnet_is_xdp_raw_buffer_queue(struct virtnet_info *vi, int > > return false; > > } > > > > +int 
virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp, > > + struct net_device *dev, > > + unsigned int *xdp_xmit, > > + struct virtnet_rq_stats *stats); > > void virtnet_rx_pause(struct virtnet_info *vi, struct virtnet_rq *rq); > > void virtnet_rx_resume(struct virtnet_info *vi, struct virtnet_rq *rq); > > void virtnet_tx_pause(struct virtnet_info *vi, struct virtnet_sq *sq); > > diff --git a/drivers/net/virtio/xsk.c b/drivers/net/virtio/xsk.c > > index 841fb078882a..f1c64414fac9 100644 > > --- a/drivers/net/virtio/xsk.c > > +++ b/drivers/net/virtio/xsk.c > > @@ -13,6 +13,18 @@ static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len) > > sg->length = len; > > } > > > > +static unsigned int virtnet_receive_buf_num(struct virtnet_info *vi, char *buf) > > +{ > > + struct virtio_net_hdr_mrg_rxbuf *hdr; > > + > > + if (vi->mergeable_rx_bufs) { > > + hdr = (struct virtio_net_hdr_mrg_rxbuf *)buf; > > + return virtio16_to_cpu(vi->vdev, hdr->num_buffers); > > + } > > + > > + return 1; > > +} > > + > > static void virtnet_xsk_check_queue(struct virtnet_sq *sq) > > { > > struct virtnet_info *vi = sq->vq->vdev->priv; > > @@ -37,6 +49,114 @@ static void virtnet_xsk_check_queue(struct virtnet_sq *sq) > > netif_stop_subqueue(dev, qnum); > > } > > > > +static void merge_drop_follow_xdp(struct net_device *dev, > > + struct virtnet_rq *rq, > > + u32 num_buf, > > + struct virtnet_rq_stats *stats) > > +{ > > + struct xdp_buff *xdp; > > + u32 len; > > + > > + while (num_buf-- > 1) { > > + xdp = virtqueue_get_buf(rq->vq, &len); > > + if (unlikely(!xdp)) { > > + pr_debug("%s: rx error: %d buffers missing\n", > > + dev->name, num_buf); > > + dev->stats.rx_length_errors++; > > + break; > > + } > > + stats->bytes += len; > > + xsk_buff_free(xdp); > > + } > > +} > > + > > +static struct sk_buff *construct_skb(struct virtnet_rq *rq, > > + struct xdp_buff *xdp) > > +{ > > + unsigned int metasize = xdp->data - xdp->data_meta; > > + struct sk_buff *skb; > > + 
unsigned int size; > > + > > + size = xdp->data_end - xdp->data_hard_start; > > + skb = napi_alloc_skb(&rq->napi, size); > > + if (unlikely(!skb)) > > + return NULL; > > + > > + skb_reserve(skb, xdp->data_meta - xdp->data_hard_start); > > + > > + size = xdp->data_end - xdp->data_meta; > > + memcpy(__skb_put(skb, size), xdp->data_meta, size); > > + > > + if (metasize) { > > + __skb_pull(skb, metasize); > > + skb_metadata_set(skb, metasize); > > + } > > + > > + return skb; > > +} > > + > > +struct sk_buff *virtnet_receive_xsk(struct net_device *dev, struct virtnet_info *vi, > > + struct virtnet_rq *rq, void *buf, > > + unsigned int len, unsigned int *xdp_xmit, > > + struct virtnet_rq_stats *stats) > > +{ > > I wonder if anything blocks us from reusing the existing XDP logic? > Are there some subtle differences? 1. We need to copy data to create skb for XDP_PASS. 2. We need to call xsk_buff_free() to release the buffer. 3. The handling of xdp_buff is different. virtnet_xdp_handler() is re-used. So the receive code is simple. If we pushed this function into existing code, we would have to maintain code scattered inside merge and small (and big). So I think it is a good choice for us to put the xsk code into a function. Thanks. > > Thanks >
Powered by blists - more mailing lists