Message-ID: <1726127678.0431764-5-xuanzhuo@linux.alibaba.com>
Date: Thu, 12 Sep 2024 15:54:38 +0800
From: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
To: Jason Wang <jasowang@...hat.com>
Cc: netdev@...r.kernel.org,
"Michael S. Tsirkin" <mst@...hat.com>,
Eugenio Pérez <eperezma@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
virtualization@...ts.linux.dev,
bpf@...r.kernel.org
Subject: Re: [PATCH net-next 08/13] virtio_net: xsk: bind/unbind xsk for tx
On Wed, 11 Sep 2024 12:08:06 +0800, Jason Wang <jasowang@...hat.com> wrote:
> On Tue, Aug 20, 2024 at 3:33 PM Xuan Zhuo <xuanzhuo@...ux.alibaba.com> wrote:
> >
> > This patch implements the logic of binding/unbinding an xsk pool to the sq and rq.
> >
> > Signed-off-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
> > ---
> > drivers/net/virtio_net.c | 54 ++++++++++++++++++++++++++++++++++++++++
> > 1 file changed, 54 insertions(+)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index 96abee36738b..6a36a204e967 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -295,6 +295,10 @@ struct send_queue {
> >
> > /* Record whether sq is in reset state. */
> > bool reset;
> > +
> > + struct xsk_buff_pool *xsk_pool;
> > +
> > + dma_addr_t xsk_hdr_dma_addr;
> > };
> >
> > /* Internal representation of a receive virtqueue */
> > @@ -494,6 +498,8 @@ struct virtio_net_common_hdr {
> > };
> > };
> >
> > +static struct virtio_net_common_hdr xsk_hdr;
> > +
> > static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> > struct net_device *dev,
> > @@ -5476,6 +5482,29 @@ static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queu
> > return err;
> > }
> >
> > +static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi,
> > + struct send_queue *sq,
> > + struct xsk_buff_pool *pool)
> > +{
> > + int err, qindex;
> > +
> > + qindex = sq - vi->sq;
> > +
> > + virtnet_tx_pause(vi, sq);
> > +
> > + err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf);
> > + if (err) {
> > + netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err);
> > + pool = NULL;
> > + }
> > +
> > + sq->xsk_pool = pool;
> > +
> > + virtnet_tx_resume(vi, sq);
> > +
> > + return err;
> > +}
> > +
> > static int virtnet_xsk_pool_enable(struct net_device *dev,
> > struct xsk_buff_pool *pool,
> > u16 qid)
> > @@ -5484,6 +5513,7 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
> > struct receive_queue *rq;
> > struct device *dma_dev;
> > struct send_queue *sq;
> > + dma_addr_t hdr_dma;
> > int err, size;
> >
> > if (vi->hdr_len > xsk_pool_get_headroom(pool))
> > @@ -5521,6 +5551,10 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
> > if (!rq->xsk_buffs)
> > return -ENOMEM;
> >
> > + hdr_dma = dma_map_single(dma_dev, &xsk_hdr, vi->hdr_len, DMA_TO_DEVICE);
>
> Let's use the virtqueue_dma_xxx() wrappers here.
Will fix.
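Roughly, an untested sketch of that change (this assumes the
virtqueue_dma_map_single_attrs()/virtqueue_dma_mapping_error() wrappers
and mapping against sq->vq instead of the bare dma device):

	/* map the shared zero hdr through the virtqueue DMA wrappers */
	hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len,
						 DMA_TO_DEVICE, 0);
	if (virtqueue_dma_mapping_error(sq->vq, hdr_dma))
		return -ENOMEM;

The error unwind would then use virtqueue_dma_unmap_single_attrs()
instead of dma_unmap_single().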
Thanks.
>
> > + if (dma_mapping_error(dma_dev, hdr_dma))
> > + return -ENOMEM;
> > +
> > err = xsk_pool_dma_map(pool, dma_dev, 0);
> > if (err)
> > goto err_xsk_map;
> > @@ -5529,11 +5563,23 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
> > if (err)
> > goto err_rq;
> >
> > + err = virtnet_sq_bind_xsk_pool(vi, sq, pool);
> > + if (err)
> > + goto err_sq;
> > +
> > + /* Now, we do not support tx offload, so the tx virtnet hdr is always zero.
> > + * So all the tx packets can share a single hdr.
> > + */
> > + sq->xsk_hdr_dma_addr = hdr_dma;
> > +
> > return 0;
> >
> > +err_sq:
> > + virtnet_rq_bind_xsk_pool(vi, rq, NULL);
> > err_rq:
> > xsk_pool_dma_unmap(pool, 0);
> > err_xsk_map:
> > + dma_unmap_single(dma_dev, hdr_dma, vi->hdr_len, DMA_TO_DEVICE);
> > return err;
> > }
> >
> > @@ -5542,19 +5588,27 @@ static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
> > struct virtnet_info *vi = netdev_priv(dev);
> > struct xsk_buff_pool *pool;
> > struct receive_queue *rq;
> > + struct device *dma_dev;
> > + struct send_queue *sq;
> > int err;
> >
> > if (qid >= vi->curr_queue_pairs)
> > return -EINVAL;
> >
> > + sq = &vi->sq[qid];
> > rq = &vi->rq[qid];
> >
> > pool = rq->xsk_pool;
> >
> > err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
> > + err |= virtnet_sq_bind_xsk_pool(vi, sq, NULL);
> >
> > xsk_pool_dma_unmap(pool, 0);
> >
> > + dma_dev = virtqueue_dma_dev(sq->vq);
> > +
> > + dma_unmap_single(dma_dev, sq->xsk_hdr_dma_addr, vi->hdr_len, DMA_TO_DEVICE);
>
> And here.
>
> Thanks
>
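The symmetric unmap on the disable path, again as an untested sketch
assuming the same virtqueue_dma_* wrappers on sq->vq:

	/* unmap the shared hdr that was mapped at xsk enable time */
	virtqueue_dma_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr,
					 vi->hdr_len, DMA_TO_DEVICE, 0);

That would also drop the need for the local dma_dev/virtqueue_dma_dev()
lookup here.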
> > +
> > kvfree(rq->xsk_buffs);
> >
> > return err;
> > --
> > 2.32.0.3.g01195cf9f
> >
>