Message-ID: <Y9zJS+ugeY9qEMt9@boxer>
Date: Fri, 3 Feb 2023 09:43:55 +0100
From: Maciej Fijalkowski <maciej.fijalkowski@...el.com>
To: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
CC: <netdev@...r.kernel.org>, "David S. Miller" <davem@...emloft.net>,
"Eric Dumazet" <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
"Michael S. Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
Björn Töpel <bjorn@...nel.org>,
Magnus Karlsson <magnus.karlsson@...el.com>,
Jonathan Lemon <jonathan.lemon@...il.com>,
Alexei Starovoitov <ast@...nel.org>,
"Daniel Borkmann" <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Menglong Dong <imagedong@...cent.com>,
"Kuniyuki Iwashima" <kuniyu@...zon.com>,
Petr Machata <petrm@...dia.com>,
<virtualization@...ts.linux-foundation.org>, <bpf@...r.kernel.org>
Subject: Re: [PATCH 32/33] virtio_net: xsk: rx: introduce add_recvbuf_xsk()
On Thu, Feb 02, 2023 at 07:00:57PM +0800, Xuan Zhuo wrote:
> Implement the logic of filling the vq with XSK buffers.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
> ---
> drivers/net/virtio/main.c | 11 +++++++++++
> drivers/net/virtio/xsk.c | 26 ++++++++++++++++++++++++++
> drivers/net/virtio/xsk.h | 2 ++
> 3 files changed, 39 insertions(+)
>
> diff --git a/drivers/net/virtio/main.c b/drivers/net/virtio/main.c
> index 7259b27f5cba..2aff0eee35d3 100644
> --- a/drivers/net/virtio/main.c
> +++ b/drivers/net/virtio/main.c
> @@ -1352,10 +1352,20 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
> */
> bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp)
> {
> + struct xsk_buff_pool *pool;
> int err;
> bool oom;
>
> do {
> + rcu_read_lock();
> + pool = rcu_dereference(rq->xsk.pool);
> + if (pool) {
> + err = add_recvbuf_xsk(vi, rq, pool, gfp);
> + rcu_read_unlock();
> + goto check;
> + }
> + rcu_read_unlock();
> +
> if (vi->mergeable_rx_bufs)
> err = add_recvbuf_mergeable(vi, rq, gfp);
> else if (vi->big_packets)
> @@ -1363,6 +1373,7 @@ bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp)
> else
> err = add_recvbuf_small(vi, rq, gfp);
>
> +check:
> oom = err == -ENOMEM;
> if (err)
> break;
> diff --git a/drivers/net/virtio/xsk.c b/drivers/net/virtio/xsk.c
> index 043b0bf2a5d7..a5e88f919c46 100644
> --- a/drivers/net/virtio/xsk.c
> +++ b/drivers/net/virtio/xsk.c
> @@ -37,6 +37,32 @@ static void virtnet_xsk_check_queue(struct send_queue *sq)
> netif_stop_subqueue(dev, qnum);
> }
>
> +int add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
> + struct xsk_buff_pool *pool, gfp_t gfp)
> +{
> + struct xdp_buff *xdp;
> + dma_addr_t addr;
> + u32 len;
> + int err;
> +
> + xdp = xsk_buff_alloc(pool);
Same question as on the Tx side - is there anything stopping you from using
the batch API, xsk_buff_alloc_batch(), here?
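
For reference, a rough, untested sketch of what a batched fill could look
like. The on-stack bufs[] array and its size are my assumption, not part of
this series; virtqueue_add_inbuf_premapped() and sg_fill_dma() are the
helpers your series introduces:

static int add_recvbuf_xsk_batch(struct virtnet_info *vi,
				 struct receive_queue *rq,
				 struct xsk_buff_pool *pool, gfp_t gfp)
{
	struct xdp_buff *bufs[16];	/* hypothetical on-stack batch */
	dma_addr_t addr;
	u32 i, n;
	int err;

	/* Grab at most as many buffers as the vq has free slots. */
	n = xsk_buff_alloc_batch(pool, bufs,
				 min_t(u32, ARRAY_SIZE(bufs),
				       rq->vq->num_free));
	if (!n)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		/* Reuse part of XDP_PACKET_HEADROOM for the virtnet hdr. */
		addr = xsk_buff_xdp_get_dma(bufs[i]) - vi->hdr_len;

		sg_init_table(rq->sg, 1);
		sg_fill_dma(rq->sg, addr,
			    xsk_pool_get_rx_frame_size(pool) + vi->hdr_len);

		err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1,
						    bufs[i], gfp);
		if (err) {
			/* Return the unposted buffers to the pool. */
			while (i < n)
				xsk_buff_free(bufs[i++]);
			return err;
		}
	}

	return 0;
}

That keeps the per-buffer virtqueue posting, but amortizes the pool
allocation cost across the batch.
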
> + if (!xdp)
> + return -ENOMEM;
> +
> + /* use the part of XDP_PACKET_HEADROOM as the virtnet hdr space */
> + addr = xsk_buff_xdp_get_dma(xdp) - vi->hdr_len;
> + len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len;
> +
> + sg_init_table(rq->sg, 1);
> + sg_fill_dma(rq->sg, addr, len);
> +
> + err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, xdp, gfp);
> + if (err)
> + xsk_buff_free(xdp);
> +
> + return err;
> +}
> +
> static int virtnet_xsk_xmit_one(struct send_queue *sq,
> struct xsk_buff_pool *pool,
> struct xdp_desc *desc)
> diff --git a/drivers/net/virtio/xsk.h b/drivers/net/virtio/xsk.h
> index f90c28972d72..5549143ef118 100644
> --- a/drivers/net/virtio/xsk.h
> +++ b/drivers/net/virtio/xsk.h
> @@ -24,4 +24,6 @@ int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp);
> bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
> int budget);
> int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag);
> +int add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
> + struct xsk_buff_pool *pool, gfp_t gfp);
> #endif
> --
> 2.32.0.3.g01195cf9f
>