Message-ID: <Y9zIPdKmTvXqyuYS@boxer>
Date: Fri, 3 Feb 2023 09:39:25 +0100
From: Maciej Fijalkowski <maciej.fijalkowski@...el.com>
To: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
CC: <netdev@...r.kernel.org>, "David S. Miller" <davem@...emloft.net>,
"Eric Dumazet" <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
"Michael S. Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
Björn Töpel <bjorn@...nel.org>,
Magnus Karlsson <magnus.karlsson@...el.com>,
Jonathan Lemon <jonathan.lemon@...il.com>,
Alexei Starovoitov <ast@...nel.org>,
"Daniel Borkmann" <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Menglong Dong <imagedong@...cent.com>,
"Kuniyuki Iwashima" <kuniyu@...zon.com>,
Petr Machata <petrm@...dia.com>,
<virtualization@...ts.linux-foundation.org>, <bpf@...r.kernel.org>
Subject: Re: [PATCH 29/33] virtio_net: xsk: tx: support tx
On Thu, Feb 02, 2023 at 07:00:54PM +0800, Xuan Zhuo wrote:
> The driver's tx napi is very important for XSK. It is responsible for
> obtaining data from the XSK queue and sending it out.
>
> At the beginning, we need to trigger tx napi.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
> ---
>  drivers/net/virtio/main.c |  12 +++-
>  drivers/net/virtio/xsk.c  | 146 ++++++++++++++++++++++++++++++++++++++
>  drivers/net/virtio/xsk.h  |   2 +
>  3 files changed, 159 insertions(+), 1 deletion(-)
>
(...)
> +static int virtnet_xsk_xmit_batch(struct send_queue *sq,
> +                                  struct xsk_buff_pool *pool,
> +                                  unsigned int budget,
> +                                  struct virtnet_sq_stats *stats)
> +{
> +        int ret = XSK_XMIT_NO_BUDGET;
> +        struct xdp_desc desc;
> +        int err, packet = 0;
> +
> +        while (budget-- > 0) {
> +                if (sq->vq->num_free < 2) {
> +                        __free_old_xmit(sq, true, stats);
> +                        if (sq->vq->num_free < 2) {
> +                                ret = XSK_XMIT_DEV_BUSY;
> +                                break;
> +                        }
> +                }
> +
> +                if (!xsk_tx_peek_desc(pool, &desc)) {
Is there anything that stopped you from using xsk_tx_peek_release_desc_batch() here?
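
For reference, the batched helper fills pool->tx_descs, reserves completion
ring space for the descriptors and releases them from the tx ring in a single
call, so the per-descriptor peek/release goes away. A rough, untested sketch
of what this loop could become (virtnet_xsk_xmit_one() and the XSK_XMIT_*
codes are taken from this patch; consuming pool->tx_descs mirrors what
ice/i40e do, kick handling omitted):

        struct xdp_desc *descs = pool->tx_descs;
        u32 nb_pkts, i;
        int err;

        /* Peek and release up to @budget descriptors in one call. */
        nb_pkts = xsk_tx_peek_release_desc_batch(pool, budget);
        if (!nb_pkts)
                return XSK_XMIT_DONE;

        for (i = 0; i < nb_pkts; i++) {
                err = virtnet_xsk_xmit_one(sq, pool, &descs[i]);
                if (unlikely(err))
                        break;
        }

        stats->xdp_tx += i;

Since the descriptors are already released from the tx ring at that point,
the sq->vq->num_free check would have to cap the batch size up front rather
than breaking out in the middle of the loop.
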
> +                        ret = XSK_XMIT_DONE;
> +                        break;
> +                }
> +
> +                err = virtnet_xsk_xmit_one(sq, pool, &desc);
> +                if (unlikely(err)) {
> +                        ret = XSK_XMIT_DEV_BUSY;
> +                        break;
> +                }
> +
> +                ++packet;
> +
> +                if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
> +                        ++stats->kicks;
> +        }
> +
> +        if (packet) {
> +                stats->xdp_tx += packet;
> +
> +                xsk_tx_release(pool);
> +        }
> +
> +        return ret;
> +}
> +
> +bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
> +                      int budget)
> +{
> +        struct virtnet_sq_stats stats = {};
> +        bool busy;
> +        int ret;
> +
> +        __free_old_xmit(sq, true, &stats);
> +
> +        if (xsk_uses_need_wakeup(pool))
> +                xsk_set_tx_need_wakeup(pool);
> +
> +        ret = virtnet_xsk_xmit_batch(sq, pool, budget, &stats);
> +        switch (ret) {
> +        case XSK_XMIT_DONE:
> +                /* The xsk tx queue has been fully consumed; napi should complete. */
> +                busy = false;
> +                break;
> +
> +        case XSK_XMIT_NO_BUDGET:
> +                /* The budget limit was reached; napi should run again. */
> +                busy = true;
> +                break;
> +
> +        case XSK_XMIT_DEV_BUSY:
> +                /* The sq vring is full; napi should complete and wait for
> +                 * the tx interrupt to trigger it again.
> +                 */
> +                busy = false;
> +                break;
> +        }
> +
> +        virtnet_xsk_check_queue(sq);
> +
> +        u64_stats_update_begin(&sq->stats.syncp);
> +        sq->stats.packets += stats.packets;
> +        sq->stats.bytes += stats.bytes;
> +        sq->stats.kicks += stats.kicks;
> +        sq->stats.xdp_tx += stats.xdp_tx;
> +        u64_stats_update_end(&sq->stats.syncp);
> +
> +        return busy;
> +}
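
Also, to make sure I read the busy/complete semantics right: the tx napi poll
callback is then expected to look roughly like the sketch below? (the poll
handler is not part of this hunk, and both the function name and the
sq->xsk.pool field are guesses on my side; callback re-enable omitted)

static int virtnet_poll_tx_xsk(struct napi_struct *napi, int budget)
{
        struct send_queue *sq = container_of(napi, struct send_queue, napi);
        bool busy;

        busy = virtnet_xsk_xmit(sq, sq->xsk.pool, budget);
        if (busy)
                return budget;  /* XSK_XMIT_NO_BUDGET: keep polling */

        /* XSK_XMIT_DONE / XSK_XMIT_DEV_BUSY: stop here and let the tx
         * interrupt reschedule napi when needed.
         */
        napi_complete_done(napi, 0);
        return 0;
}
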
> +
> static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queue *rq,
>                                     struct xsk_buff_pool *pool, struct net_device *dev)
> {
> diff --git a/drivers/net/virtio/xsk.h b/drivers/net/virtio/xsk.h
> index ad684c812091..15f1540a5803 100644
> --- a/drivers/net/virtio/xsk.h
> +++ b/drivers/net/virtio/xsk.h
> @@ -20,4 +20,6 @@ static inline u32 ptr_to_xsk(void *ptr)
> }
>
> int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp);
> +bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
> +                      int budget);
> #endif
> --
> 2.32.0.3.g01195cf9f
>