[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CACGkMEvePWxKR2=mkYLG5-22HD9WtM8Ew4z4pQtw1p-Ri6miQw@mail.gmail.com>
Date: Wed, 8 Mar 2023 13:20:32 +0800
From: Jason Wang <jasowang@...hat.com>
To: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
Cc: netdev@...r.kernel.org, "Michael S. Tsirkin" <mst@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
virtualization@...ts.linux-foundation.org, bpf@...r.kernel.org,
Yichun Zhang <yichun@...nresty.com>,
Alexander Duyck <alexanderduyck@...com>
Subject: Re: [PATCH net, stable v1 2/3] virtio_net: separate the logic of
checking whether sq is full
On Wed, Mar 8, 2023 at 10:49 AM Xuan Zhuo <xuanzhuo@...ux.alibaba.com> wrote:
>
> Separate the logic of checking whether sq is full. The subsequent patch
> will reuse this function.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
> Reviewed-by: Alexander Duyck <alexanderduyck@...com>
> Acked-by: Michael S. Tsirkin <mst@...hat.com>
Acked-by: Jason Wang <jasowang@...hat.com>
Thanks
> ---
> drivers/net/virtio_net.c | 60 ++++++++++++++++++++++++----------------
> 1 file changed, 36 insertions(+), 24 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 8b31a04052f2..46bbddaadb0d 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -591,6 +591,41 @@ static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
> return false;
> }
>
> +static void check_sq_full_and_disable(struct virtnet_info *vi,
> + struct net_device *dev,
> + struct send_queue *sq)
> +{
> + bool use_napi = sq->napi.weight;
> + int qnum;
> +
> + qnum = sq - vi->sq;
> +
> + /* If running out of space, stop queue to avoid getting packets that we
> + * are then unable to transmit.
> + * An alternative would be to force queuing layer to requeue the skb by
> + * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
> + * returned in a normal path of operation: it means that driver is not
> + * maintaining the TX queue stop/start state properly, and causes
> + * the stack to do a non-trivial amount of useless work.
> + * Since most packets only take 1 or 2 ring slots, stopping the queue
> + * early means 16 slots are typically wasted.
> + */
> + if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
> + netif_stop_subqueue(dev, qnum);
> + if (use_napi) {
> + if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
> + virtqueue_napi_schedule(&sq->napi, sq->vq);
> + } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
> + /* More just got used, free them then recheck. */
> + free_old_xmit_skbs(sq, false);
> + if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
> + netif_start_subqueue(dev, qnum);
> + virtqueue_disable_cb(sq->vq);
> + }
> + }
> + }
> +}
> +
> static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
> struct send_queue *sq,
> struct xdp_frame *xdpf)
> @@ -1989,30 +2024,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
> nf_reset_ct(skb);
> }
>
> - /* If running out of space, stop queue to avoid getting packets that we
> - * are then unable to transmit.
> - * An alternative would be to force queuing layer to requeue the skb by
> - * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
> - * returned in a normal path of operation: it means that driver is not
> - * maintaining the TX queue stop/start state properly, and causes
> - * the stack to do a non-trivial amount of useless work.
> - * Since most packets only take 1 or 2 ring slots, stopping the queue
> - * early means 16 slots are typically wasted.
> - */
> - if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
> - netif_stop_subqueue(dev, qnum);
> - if (use_napi) {
> - if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
> - virtqueue_napi_schedule(&sq->napi, sq->vq);
> - } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
> - /* More just got used, free them then recheck. */
> - free_old_xmit_skbs(sq, false);
> - if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
> - netif_start_subqueue(dev, qnum);
> - virtqueue_disable_cb(sq->vq);
> - }
> - }
> - }
> + check_sq_full_and_disable(vi, dev, sq);
>
> if (kick || netif_xmit_stopped(txq)) {
> if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
> --
> 2.32.0.3.g01195cf9f
>
Powered by blists - more mailing lists