Message-ID: <39958cd6-3ab1-433a-8eed-129304bc059e@gmail.com>
Date: Mon, 8 Dec 2025 22:48:26 +0700
From: Bui Quang Minh <minhquangbui99@...il.com>
To: netdev@...r.kernel.org
Cc: "Michael S. Tsirkin" <mst@...hat.com>, Jason Wang <jasowang@...hat.com>,
Xuan Zhuo <xuanzhuo@...ux.alibaba.com>, Eugenio Pérez
<eperezma@...hat.com>, Andrew Lunn <andrew+netdev@...n.ch>,
"David S. Miller" <davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
Alexei Starovoitov <ast@...nel.org>, Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
Stanislav Fomichev <sdf@...ichev.me>, virtualization@...ts.linux.dev,
linux-kernel@...r.kernel.org, bpf@...r.kernel.org
Subject: Re: [PATCH net] virtio-net: enable all napis before scheduling refill work

On 12/8/25 22:34, Bui Quang Minh wrote:
> Calling napi_disable() on an already disabled napi can cause a
> deadlock. In commit 4bc12818b363 ("virtio-net: disable delayed refill
> when pausing rx"), to avoid the deadlock, when pausing RX in
> virtnet_rx_pause[_all](), we disable and cancel the delayed refill work.
> However, in virtnet_rx_resume_all(), we enable the delayed refill
> work too early, before enabling all the receive queue napis.
>
> The deadlock can be reproduced by running
> selftests/drivers/net/hw/xsk_reconfig.py with a multiqueue virtio-net
> device and inserting a cond_resched() inside the for loop in
> virtnet_rx_resume_all() to increase the success rate. Because the worker
> processing the delayed refill work runs on the same CPU as
> virtnet_rx_resume_all(), a reschedule is needed to trigger the deadlock.
> In a real scenario, contention on netdev_lock can cause this
> reschedule.
>
> Fix the deadlock by ensuring all receive queue napis are enabled
> before the delayed refill work is enabled in virtnet_rx_resume_all()
> and virtnet_open().
>
> Fixes: 4bc12818b363 ("virtio-net: disable delayed refill when pausing rx")
> Reported-by: Paolo Abeni <pabeni@...hat.com>
> Closes: https://netdev-ctrl.bots.linux.dev/logs/vmksft/drv-hw-dbg/results/400961/3-xdp-py/stderr
> Signed-off-by: Bui Quang Minh <minhquangbui99@...il.com>
I forgot to add Cc: stable@...r.kernel.org. I will add it in the next version.
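
For context, the race looks roughly like this (a simplified sketch, not
the exact upstream code: refill_work() in drivers/net/virtio_net.c
disables each napi around try_fill_recv(), and on recent kernels
napi_disable()/napi_enable() take the netdev instance lock):

    virtnet_rx_resume_all()                refill work
    -----------------------                -----------
    enable_delayed_refill(vi);
    virtnet_napi_enable(&vi->rq[0]);
                                           napi_disable(&vi->rq[1].napi);
                                           /* rq[1] napi is still disabled,
                                            * so napi_disable() waits for an
                                            * enable that never comes, while
                                            * holding the instance lock */
    virtnet_napi_enable(&vi->rq[1]);
    /* blocks on the instance lock held
     * by the refill worker: deadlock */

Enabling every napi before enable_delayed_refill() closes this window.
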
Thanks,
Quang Minh.
> ---
> drivers/net/virtio_net.c | 59 +++++++++++++++++++---------------------
> 1 file changed, 28 insertions(+), 31 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 8e04adb57f52..f2b1ea65767d 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -2858,6 +2858,20 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
> return err != -ENOMEM;
> }
>
> +static void virtnet_rx_refill_all(struct virtnet_info *vi)
> +{
> + bool schedule_refill = false;
> + int i;
> +
> + enable_delayed_refill(vi);
> + for (i = 0; i < vi->curr_queue_pairs; i++)
> + if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
> + schedule_refill = true;
> +
> + if (schedule_refill)
> + schedule_delayed_work(&vi->refill, 0);
> +}
> +
> static void skb_recv_done(struct virtqueue *rvq)
> {
> struct virtnet_info *vi = rvq->vdev->priv;
> @@ -3216,19 +3230,14 @@ static int virtnet_open(struct net_device *dev)
> struct virtnet_info *vi = netdev_priv(dev);
> int i, err;
>
> - enable_delayed_refill(vi);
> -
> for (i = 0; i < vi->max_queue_pairs; i++) {
> - if (i < vi->curr_queue_pairs)
> - /* Make sure we have some buffers: if oom use wq. */
> - if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
> - schedule_delayed_work(&vi->refill, 0);
> -
> err = virtnet_enable_queue_pair(vi, i);
> if (err < 0)
> goto err_enable_qp;
> }
>
> + virtnet_rx_refill_all(vi);
> +
> if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
> if (vi->status & VIRTIO_NET_S_LINK_UP)
> netif_carrier_on(vi->dev);
> @@ -3463,39 +3472,27 @@ static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
> __virtnet_rx_pause(vi, rq);
> }
>
> -static void __virtnet_rx_resume(struct virtnet_info *vi,
> - struct receive_queue *rq,
> - bool refill)
> -{
> - bool running = netif_running(vi->dev);
> - bool schedule_refill = false;
> -
> - if (refill && !try_fill_recv(vi, rq, GFP_KERNEL))
> - schedule_refill = true;
> - if (running)
> - virtnet_napi_enable(rq);
> -
> - if (schedule_refill)
> - schedule_delayed_work(&vi->refill, 0);
> -}
> -
> static void virtnet_rx_resume_all(struct virtnet_info *vi)
> {
> int i;
>
> - enable_delayed_refill(vi);
> - for (i = 0; i < vi->max_queue_pairs; i++) {
> - if (i < vi->curr_queue_pairs)
> - __virtnet_rx_resume(vi, &vi->rq[i], true);
> - else
> - __virtnet_rx_resume(vi, &vi->rq[i], false);
> + if (netif_running(vi->dev)) {
> + for (i = 0; i < vi->max_queue_pairs; i++)
> + virtnet_napi_enable(&vi->rq[i]);
> +
> + virtnet_rx_refill_all(vi);
> }
> }
>
> static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
> {
> - enable_delayed_refill(vi);
> - __virtnet_rx_resume(vi, rq, true);
> + if (netif_running(vi->dev)) {
> + virtnet_napi_enable(rq);
> +
> + enable_delayed_refill(vi);
> + if (!try_fill_recv(vi, rq, GFP_KERNEL))
> + schedule_delayed_work(&vi->refill, 0);
> + }
> }
>
> static int virtnet_rx_resize(struct virtnet_info *vi,