Message-ID: <40af2b73239850e7bf1a81abb71ee99f1b563b9c.1764226734.git.mst@redhat.com>
Date: Thu, 27 Nov 2025 02:01:57 -0500
From: "Michael S. Tsirkin" <mst@...hat.com>
To: linux-kernel@...r.kernel.org
Cc: Paolo Abeni <pabeni@...hat.com>, Jason Wang <jasowang@...hat.com>,
Xuan Zhuo <xuanzhuo@...ux.alibaba.com>,
Eugenio Pérez <eperezma@...hat.com>,
Andrew Lunn <andrew+netdev@...n.ch>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
Stanislav Fomichev <sdf@...ichev.me>,
Bui Quang Minh <minhquangbui99@...il.com>,
virtualization@...ts.linux.dev, netdev@...r.kernel.org,
bpf@...r.kernel.org
Subject: [PATCH RFC] virtio_net: gate delayed refill scheduling

Make the delayed refill worker honor the "refill_enabled" flag by
checking it under refill_lock before requeueing itself. This
prevents a window where virtnet_rx_pause[_all]() disables NAPI and
synchronously waits for the current refill_work instance to finish, only
for that instance to immediately arm another run, which then deadlocks
when it tries to double-disable NAPI.
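
Roughly, the race looks like this (the synchronous wait is shown as
cancel_delayed_work_sync() for illustration):

    virtnet_rx_pause_all()            refill_work()
    ----------------------            -------------
    disable_delayed_refill()
      refill_enabled = false
    cancel_delayed_work_sync()  --->  still running; still_empty, so it
                                      re-arms itself without checking
                                      refill_enabled
    virtnet_napi_disable()
                                      new refill_work instance runs and
                                      tries to disable the already
                                      disabled NAPI -> blocks forever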

Add a helper that encapsulates the locking and the flag check, and use
it in all refill scheduling paths so they behave consistently, instead
of open-coding the spin_lock/unlock pattern in each caller.

This fixes the deadlock triggered by the XDP selftests when XDP is
toggled and RX is paused/resumed in quick succession.

Fixes: 4bc12818b363 ("virtio-net: disable delayed refill when pausing rx")
Reported-by: Paolo Abeni <pabeni@...hat.com>
Closes: https://netdev-ctrl.bots.linux.dev/logs/vmksft/drv-hw-dbg/results/400961/3-xdp-py/stderr
Signed-off-by: Michael S. Tsirkin <mst@...hat.com>
---
Lightly tested.

Paolo, is there a way to confirm this actually fixes the bug?
Could you help with that?
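
For reviewers, a minimal userspace sketch of the gating pattern the
helper implements (illustrative only: schedule_refill() and the
work_pending flag are made-up stand-ins for schedule_delayed_work()
and the queued work item, not driver code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t refill_lock = PTHREAD_MUTEX_INITIALIZER;
static bool refill_enabled = true;
static bool work_pending;	/* stand-in for the queued delayed work */

/* Same shape as virtnet_schedule_refill_work(): arm only while enabled. */
static void schedule_refill(void)
{
	pthread_mutex_lock(&refill_lock);
	if (refill_enabled)
		work_pending = true;	/* stand-in for schedule_delayed_work() */
	pthread_mutex_unlock(&refill_lock);
}

int main(void)
{
	schedule_refill();
	printf("armed while enabled: %s\n", work_pending ? "yes" : "no");

	/* pause path: clear the flag under the lock, then cancel */
	pthread_mutex_lock(&refill_lock);
	refill_enabled = false;
	pthread_mutex_unlock(&refill_lock);
	work_pending = false;	/* stand-in for cancel_delayed_work_sync() */

	/* a late re-arm attempt, e.g. from a finishing worker, is refused */
	schedule_refill();
	printf("re-armed after disable: %s\n", work_pending ? "yes" : "no");
	return 0;
}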
 drivers/net/virtio_net.c | 29 ++++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 8855a994e12b..e2bfe8337f50 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -734,6 +734,15 @@ static void disable_delayed_refill(struct virtnet_info *vi)
 	spin_unlock_bh(&vi->refill_lock);
 }
 
+static void virtnet_schedule_refill_work(struct virtnet_info *vi,
+					 unsigned long delay)
+{
+	spin_lock_bh(&vi->refill_lock);
+	if (vi->refill_enabled)
+		schedule_delayed_work(&vi->refill, delay);
+	spin_unlock_bh(&vi->refill_lock);
+}
+
 static void enable_rx_mode_work(struct virtnet_info *vi)
 {
 	rtnl_lock();
@@ -2959,7 +2968,7 @@ static void refill_work(struct work_struct *work)
 		 * we will *never* try to fill again.
 		 */
 		if (still_empty)
-			schedule_delayed_work(&vi->refill, HZ/2);
+			virtnet_schedule_refill_work(vi, HZ/2);
 	}
 }
@@ -3026,12 +3035,8 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 	packets = virtnet_receive_packets(vi, rq, budget, xdp_xmit, &stats);
 
 	if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
-		if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
-			spin_lock(&vi->refill_lock);
-			if (vi->refill_enabled)
-				schedule_delayed_work(&vi->refill, 0);
-			spin_unlock(&vi->refill_lock);
-		}
+		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
+			virtnet_schedule_refill_work(vi, 0);
 	}
 
 	u64_stats_set(&stats.packets, packets);
@@ -3216,7 +3221,7 @@ static int virtnet_open(struct net_device *dev)
 		if (i < vi->curr_queue_pairs)
 			/* Make sure we have some buffers: if oom use wq. */
 			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
-				schedule_delayed_work(&vi->refill, 0);
+				virtnet_schedule_refill_work(vi, 0);
 
 		err = virtnet_enable_queue_pair(vi, i);
 		if (err < 0)
@@ -3469,7 +3474,7 @@ static void __virtnet_rx_resume(struct virtnet_info *vi,
 		virtnet_napi_enable(rq);
 
 	if (schedule_refill)
-		schedule_delayed_work(&vi->refill, 0);
+		virtnet_schedule_refill_work(vi, 0);
 }
 
 static void virtnet_rx_resume_all(struct virtnet_info *vi)
@@ -3815,10 +3820,8 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 succ:
 	vi->curr_queue_pairs = queue_pairs;
 	/* virtnet_open() will refill when device is going to up. */
-	spin_lock_bh(&vi->refill_lock);
-	if (dev->flags & IFF_UP && vi->refill_enabled)
-		schedule_delayed_work(&vi->refill, 0);
-	spin_unlock_bh(&vi->refill_lock);
+	if (dev->flags & IFF_UP)
+		virtnet_schedule_refill_work(vi, 0);
 
 	return 0;
 }
--
MST