[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251223152533.24364-4-minhquangbui99@gmail.com>
Date: Tue, 23 Dec 2025 22:25:33 +0700
From: Bui Quang Minh <minhquangbui99@...il.com>
To: netdev@...r.kernel.org
Cc: "Michael S. Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
Xuan Zhuo <xuanzhuo@...ux.alibaba.com>,
Eugenio Pérez <eperezma@...hat.com>,
Andrew Lunn <andrew+netdev@...n.ch>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
Stanislav Fomichev <sdf@...ichev.me>,
virtualization@...ts.linux.dev,
linux-kernel@...r.kernel.org,
bpf@...r.kernel.org,
Bui Quang Minh <minhquangbui99@...il.com>
Subject: [PATCH net 3/3] virtio-net: schedule the pending refill work after being enabled
As we need to move enable_delayed_refill after napi_enable, it's
possible that a refill work needs to be scheduled in virtnet_receive but
cannot be. This can make the receive side get stuck: if we don't have
any receive buffers, there will be nothing to trigger the refill logic.
So in case this happens, in virtnet_receive, set the rx queue's
refill_pending flag; then, when the refill work is enabled again, a
refill work will be scheduled.
Signed-off-by: Bui Quang Minh <minhquangbui99@...il.com>
---
drivers/net/virtio_net.c | 21 ++++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 8016d2b378cf..ddc62dab2f9a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -383,6 +383,9 @@ struct receive_queue {
/* Is delayed refill enabled? */
bool refill_enabled;
+ /* A refill work needs to be scheduled when delayed refill is enabled */
+ bool refill_pending;
+
/* The lock to synchronize the access to refill_enabled */
spinlock_t refill_lock;
@@ -720,10 +723,13 @@ static void virtnet_rq_free_buf(struct virtnet_info *vi,
put_page(virt_to_head_page(buf));
}
-static void enable_delayed_refill(struct receive_queue *rq)
+static void enable_delayed_refill(struct receive_queue *rq,
+ bool schedule_refill)
{
spin_lock_bh(&rq->refill_lock);
rq->refill_enabled = true;
+ if (rq->refill_pending || schedule_refill)
+ schedule_delayed_work(&rq->refill, 0);
spin_unlock_bh(&rq->refill_lock);
}
@@ -3032,6 +3038,8 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
spin_lock(&rq->refill_lock);
if (rq->refill_enabled)
schedule_delayed_work(&rq->refill, 0);
+ else
+ rq->refill_pending = true;
spin_unlock(&rq->refill_lock);
}
}
@@ -3228,11 +3236,8 @@ static int virtnet_open(struct net_device *dev)
if (err < 0)
goto err_enable_qp;
- if (i < vi->curr_queue_pairs) {
- enable_delayed_refill(&vi->rq[i]);
- if (schedule_refill)
- schedule_delayed_work(&vi->rq[i].refill, 0);
- }
+ if (i < vi->curr_queue_pairs)
+ enable_delayed_refill(&vi->rq[i], schedule_refill);
}
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
@@ -3480,9 +3485,7 @@ static void __virtnet_rx_resume(struct virtnet_info *vi,
if (running)
virtnet_napi_enable(rq);
- enable_delayed_refill(rq);
- if (schedule_refill)
- schedule_delayed_work(&rq->refill, 0);
+ enable_delayed_refill(rq, schedule_refill);
}
static void virtnet_rx_resume_all(struct virtnet_info *vi)
--
2.43.0
Powered by blists - more mailing lists