Message-ID: <20260102152023.10773-4-minhquangbui99@gmail.com>
Date: Fri, 2 Jan 2026 22:20:23 +0700
From: Bui Quang Minh <minhquangbui99@...il.com>
To: netdev@...r.kernel.org
Cc: "Michael S. Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
Xuan Zhuo <xuanzhuo@...ux.alibaba.com>,
Eugenio Pérez <eperezma@...hat.com>,
Andrew Lunn <andrew+netdev@...n.ch>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
Stanislav Fomichev <sdf@...ichev.me>,
virtualization@...ts.linux.dev,
linux-kernel@...r.kernel.org,
bpf@...r.kernel.org,
Bui Quang Minh <minhquangbui99@...il.com>
Subject: [PATCH net v2 3/3] virtio-net: clean up __virtnet_rx_pause/resume

The delayed refill worker has been removed, which makes
virtnet_rx_pause/resume essentially identical to __virtnet_rx_pause/resume.
So fold __virtnet_rx_pause/resume into virtnet_rx_pause/resume and drop the
now-redundant wrappers; callers pass the refill flag explicitly.
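
For review context only, here is a rough sketch of the merged helpers after
this change. The pause side follows the existing __virtnet_rx_pause() body;
the resume side is abridged and only assumes that refilling now happens
inline (the delayed refill worker is gone after an earlier patch in this
series), so it is not a verbatim copy of the resulting driver code:

  /* Sketch: shape of the helpers once the wrappers are folded in. */
  static void virtnet_rx_pause(struct virtnet_info *vi,
                               struct receive_queue *rq)
  {
          bool running = netif_running(vi->dev);

          if (running) {
                  virtnet_napi_disable(rq);
                  virtnet_cancel_dim(vi, &rq->dim);
          }
  }

  static void virtnet_rx_resume(struct virtnet_info *vi,
                                struct receive_queue *rq,
                                bool refill)
  {
          bool running = netif_running(vi->dev);

          /* Refill inline; exact handling may differ in the real patch. */
          if (refill)
                  try_fill_recv(vi, rq, GFP_KERNEL);

          if (running)
                  virtnet_napi_enable(rq);
  }

Callers that used to go through the single-queue wrappers now pass the
refill flag explicitly, e.g. virtnet_rx_resume(vi, rq, true) in
virtnet_rx_resize() and virtnet_rq_bind_xsk_pool().
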
Signed-off-by: Bui Quang Minh <minhquangbui99@...il.com>
---
 drivers/net/virtio_net.c | 30 ++++++++++--------------------
 1 file changed, 10 insertions(+), 20 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7e77a05b5662..95c80f55fa9a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -3366,8 +3366,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
-static void __virtnet_rx_pause(struct virtnet_info *vi,
-			       struct receive_queue *rq)
+static void virtnet_rx_pause(struct virtnet_info *vi,
+			     struct receive_queue *rq)
 {
 	bool running = netif_running(vi->dev);
 
@@ -3382,17 +3382,12 @@ static void virtnet_rx_pause_all(struct virtnet_info *vi)
 	int i;
 
 	for (i = 0; i < vi->max_queue_pairs; i++)
-		__virtnet_rx_pause(vi, &vi->rq[i]);
+		virtnet_rx_pause(vi, &vi->rq[i]);
 }
 
-static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
-{
-	__virtnet_rx_pause(vi, rq);
-}
-
-static void __virtnet_rx_resume(struct virtnet_info *vi,
-				struct receive_queue *rq,
-				bool refill)
+static void virtnet_rx_resume(struct virtnet_info *vi,
+			      struct receive_queue *rq,
+			      bool refill)
 {
 	bool running = netif_running(vi->dev);
 
@@ -3412,17 +3407,12 @@ static void virtnet_rx_resume_all(struct virtnet_info *vi)
 
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		if (i < vi->curr_queue_pairs)
-			__virtnet_rx_resume(vi, &vi->rq[i], true);
+			virtnet_rx_resume(vi, &vi->rq[i], true);
 		else
-			__virtnet_rx_resume(vi, &vi->rq[i], false);
+			virtnet_rx_resume(vi, &vi->rq[i], false);
 	}
 }
 
-static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
-{
-	__virtnet_rx_resume(vi, rq, true);
-}
-
 static int virtnet_rx_resize(struct virtnet_info *vi,
 			     struct receive_queue *rq, u32 ring_num)
 {
@@ -3436,7 +3426,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
 	if (err)
 		netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
 
-	virtnet_rx_resume(vi, rq);
+	virtnet_rx_resume(vi, rq, true);
 	return err;
 }
 
@@ -5814,7 +5804,7 @@ static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queu
 
 	rq->xsk_pool = pool;
 
-	virtnet_rx_resume(vi, rq);
+	virtnet_rx_resume(vi, rq, true);
 
 	if (pool)
 		return 0;
--
2.43.0