Message-ID: <20250225020455.212895-3-jdamato@fastly.com>
Date: Tue, 25 Feb 2025 02:04:49 +0000
From: Joe Damato <jdamato@...tly.com>
To: netdev@...r.kernel.org
Cc: mkarsten@...terloo.ca,
gerhard@...leder-embedded.com,
jasowang@...hat.com,
xuanzhuo@...ux.alibaba.com,
kuba@...nel.org,
Joe Damato <jdamato@...tly.com>,
"Michael S. Tsirkin" <mst@...hat.com>,
Eugenio Pérez <eperezma@...hat.com>,
Andrew Lunn <andrew+netdev@...n.ch>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Paolo Abeni <pabeni@...hat.com>,
virtualization@...ts.linux.dev (open list:VIRTIO CORE AND NET DRIVERS),
linux-kernel@...r.kernel.org (open list)
Subject: [PATCH net-next v4 2/4] virtio-net: Refactor napi_disable paths

Create a virtnet_napi_disable helper and refactor virtnet_napi_tx_disable
to take a struct send_queue.

Signed-off-by: Joe Damato <jdamato@...tly.com>
---
drivers/net/virtio_net.c | 25 +++++++++++++++++--------
1 file changed, 17 insertions(+), 8 deletions(-)
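
Note for reviewers (not part of the patch): below is a minimal, stand-alone
C sketch of the helper-wrapping pattern this refactor applies. The struct
definitions and the napi_disable() stub are simplified stand-ins for the
kernel types, and main() exists only to make the sketch runnable; none of it
reflects the real kernel definitions.

/* Illustrative sketch only: stub types mimic the kernel structures so the
 * helper-wrapping pattern from this patch can be shown in isolation.
 */
#include <stdbool.h>
#include <stdio.h>

struct napi_struct {		/* stand-in for the kernel's struct napi_struct */
	int weight;
	bool disabled;
};

struct send_queue {		/* stand-in for virtio_net's struct send_queue */
	struct napi_struct napi;
};

struct receive_queue {		/* stand-in for virtio_net's struct receive_queue */
	struct napi_struct napi;
};

static void napi_disable(struct napi_struct *napi)	/* stubbed */
{
	napi->disabled = true;
}

/* TX helper: only disable when TX NAPI is in use (non-zero weight),
 * mirroring virtnet_napi_tx_disable() in this patch.
 */
static void virtnet_napi_tx_disable(struct send_queue *sq)
{
	struct napi_struct *napi = &sq->napi;

	if (napi->weight)
		napi_disable(napi);
}

/* RX helper: always disable, mirroring virtnet_napi_disable() in this patch. */
static void virtnet_napi_disable(struct receive_queue *rq)
{
	napi_disable(&rq->napi);
}

int main(void)
{
	struct send_queue sq = { .napi = { .weight = 0 } };
	struct receive_queue rq = { .napi = { .weight = 64 } };

	/* Callers now pass the queue itself rather than &queue->napi. */
	virtnet_napi_tx_disable(&sq);
	virtnet_napi_disable(&rq);

	printf("tx napi disabled: %d, rx napi disabled: %d\n",
	       sq.napi.disabled, rq.napi.disabled);
	return 0;
}

The point of the pattern is that callers pass the queue itself rather than
&queue->napi, so the TX-only weight check lives in one place.
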
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 133b004c7a9a..e578885c1093 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2845,12 +2845,21 @@ static void virtnet_napi_tx_enable(struct send_queue *sq)
virtnet_napi_do_enable(sq->vq, napi);
}
-static void virtnet_napi_tx_disable(struct napi_struct *napi)
+static void virtnet_napi_tx_disable(struct send_queue *sq)
{
+ struct napi_struct *napi = &sq->napi;
+
if (napi->weight)
napi_disable(napi);
}
+static void virtnet_napi_disable(struct receive_queue *rq)
+{
+ struct napi_struct *napi = &rq->napi;
+
+ napi_disable(napi);
+}
+
static void refill_work(struct work_struct *work)
{
struct virtnet_info *vi =
@@ -2861,7 +2870,7 @@ static void refill_work(struct work_struct *work)
for (i = 0; i < vi->curr_queue_pairs; i++) {
struct receive_queue *rq = &vi->rq[i];
- napi_disable(&rq->napi);
+ virtnet_napi_disable(rq);
still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
virtnet_napi_enable(rq);
@@ -3060,8 +3069,8 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
{
- virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
- napi_disable(&vi->rq[qp_index].napi);
+ virtnet_napi_tx_disable(&vi->sq[qp_index]);
+ virtnet_napi_disable(&vi->rq[qp_index]);
xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
}
@@ -3333,7 +3342,7 @@ static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
bool running = netif_running(vi->dev);
if (running) {
- napi_disable(&rq->napi);
+ virtnet_napi_disable(rq);
virtnet_cancel_dim(vi, &rq->dim);
}
}
@@ -3375,7 +3384,7 @@ static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq)
qindex = sq - vi->sq;
if (running)
- virtnet_napi_tx_disable(&sq->napi);
+ virtnet_napi_tx_disable(sq);
txq = netdev_get_tx_queue(vi->dev, qindex);
@@ -5952,8 +5961,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
/* Make sure NAPI is not using any XDP TX queues for RX. */
if (netif_running(dev)) {
for (i = 0; i < vi->max_queue_pairs; i++) {
- napi_disable(&vi->rq[i].napi);
- virtnet_napi_tx_disable(&vi->sq[i].napi);
+ virtnet_napi_disable(&vi->rq[i]);
+ virtnet_napi_tx_disable(&vi->sq[i]);
}
}
--
2.45.2