Message-Id: <20250110202605.429475-4-jdamato@fastly.com>
Date: Fri, 10 Jan 2025 20:26:04 +0000
From: Joe Damato <jdamato@...tly.com>
To: netdev@...r.kernel.org
Cc: mkarsten@...terloo.ca,
Joe Damato <jdamato@...tly.com>,
"Michael S. Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
Xuan Zhuo <xuanzhuo@...ux.alibaba.com>,
Eugenio Pérez <eperezma@...hat.com>,
Andrew Lunn <andrew+netdev@...n.ch>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
virtualization@...ts.linux.dev (open list:VIRTIO CORE AND NET DRIVERS),
linux-kernel@...r.kernel.org (open list)
Subject: [PATCH net-next 3/3] virtio_net: Map NAPIs to queues

Use netif_queue_set_napi to map NAPIs to queue IDs so that the mapping
can be accessed by user apps.

$ ethtool -i ens4 | grep driver
driver: virtio_net

$ sudo ethtool -L ens4 combined 4
$ ./tools/net/ynl/pyynl/cli.py \
--spec Documentation/netlink/specs/netdev.yaml \
--dump queue-get --json='{"ifindex": 2}'
[{'id': 0, 'ifindex': 2, 'napi-id': 8289, 'type': 'rx'},
{'id': 1, 'ifindex': 2, 'napi-id': 8290, 'type': 'rx'},
{'id': 2, 'ifindex': 2, 'napi-id': 8291, 'type': 'rx'},
{'id': 3, 'ifindex': 2, 'napi-id': 8292, 'type': 'rx'},
{'id': 0, 'ifindex': 2, 'type': 'tx'},
{'id': 1, 'ifindex': 2, 'type': 'tx'},
{'id': 2, 'ifindex': 2, 'type': 'tx'},
{'id': 3, 'ifindex': 2, 'type': 'tx'}]

Note that virtio_net has TX-only NAPIs, which do not have NAPI IDs, so
the lack of 'napi-id' in the TX entries above is expected.

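The locking rule this change revolves around, reduced to a minimal
sketch (the my_rx_enable/my_rx_disable helper names below are
illustrative, not the driver's): netif_queue_set_napi() must be called
with RTNL held, so a caller that does not already hold it has to take
the lock around the mapping update:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Sketch only: pair queue -> NAPI registration with NAPI enable,
 * and clear the mapping before NAPI disable.
 */
static void my_rx_enable(struct net_device *dev, int q,
			 struct napi_struct *napi, bool need_rtnl)
{
	napi_enable(napi);

	if (need_rtnl)
		rtnl_lock();

	/* Publish the RX queue -> NAPI mapping to the netdev netlink API. */
	netif_queue_set_napi(dev, q, NETDEV_QUEUE_TYPE_RX, napi);

	if (need_rtnl)
		rtnl_unlock();
}

static void my_rx_disable(struct net_device *dev, int q,
			  struct napi_struct *napi)
{
	/* Caller holds RTNL; unpublish the mapping before disabling. */
	netif_queue_set_napi(dev, q, NETDEV_QUEUE_TYPE_RX, NULL);
	napi_disable(napi);
}

In this patch, refill_work() is the one enable path that runs without
RTNL, which is why virtnet_napi_enable_lock() takes a need_rtnl
parameter rather than locking unconditionally.
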
Signed-off-by: Joe Damato <jdamato@...tly.com>
---
drivers/net/virtio_net.c | 29 ++++++++++++++++++++++++++---
1 file changed, 26 insertions(+), 3 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4e88d352d3eb..8f0f26cc5a94 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2804,14 +2804,28 @@ static void virtnet_napi_do_enable(struct virtqueue *vq,
}

static void virtnet_napi_enable_lock(struct virtqueue *vq,
- struct napi_struct *napi)
+ struct napi_struct *napi,
+ bool need_rtnl)
{
+ struct virtnet_info *vi = vq->vdev->priv;
+ int q = vq2rxq(vq);
+
virtnet_napi_do_enable(vq, napi);
+
+ if (q < vi->curr_queue_pairs) {
+ if (need_rtnl)
+ rtnl_lock();
+
+ netif_queue_set_napi(vi->dev, q, NETDEV_QUEUE_TYPE_RX, napi);
+
+ if (need_rtnl)
+ rtnl_unlock();
+ }
}

static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
{
- virtnet_napi_enable_lock(vq, napi);
+ virtnet_napi_enable_lock(vq, napi, false);
}

static void virtnet_napi_tx_enable(struct virtnet_info *vi,
@@ -2848,9 +2862,13 @@ static void refill_work(struct work_struct *work)
for (i = 0; i < vi->curr_queue_pairs; i++) {
struct receive_queue *rq = &vi->rq[i];

+ rtnl_lock();
+ netif_queue_set_napi(vi->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
+ rtnl_unlock();
napi_disable(&rq->napi);
+
still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
- virtnet_napi_enable_lock(rq->vq, &rq->napi);
+ virtnet_napi_enable_lock(rq->vq, &rq->napi, true);

/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again.
@@ -3048,6 +3066,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
{
virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
+ netif_queue_set_napi(vi->dev, qp_index, NETDEV_QUEUE_TYPE_RX, NULL);
napi_disable(&vi->rq[qp_index].napi);
xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
}
@@ -3317,8 +3336,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
{
bool running = netif_running(vi->dev);
+ int q = vq2rxq(rq->vq);

if (running) {
+ netif_queue_set_napi(vi->dev, q, NETDEV_QUEUE_TYPE_RX, NULL);
napi_disable(&rq->napi);
virtnet_cancel_dim(vi, &rq->dim);
}
@@ -5943,6 +5964,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
/* Make sure NAPI is not using any XDP TX queues for RX. */
if (netif_running(dev)) {
for (i = 0; i < vi->max_queue_pairs; i++) {
+ netif_queue_set_napi(vi->dev, i, NETDEV_QUEUE_TYPE_RX,
+ NULL);
napi_disable(&vi->rq[i].napi);
virtnet_napi_tx_disable(&vi->sq[i].napi);
}
--
2.25.1