[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20240605145533.86229-2-hengqi@linux.alibaba.com>
Date: Wed, 5 Jun 2024 22:55:32 +0800
From: Heng Qi <hengqi@...ux.alibaba.com>
To: netdev@...r.kernel.org,
virtualization@...ts.linux.dev
Cc: Jason Wang <jasowang@...hat.com>,
"Michael S. Tsirkin" <mst@...hat.com>,
Xuan Zhuo <xuanzhuo@...ux.alibaba.com>,
Eugenio Pérez <eperezma@...hat.com>,
Eric Dumazet <edumazet@...gle.com>,
"David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Subject: [PATCH net-next v2 1/2] virtio_net: enable irq for the control vq
If the device does not respond to a request for a long time,
then control vq polling elevates CPU utilization, a problem that
is exacerbated as the number of command requests grows.
Enabling the control vq's irq is advantageous for the guest,
though this still does not support concurrent requests.
This code is basically from Jason:
https://lore.kernel.org/all/20230413064027.13267-3-jasowang@redhat.com/
Suggested-by: Jason Wang <jasowang@...hat.com>
Signed-off-by: Heng Qi <hengqi@...ux.alibaba.com>
---
drivers/net/virtio_net.c | 23 ++++++++++++++---------
1 file changed, 14 insertions(+), 9 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4a802c0ea2cb..9b556ce89546 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -418,6 +418,9 @@ struct virtnet_info {
/* Lock to protect the control VQ */
struct mutex cvq_lock;
+ /* Wait for the device to complete the cvq request. */
+ struct completion completion;
+
/* Host can handle any s/g split between our header and packet data */
bool any_header_sg;
@@ -664,6 +667,13 @@ static bool virtqueue_napi_complete(struct napi_struct *napi,
return false;
}
+static void virtnet_cvq_done(struct virtqueue *cvq)
+{
+ struct virtnet_info *vi = cvq->vdev->priv;
+
+ complete(&vi->completion);
+}
+
static void skb_xmit_done(struct virtqueue *vq)
{
struct virtnet_info *vi = vq->vdev->priv;
@@ -2721,14 +2731,8 @@ static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd
if (unlikely(!virtqueue_kick(vi->cvq)))
goto unlock;
- /* Spin for a response, the kick causes an ioport write, trapping
- * into the hypervisor, so the request should be handled immediately.
- */
- while (!virtqueue_get_buf(vi->cvq, &tmp) &&
- !virtqueue_is_broken(vi->cvq)) {
- cond_resched();
- cpu_relax();
- }
+ wait_for_completion(&vi->completion);
+ virtqueue_get_buf(vi->cvq, &tmp);
unlock:
mutex_unlock(&vi->cvq_lock);
@@ -5312,7 +5316,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
/* Parameters for control virtqueue, if any */
if (vi->has_cvq) {
- callbacks[total_vqs - 1] = NULL;
+ callbacks[total_vqs - 1] = virtnet_cvq_done;
names[total_vqs - 1] = "control";
}
@@ -5832,6 +5836,7 @@ static int virtnet_probe(struct virtio_device *vdev)
if (vi->has_rss || vi->has_rss_hash_report)
virtnet_init_default_rss(vi);
+ init_completion(&vi->completion);
enable_rx_mode_work(vi);
/* serialize netdev register + virtio_device_ready() with ndo_open() */
--
2.32.0.3.g01195cf9f
Powered by blists - more mailing lists