Message-Id: <20240425125855.87025-2-hengqi@linux.alibaba.com>
Date: Thu, 25 Apr 2024 20:58:53 +0800
From: Heng Qi <hengqi@...ux.alibaba.com>
To: netdev@...r.kernel.org,
virtualization@...ts.linux.dev
Cc: "Michael S . Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
Xuan Zhuo <xuanzhuo@...ux.alibaba.com>,
Eric Dumazet <edumazet@...gle.com>,
"David S . Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Subject: [PATCH net-next 1/3] virtio_net: enable irq for the control vq

Busy-polling the control vq for request results consumes extra CPU.
Especially now that dim (dynamic interrupt moderation) issues many
more control requests to the device, it is beneficial for the guest
to enable the control vq's irq and sleep until the device completes
the request.

Suggested-by: Jason Wang <jasowang@...hat.com>
Signed-off-by: Heng Qi <hengqi@...ux.alibaba.com>
---
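Not part of the patch itself: below is a minimal userspace C analogue
of the completion-based flow the patch switches to, in case it is
easier to follow outside the driver. All names here (cvq_irq,
sketch.c) are illustrative, not from the kernel sources; a pthread
condition variable stands in for struct completion, and sleep()
stands in for the device processing the request.
Build with: cc -pthread sketch.c

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
static int done;		/* stands in for struct completion */

/* Plays the role of virtnet_cvq_done() + virtnet_get_cvq_work():
 * runs once the "device" is finished and signals the waiter. */
static void *cvq_irq(void *arg)
{
	sleep(1);		/* device handling the request */
	pthread_mutex_lock(&lock);
	done = 1;		/* complete(&vi->completion) */
	pthread_cond_signal(&done_cv);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t irq;

	pthread_create(&irq, NULL, cvq_irq, NULL);

	/* Plays the role of virtnet_send_command() after the kick:
	 * sleep until signalled instead of spinning in the
	 * cond_resched()/cpu_relax() loop this patch removes. */
	pthread_mutex_lock(&lock);
	while (!done)		/* wait_for_completion(&vi->completion) */
		pthread_cond_wait(&done_cv, &lock);
	pthread_mutex_unlock(&lock);

	puts("command completed without burning CPU");
	pthread_join(irq, NULL);
	return 0;
}

The point of the change is the wait: the submitting context blocks in
wait_for_completion() and is woken from the cvq interrupt path, rather
than repeatedly calling virtqueue_get_buf() in a busy loop.
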
drivers/net/virtio_net.c | 45 ++++++++++++++++++++++++++++++----------
1 file changed, 34 insertions(+), 11 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a4d3c76654a4..79a1b30c173c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -287,6 +287,12 @@ struct virtnet_info {
bool has_cvq;
struct mutex cvq_lock;
+ /* Wait for the device to complete the request */
+ struct completion completion;
+
+ /* Work struct for retrieving cvq processing results. */
+ struct work_struct get_cvq;
+
/* Host can handle any s/g split between our header and packet data */
bool any_header_sg;
@@ -520,6 +526,13 @@ static bool virtqueue_napi_complete(struct napi_struct *napi,
return false;
}
+static void virtnet_cvq_done(struct virtqueue *cvq)
+{
+ struct virtnet_info *vi = cvq->vdev->priv;
+
+ schedule_work(&vi->get_cvq);
+}
+
static void skb_xmit_done(struct virtqueue *vq)
{
struct virtnet_info *vi = vq->vdev->priv;
@@ -2036,6 +2049,20 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
return !oom;
}
+static void virtnet_get_cvq_work(struct work_struct *work)
+{
+ struct virtnet_info *vi =
+ container_of(work, struct virtnet_info, get_cvq);
+ unsigned int tmp;
+ void *res;
+
+ mutex_lock(&vi->cvq_lock);
+ res = virtqueue_get_buf(vi->cvq, &tmp);
+ if (res)
+ complete(&vi->completion);
+ mutex_unlock(&vi->cvq_lock);
+}
+
static void skb_recv_done(struct virtqueue *rvq)
{
struct virtnet_info *vi = rvq->vdev->priv;
@@ -2531,7 +2558,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
struct scatterlist *out)
{
struct scatterlist *sgs[4], hdr, stat;
- unsigned out_num = 0, tmp;
+ unsigned out_num = 0;
int ret;
/* Caller should know better */
@@ -2566,16 +2593,10 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
return vi->ctrl->status == VIRTIO_NET_OK;
}
- /* Spin for a response, the kick causes an ioport write, trapping
- * into the hypervisor, so the request should be handled immediately.
- */
- while (!virtqueue_get_buf(vi->cvq, &tmp) &&
- !virtqueue_is_broken(vi->cvq)) {
- cond_resched();
- cpu_relax();
- }
-
mutex_unlock(&vi->cvq_lock);
+
+ wait_for_completion(&vi->completion);
+
return vi->ctrl->status == VIRTIO_NET_OK;
}
@@ -4433,7 +4454,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
/* Parameters for control virtqueue, if any */
if (vi->has_cvq) {
- callbacks[total_vqs - 1] = NULL;
+ callbacks[total_vqs - 1] = virtnet_cvq_done;
names[total_vqs - 1] = "control";
}
@@ -4952,6 +4973,8 @@ static int virtnet_probe(struct virtio_device *vdev)
if (vi->has_rss || vi->has_rss_hash_report)
virtnet_init_default_rss(vi);
+ INIT_WORK(&vi->get_cvq, virtnet_get_cvq_work);
+ init_completion(&vi->completion);
enable_rx_mode_work(vi);
/* serialize netdev register + virtio_device_ready() with ndo_open() */
--
2.32.0.3.g01195cf9f