Message-ID: <20230516165043-mutt-send-email-mst@kernel.org>
Date: Tue, 16 May 2023 16:54:38 -0400
From: "Michael S. Tsirkin" <mst@...hat.com>
To: Jason Wang <jasowang@...hat.com>
Cc: davem@...emloft.net, edumazet@...gle.com, kuba@...nel.org,
pabeni@...hat.com, virtualization@...ts.linux-foundation.org,
linux-kernel@...r.kernel.org, maxime.coquelin@...hat.com,
alvaro.karsz@...id-run.com, eperezma@...hat.com,
xuanzhuo@...ux.alibaba.com, david.marchand@...hat.com
Subject: Re: [PATCH net-next V2 2/2] virtio-net: sleep instead of busy
waiting for cvq command
On Thu, Apr 13, 2023 at 02:40:27PM +0800, Jason Wang wrote:
> We used to busy wait on the cvq command; this tends to be
> problematic since there is no way to schedule another process which
> may serve the control virtqueue. This might be the case when the
> control virtqueue is emulated by software. This patch switches to a
> completion to allow the CPU to sleep instead of busy waiting for the
> cvq command.
>
> Signed-off-by: Jason Wang <jasowang@...hat.com>
> ---
> Changes since V1:
> - use completion for simplicity
> - don't try to harden the CVQ command which requires more thought
> Changes since RFC:
> - break the device when timeout
> - get buffer manually since the virtio core check more_used() instead
> ---
> drivers/net/virtio_net.c | 21 ++++++++++++++-------
> 1 file changed, 14 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 2e56bbf86894..d3eb8fd6c9dc 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -19,6 +19,7 @@
> #include <linux/average.h>
> #include <linux/filter.h>
> #include <linux/kernel.h>
> +#include <linux/completion.h>
> #include <net/route.h>
> #include <net/xdp.h>
> #include <net/net_failover.h>
> @@ -295,6 +296,8 @@ struct virtnet_info {
>
> /* failover when STANDBY feature enabled */
> struct failover *failover;
> +
> + struct completion completion;
> };
>
> struct padded_vnet_hdr {
> @@ -1709,6 +1712,13 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
> return !oom;
> }
>
> +static void virtnet_cvq_done(struct virtqueue *cvq)
> +{
> + struct virtnet_info *vi = cvq->vdev->priv;
> +
> + complete(&vi->completion);
> +}
> +
> static void skb_recv_done(struct virtqueue *rvq)
> {
> struct virtnet_info *vi = rvq->vdev->priv;
> @@ -2169,12 +2179,8 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
> if (unlikely(!virtqueue_kick(vi->cvq)))
> return vi->ctrl->status == VIRTIO_NET_OK;
>
> - /* Spin for a response, the kick causes an ioport write, trapping
> - * into the hypervisor, so the request should be handled immediately.
> - */
> - while (!virtqueue_get_buf(vi->cvq, &tmp) &&
> - !virtqueue_is_broken(vi->cvq))
> - cpu_relax();
> + wait_for_completion(&vi->completion);
> + virtqueue_get_buf(vi->cvq, &tmp);
>
> return vi->ctrl->status == VIRTIO_NET_OK;
This seems to break surprise removal and other
situations where the vq gets broken, since callbacks
aren't usually invoked then.
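
For illustration, a bounded wait that re-checks the vq state would look
roughly like the sketch below; the timeout value and the exact error
handling are my assumptions, not something the patch proposes:

	/* Sketch only: bound the sleep so a broken/surprise-removed cvq
	 * does not leave the caller waiting forever. HZ (1s) is an
	 * arbitrary placeholder timeout.
	 */
	while (!wait_for_completion_timeout(&vi->completion, HZ)) {
		if (virtqueue_is_broken(vi->cvq))
			return vi->ctrl->status == VIRTIO_NET_OK;
	}
	virtqueue_get_buf(vi->cvq, &tmp);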
> }
> @@ -3672,7 +3678,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
>
> /* Parameters for control virtqueue, if any */
> if (vi->has_cvq) {
> - callbacks[total_vqs - 1] = NULL;
> + callbacks[total_vqs - 1] = virtnet_cvq_done;
> names[total_vqs - 1] = "control";
> }
>
There is a cost to this, in that we are burning an extra MSI vector
for the slow-path cvq. If the device has only 3 vectors, suddenly we
can't allocate vectors for rx and tx, which is a big problem.
So I'm afraid we need to pass a new flag that will share
the config-changed interrupt with the cvq.
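One rough shape of that sharing (the flag and the exact hook are
hypothetical, nothing like this exists today): the config-changed
callback could also wake a pending cvq command, e.g.:

	/* Hypothetical sketch: reuse the config-changed vector for cvq
	 * completions instead of burning a dedicated MSI vector for the
	 * slow path.
	 */
	static void virtnet_config_changed(struct virtio_device *vdev)
	{
		struct virtnet_info *vi = vdev->priv;

		schedule_work(&vi->config_work); /* existing config handling */
		complete(&vi->completion);       /* also wake virtnet_send_command() */
	}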
> @@ -4122,6 +4128,7 @@ static int virtnet_probe(struct virtio_device *vdev)
> if (vi->has_rss || vi->has_rss_hash_report)
> virtnet_init_default_rss(vi);
>
> + init_completion(&vi->completion);
> enable_rx_mode_work(vi);
>
> /* serialize netdev register + virtio_device_ready() with ndo_open() */
> --
> 2.25.1