Message-ID: <20240802072039.267446-6-dtatulea@nvidia.com>
Date: Fri, 2 Aug 2024 10:20:22 +0300
From: Dragos Tatulea <dtatulea@...dia.com>
To: Dragos Tatulea <dtatulea@...dia.com>, "Michael S. Tsirkin"
<mst@...hat.com>, Jason Wang <jasowang@...hat.com>, Xuan Zhuo
<xuanzhuo@...ux.alibaba.com>, Eugenio Pérez
<eperezma@...hat.com>
CC: Si-Wei Liu <si-wei.liu@...cle.com>, Tariq Toukan <tariqt@...dia.com>,
<virtualization@...ts.linux.dev>, <linux-kernel@...r.kernel.org>
Subject: [PATCH vhost 5/7] vdpa/mlx5: Parallelize device suspend
Currently, device suspend operates on the vqs serially. Building on the
previous changes that converted the vq operations to the async API, this
patch parallelizes device suspend:
1) Suspend all active vqs in parallel.
2) Query the suspended vqs in parallel.
For 1 vDPA device x 32 VQs (16 VQPs) attached to a large VM (256 GB RAM,
32 CPUs x 2 threads per core), the device suspend time is reduced from
~37 ms to ~13 ms.
A later patch will remove the link unregister operation, which will make
suspend even faster.
Signed-off-by: Dragos Tatulea <dtatulea@...dia.com>
Reviewed-by: Tariq Toukan <tariqt@...dia.com>
---
drivers/vdpa/mlx5/net/mlx5_vnet.c | 56 ++++++++++++++++---------------
1 file changed, 29 insertions(+), 27 deletions(-)
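
As an illustration only (not part of the patch): the stand-alone user-space
sketch below models why issuing the per-VQ firmware commands concurrently,
instead of one at a time, shrinks the total suspend latency. Each "command"
is simulated with a fixed sleep standing in for a firmware round trip; the
driver itself gets the same overlap from the async command API converted to
earlier in this series, not from threads. NUM_VQS, CMD_RTT_US and
fake_fw_cmd() are made-up names used only for this sketch.

/* Build: gcc -O2 -pthread sketch.c -o sketch */
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define NUM_VQS		32
#define CMD_RTT_US	1000	/* pretend each FW command takes ~1 ms */

static void fake_fw_cmd(int vq)
{
	(void)vq;
	usleep(CMD_RTT_US);	/* stand-in for one firmware round trip */
}

static void *suspend_one(void *arg)
{
	fake_fw_cmd((int)(long)arg);
	return NULL;
}

static double elapsed_ms(struct timespec a, struct timespec b)
{
	return (b.tv_sec - a.tv_sec) * 1e3 + (b.tv_nsec - a.tv_nsec) / 1e6;
}

int main(void)
{
	struct timespec t0, t1;
	pthread_t th[NUM_VQS];
	int i;

	/* Serial: each command waits for the previous one to complete. */
	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (i = 0; i < NUM_VQS; i++)
		fake_fw_cmd(i);
	clock_gettime(CLOCK_MONOTONIC, &t1);
	printf("serial:   %.1f ms\n", elapsed_ms(t0, t1));

	/* Parallel: issue all commands, then wait for all completions. */
	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (i = 0; i < NUM_VQS; i++)
		pthread_create(&th[i], NULL, suspend_one, (void *)(long)i);
	for (i = 0; i < NUM_VQS; i++)
		pthread_join(th[i], NULL);
	clock_gettime(CLOCK_MONOTONIC, &t1);
	printf("parallel: %.1f ms\n", elapsed_ms(t0, t1));

	return 0;
}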
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index e56a0ee1b725..1887939c5673 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1714,49 +1714,51 @@ static int modify_virtqueues(struct mlx5_vdpa_net *ndev, int start_vq, int num_v
 	return err;
 }
 
-static int suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+static int suspend_vqs(struct mlx5_vdpa_net *ndev, int start_vq, int num_vqs)
 {
-	struct mlx5_virtq_attr attr;
+	struct mlx5_vdpa_virtqueue *mvq;
+	struct mlx5_virtq_attr *attrs;
+	int vq_idx, i;
 	int err;
 
+	if (start_vq >= ndev->cur_num_vqs)
+		return -EINVAL;
+
+	mvq = &ndev->vqs[start_vq];
 	if (!mvq->initialized)
 		return 0;
 
 	if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
 		return 0;
 
-	err = modify_virtqueues(ndev, mvq->index, 1, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND);
-	if (err) {
-		mlx5_vdpa_err(&ndev->mvdev, "modify to suspend failed, err: %d\n", err);
-		return err;
-	}
-
-	err = query_virtqueues(ndev, mvq->index, 1, &attr);
-	if (err) {
-		mlx5_vdpa_err(&ndev->mvdev, "failed to query virtqueue, err: %d\n", err);
+	err = modify_virtqueues(ndev, start_vq, num_vqs, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND);
+	if (err)
 		return err;
-	}
-
-	mvq->avail_idx = attr.available_index;
-	mvq->used_idx = attr.used_index;
-
-	return 0;
-}
 
-static int suspend_vqs(struct mlx5_vdpa_net *ndev)
-{
-	int err = 0;
-	int i;
+	attrs = kcalloc(num_vqs, sizeof(struct mlx5_virtq_attr), GFP_KERNEL);
+	if (!attrs)
+		return -ENOMEM;
 
-	for (i = 0; i < ndev->cur_num_vqs; i++) {
-		int local_err = suspend_vq(ndev, &ndev->vqs[i]);
+	err = query_virtqueues(ndev, start_vq, num_vqs, attrs);
+	if (err)
+		goto done;
 
-		err = local_err ? local_err : err;
+	for (i = 0, vq_idx = start_vq; i < num_vqs; i++, vq_idx++) {
+		mvq = &ndev->vqs[vq_idx];
+		mvq->avail_idx = attrs[i].available_index;
+		mvq->used_idx = attrs[i].used_index;
 	}
 
+done:
+	kfree(attrs);
 	return err;
 }
 
+static int suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+	return suspend_vqs(ndev, mvq->index, 1);
+}
+
 static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
 {
 	int err;
@@ -3137,7 +3139,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
 	bool teardown = !is_resumable(ndev);
 	int err;
 
-	suspend_vqs(ndev);
+	suspend_vqs(ndev, 0, ndev->cur_num_vqs);
 	if (teardown) {
 		err = save_channels_info(ndev);
 		if (err)
@@ -3690,7 +3692,7 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
 
 	down_write(&ndev->reslock);
 	unregister_link_notifier(ndev);
-	err = suspend_vqs(ndev);
+	err = suspend_vqs(ndev, 0, ndev->cur_num_vqs);
 	mlx5_vdpa_cvq_suspend(mvdev);
 	mvdev->suspended = true;
 	up_write(&ndev->reslock);
--
2.45.2