Message-ID: <CAJaqyWcc2ZtnqUGNk6ox7S_kbnDGy3kWPyxC-7HT4F7aN22BRA@mail.gmail.com>
Date: Fri, 1 Dec 2023 15:51:47 +0100
From: Eugenio Perez Martin <eperezma@...hat.com>
To: Dragos Tatulea <dtatulea@...dia.com>
Cc: "Michael S . Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
Si-Wei Liu <si-wei.liu@...cle.com>,
Saeed Mahameed <saeedm@...dia.com>,
Leon Romanovsky <leon@...nel.org>,
virtualization@...ts.linux-foundation.org, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org, Gal Pressman <galp@...dia.com>,
Parav Pandit <parav@...dia.com>,
Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
Subject: Re: [PATCH vhost 4/7] vdpa/mlx5: Introduce per vq and device resume
On Fri, Dec 1, 2023 at 11:50 AM Dragos Tatulea <dtatulea@...dia.com> wrote:
>
> Implement vdpa vq and device resume if the capability is detected. Add
> support for the suspend -> ready state change.
>
> Signed-off-by: Dragos Tatulea <dtatulea@...dia.com>
Acked-by: Eugenio Pérez <eperezma@...hat.com>
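
For readers following the series, the firmware virtqueue state machine
this patch extends is small enough to sketch in plain C. This is only an
illustration of the transitions as the patch defines them; the enum and
helper names below are invented for the example, not the driver's:

#include <stdbool.h>

/* Invented stand-ins for the MLX5_VIRTIO_NET_Q_OBJECT_STATE_* values. */
enum vq_state { VQ_INIT, VQ_RDY, VQ_SUSPEND, VQ_ERR };

/*
 * Mirrors is_valid_state_change(): INIT -> RDY, RDY -> SUSPEND and,
 * only on resumable devices, the new SUSPEND -> RDY transition.
 * Anything else, including any transition out of ERR, is rejected.
 */
static bool valid_transition(enum vq_state from, enum vq_state to,
			     bool resumable)
{
	switch (from) {
	case VQ_INIT:
		return to == VQ_RDY;
	case VQ_RDY:
		return to == VQ_SUSPEND;
	case VQ_SUSPEND:
		return resumable && to == VQ_RDY;
	case VQ_ERR:
	default:
		return false;
	}
}
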
> ---
> drivers/vdpa/mlx5/net/mlx5_vnet.c | 67 +++++++++++++++++++++++++++----
> 1 file changed, 60 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> index d06285e46fe2..68e534cb57e2 100644
> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> @@ -1170,7 +1170,12 @@ static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueu
> return err;
> }
>
> -static bool is_valid_state_change(int oldstate, int newstate)
> +static bool is_resumable(struct mlx5_vdpa_net *ndev)
> +{
> + return ndev->mvdev.vdev.config->resume;
> +}
> +
> +static bool is_valid_state_change(int oldstate, int newstate, bool resumable)
> {
> switch (oldstate) {
> case MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT:
> @@ -1178,6 +1183,7 @@ static bool is_valid_state_change(int oldstate, int newstate)
> case MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY:
> return newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND;
> case MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND:
> + return resumable ? newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY : false;
> case MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR:
> default:
> return false;
> @@ -1200,6 +1206,7 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
> {
> int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
> u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {};
> + bool state_change = false;
> void *obj_context;
> void *cmd_hdr;
> void *in;
> @@ -1211,9 +1218,6 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
> if (!modifiable_virtqueue_fields(mvq))
> return -EINVAL;
>
> - if (!is_valid_state_change(mvq->fw_state, state))
> - return -EINVAL;
> -
> in = kzalloc(inlen, GFP_KERNEL);
> if (!in)
> return -ENOMEM;
> @@ -1226,17 +1230,29 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
> MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
>
> obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context);
> - if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE)
> +
> + if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) {
> + if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) {
> + err = -EINVAL;
> + goto done;
> + }
> +
> MLX5_SET(virtio_net_q_object, obj_context, state, state);
> + state_change = true;
> + }
>
> MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, mvq->modified_fields);
> err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
> - kfree(in);
> - if (!err)
> + if (err)
> + goto done;
> +
> + if (state_change)
> mvq->fw_state = state;
>
> mvq->modified_fields = 0;
>
> +done:
> + kfree(in);
> return err;
> }
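
Moving the is_valid_state_change() check below the kzalloc() is what
forces the new done: label, and it is nice that the cached fw_state is
only committed once mlx5_cmd_exec() has succeeded. The resulting shape,
as a self-contained sketch (reusing valid_transition() from the sketch
above; every other name here is invented):

#include <errno.h>
#include <stdlib.h>

struct q_obj {
	enum vq_state cached_state;
	bool resumable;
};

/* Stand-in for the firmware command; always succeeds in this sketch. */
static int exec_fw_command(struct q_obj *obj, void *in)
{
	(void)obj; (void)in;
	return 0;
}

static int modify_state(struct q_obj *obj, enum vq_state new_state)
{
	void *in = malloc(64);
	int err;

	if (!in)
		return -ENOMEM;

	/* Reject invalid transitions before touching the device. */
	if (!valid_transition(obj->cached_state, new_state,
			      obj->resumable)) {
		err = -EINVAL;
		goto done;
	}

	err = exec_fw_command(obj, in);
	if (err)
		goto done;

	/* Commit the cached state only on success. */
	obj->cached_state = new_state;
done:
	free(in);
	return err;
}
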
>
> @@ -1430,6 +1446,24 @@ static void suspend_vqs(struct mlx5_vdpa_net *ndev)
> suspend_vq(ndev, &ndev->vqs[i]);
> }
>
> +static void resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
> +{
> + if (!mvq->initialized || !is_resumable(ndev))
> + return;
> +
> + if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND)
> + return;
> +
> + if (modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY))
> + mlx5_vdpa_warn(&ndev->mvdev, "modify to resume failed\n");
> +}
> +
> +static void resume_vqs(struct mlx5_vdpa_net *ndev)
> +{
> + for (int i = 0; i < ndev->mvdev.max_vqs; i++)
> + resume_vq(ndev, &ndev->vqs[i]);
> +}
> +
> static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
> {
> if (!mvq->initialized)
> @@ -3256,6 +3290,21 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
> return 0;
> }
>
> +static int mlx5_vdpa_resume(struct vdpa_device *vdev)
> +{
> + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
> + struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
> +
> + mlx5_vdpa_info(mvdev, "resuming device\n");
> +
> + down_write(&ndev->reslock);
> + mvdev->suspended = false;
> + resume_vqs(ndev);
> + register_link_notifier(ndev);
> + up_write(&ndev->reslock);
> + return 0;
> +}
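
In case it helps with testing: once applied, this op is reachable from
userspace through the vhost-vdpa chardev. A minimal round trip could
look like the sketch below. I am going from memory on the ioctl names
(they should be in <linux/vhost.h>) and the device node path is just a
placeholder, so double-check both; a real user also needs the usual
feature negotiation and DRIVER_OK beforehand.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

int main(void)
{
	/* Placeholder path; the instance number depends on the system. */
	int fd = open("/dev/vhost-vdpa-0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Suspend, then resume. With this patch the mlx5 backend moves
	 * its firmware vqs SUSPEND -> RDY instead of rejecting this. */
	if (ioctl(fd, VHOST_VDPA_SUSPEND))
		perror("VHOST_VDPA_SUSPEND");
	if (ioctl(fd, VHOST_VDPA_RESUME))
		perror("VHOST_VDPA_RESUME");

	return 0;
}
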
> +
> static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
> unsigned int asid)
> {
> @@ -3312,6 +3361,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
> .get_vq_dma_dev = mlx5_get_vq_dma_dev,
> .free = mlx5_vdpa_free,
> .suspend = mlx5_vdpa_suspend,
> + .resume = mlx5_vdpa_resume, /* Op disabled if not supported. */
> };
>
> static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
> @@ -3683,6 +3733,9 @@ static int mlx5v_probe(struct auxiliary_device *adev,
> if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, desc_group_mkey_supported))
> mgtdev->vdpa_ops.get_vq_desc_group = NULL;
>
> + if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, freeze_to_rdy_supported))
> + mgtdev->vdpa_ops.resume = NULL;
> +
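
Same gating pattern as the desc_group_mkey check above: probe starts
from a fully populated ops table and clears the ops the device cannot
honor, so the core can report the feature as absent instead of failing
at runtime. Schematically (invented names, not the vdpa core's code):

#include <errno.h>

struct dev;

struct dev_ops {
	int (*suspend)(struct dev *d);
	int (*resume)(struct dev *d);
};

/* The core treats a NULL op as "not supported". */
static int core_resume(struct dev *d, const struct dev_ops *ops)
{
	if (!ops->resume)
		return -EOPNOTSUPP;
	return ops->resume(d);
}
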
> err = vdpa_mgmtdev_register(&mgtdev->mgtdev);
> if (err)
> goto reg_err;
> --
> 2.42.0
>