Message-ID: <DM8PR12MB5400B1559C1A4254B6531328AB1F9@DM8PR12MB5400.namprd12.prod.outlook.com>
Date: Wed, 30 Mar 2022 06:47:48 +0000
From: Eli Cohen <elic@...dia.com>
To: Jason Wang <jasowang@...hat.com>, "mst@...hat.com" <mst@...hat.com>
CC: "hdanton@...a.com" <hdanton@...a.com>,
"virtualization@...ts.linux-foundation.org"
<virtualization@...ts.linux-foundation.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: RE: [PATCH RESEND V2 1/3] vdpa: mlx5: prevent cvq work from hogging
CPU
> From: Jason Wang <jasowang@...hat.com>
> Sent: Tuesday, March 29, 2022 7:21 AM
> To: jasowang@...hat.com; mst@...hat.com
> Cc: Eli Cohen <elic@...dia.com>; hdanton@...a.com; virtualization@...ts.linux-foundation.org; linux-kernel@...r.kernel.org
> Subject: [PATCH RESEND V2 1/3] vdpa: mlx5: prevent cvq work from hogging CPU
>
> A userspace-triggerable infinite loop could happen in
> mlx5_cvq_kick_handler() if userspace keeps sending a huge amount of
> cvq requests.
>
> Fix this by introducing a quota and re-queueing the work if we run
> out of budget (currently the implicit budget is one). While at it,
> use a per-device work struct to avoid on-demand memory allocation
> for the cvq.
>
> Fixes: 5262912ef3cfc ("vdpa/mlx5: Add support for control VQ and MAC setting")
> Signed-off-by: Jason Wang <jasowang@...hat.com>
Acked-by: Eli Cohen <elic@...dia.com>
> ---
> Changes since V1:
> - Using 1 as the budget
> ---
>  drivers/vdpa/mlx5/net/mlx5_vnet.c | 21 +++++++++------------
>  1 file changed, 9 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> index d0f91078600e..b2afd2b6fbca 100644
> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> @@ -163,6 +163,7 @@ struct mlx5_vdpa_net {
>  	u32 cur_num_vqs;
>  	struct notifier_block nb;
>  	struct vdpa_callback config_cb;
> +	struct mlx5_vdpa_wq_ent cvq_ent;
>  };
>
>  static void free_resources(struct mlx5_vdpa_net *ndev);
> @@ -1616,10 +1617,10 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
>  	ndev = to_mlx5_vdpa_ndev(mvdev);
>  	cvq = &mvdev->cvq;
>  	if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
> -		goto out;
> +		return;
>
>  	if (!cvq->ready)
> -		goto out;
> +		return;
>
>  	while (true) {
>  		err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov, &cvq->head,
> @@ -1653,9 +1654,10 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
>
>  		if (vringh_need_notify_iotlb(&cvq->vring))
>  			vringh_notify(&cvq->vring);
> +
> +		queue_work(mvdev->wq, &wqent->work);
> +		break;
>  	}
> -out:
> -	kfree(wqent);
>  }
>
>  static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
> @@ -1663,7 +1665,6 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
>  	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
>  	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
>  	struct mlx5_vdpa_virtqueue *mvq;
> -	struct mlx5_vdpa_wq_ent *wqent;
>
>  	if (!is_index_valid(mvdev, idx))
>  		return;
> @@ -1672,13 +1673,7 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
>  		if (!mvdev->cvq.ready)
>  			return;
>
> -		wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
> -		if (!wqent)
> -			return;
> -
> -		wqent->mvdev = mvdev;
> -		INIT_WORK(&wqent->work, mlx5_cvq_kick_handler);
> -		queue_work(mvdev->wq, &wqent->work);
> +		queue_work(mvdev->wq, &ndev->cvq_ent.work);
>  		return;
>  	}
>
> @@ -2668,6 +2663,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
>  	if (err)
>  		goto err_mr;
>
> +	ndev->cvq_ent.mvdev = mvdev;
> +	INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler);
>  	mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq");
>  	if (!mvdev->wq) {
>  		err = -ENOMEM;
> --
> 2.18.1
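
For readers skimming the diff: the fix combines two things, a work entry
embedded in the device (so the kick path never allocates) and a bounded
amount of work per handler invocation, with the handler re-queueing itself
when the budget runs out. Below is a minimal standalone sketch of that
pattern; demo_dev, demo_cvq_handler, DEMO_BUDGET and the pending counter
are illustrative stand-ins, not names from the driver:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>

/* Hypothetical device that embeds its own work entry, mirroring the
 * patch's mlx5_vdpa_net::cvq_ent, so the kick path never allocates. */
struct demo_dev {
	struct workqueue_struct *wq;
	struct work_struct cvq_work;
	atomic_t pending;	/* stand-in for "requests posted on the cvq" */
};

#define DEMO_BUDGET 1		/* mirrors the patch's implicit budget of one */

static void demo_cvq_handler(struct work_struct *work)
{
	/* The work struct lives inside the device, so container_of()
	 * recovers the device without any per-kick allocation. */
	struct demo_dev *dev = container_of(work, struct demo_dev, cvq_work);
	int done = 0;

	while (atomic_read(&dev->pending) > 0) {
		/* ... process one control request here ... */
		atomic_dec(&dev->pending);

		if (++done >= DEMO_BUDGET) {
			/* Budget exhausted: requeue ourselves and return,
			 * so this work item cannot monopolize the
			 * workqueue's kthread no matter how fast userspace
			 * keeps posting requests. */
			queue_work(dev->wq, &dev->cvq_work);
			break;
		}
	}
}

A side benefit of the embedded work struct is that queue_work() refuses to
queue a work item that is already pending, so a burst of kicks collapses
into one handler run plus at most one requeue, instead of a GFP_ATOMIC
allocation and a separately queued item per kick.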