Message-ID: <54239b51-918c-3475-dc88-4da1a4548da8@redhat.com>
Date:   Fri, 29 Jan 2021 11:49:45 +0800
From:   Jason Wang <jasowang@...hat.com>
To:     Eli Cohen <elic@...dia.com>, mst@...hat.com
Cc:     virtualization@...ts.linux-foundation.org, netdev@...r.kernel.org,
        linux-kernel@...r.kernel.org, lulu@...hat.com
Subject: Re: [PATCH 2/2] vdpa/mlx5: Restore the hardware used index after
 change map


On 2021/1/28 9:41 PM, Eli Cohen wrote:
> When a change of memory map occurs, the hardware resources are destroyed
> and then re-created with the new memory map. In such a case, we need to
> restore the hardware available and used indices. The driver failed to
> restore the used index; this patch adds that restore.
>
> Fixes: 1a86b377aa21 ("vdpa/mlx5: Add VDPA driver for supported mlx5 devices")
> Signed-off-by: Eli Cohen <elic@...dia.com>


A question: does this mean that, after a vq is suspended, the hw used index
is not equal to the vq used index?
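
Just to check my reading of the change, here is a rough, userspace-style
sketch of the save/restore step this patch completes. The struct and
function names below are only illustrative (they loosely mirror
mlx5_vq_restore_info and the query/create paths in the patch); this is
not the driver's actual code.

#include <stdint.h>
#include <stdio.h>

/* Illustrative model only: "hardware" state that is lost when the
 * virtqueue object is destroyed on a memory map change. */
struct hw_vq {
	uint16_t hw_available_index;
	uint16_t hw_used_index;
};

/* Driver-side snapshot of the fields being saved/restored,
 * loosely following mlx5_vq_restore_info in the patch. */
struct restore_info {
	uint16_t avail_index;
	uint16_t used_index;	/* the field this patch adds */
};

/* Roughly what query_virtqueue() provides before teardown. */
static void save_state(const struct hw_vq *hw, struct restore_info *ri)
{
	ri->avail_index = hw->hw_available_index;
	ri->used_index = hw->hw_used_index;
}

/* Roughly what create_virtqueue() programs into the new object.
 * Before this patch only the available index was written back, so the
 * re-created object started with hw_used_index == 0. */
static void restore_state(struct hw_vq *hw, const struct restore_info *ri)
{
	hw->hw_available_index = ri->avail_index;
	hw->hw_used_index = ri->used_index;
}

int main(void)
{
	struct hw_vq old = { .hw_available_index = 10, .hw_used_index = 7 };
	struct hw_vq fresh = { 0, 0 };	/* freshly created object */
	struct restore_info ri;

	save_state(&old, &ri);		/* suspend + query old object */
	restore_state(&fresh, &ri);	/* re-create with saved indices */

	printf("avail %u used %u\n",
	       (unsigned)fresh.hw_available_index,
	       (unsigned)fresh.hw_used_index);
	return 0;
}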

Thanks


> ---
>   drivers/vdpa/mlx5/net/mlx5_vnet.c | 7 +++++++
>   1 file changed, 7 insertions(+)
>
> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> index 549ded074ff3..3fc8588cecae 100644
> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> @@ -87,6 +87,7 @@ struct mlx5_vq_restore_info {
>   	u64 device_addr;
>   	u64 driver_addr;
>   	u16 avail_index;
> +	u16 used_index;
>   	bool ready;
>   	struct vdpa_callback cb;
>   	bool restore;
> @@ -121,6 +122,7 @@ struct mlx5_vdpa_virtqueue {
>   	u32 virtq_id;
>   	struct mlx5_vdpa_net *ndev;
>   	u16 avail_idx;
> +	u16 used_idx;
>   	int fw_state;
>   
>   	/* keep last in the struct */
> @@ -804,6 +806,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
>   
>   	obj_context = MLX5_ADDR_OF(create_virtio_net_q_in, in, obj_context);
>   	MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx);
> +	MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx);
>   	MLX5_SET(virtio_net_q_object, obj_context, queue_feature_bit_mask_12_3,
>   		 get_features_12_3(ndev->mvdev.actual_features));
>   	vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
> @@ -1022,6 +1025,7 @@ static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m
>   struct mlx5_virtq_attr {
>   	u8 state;
>   	u16 available_index;
> +	u16 used_index;
>   };
>   
>   static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
> @@ -1052,6 +1056,7 @@ static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueu
>   	memset(attr, 0, sizeof(*attr));
>   	attr->state = MLX5_GET(virtio_net_q_object, obj_context, state);
>   	attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index);
> +	attr->used_index = MLX5_GET(virtio_net_q_object, obj_context, hw_used_index);
>   	kfree(out);
>   	return 0;
>   
> @@ -1602,6 +1607,7 @@ static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqu
>   		return err;
>   
>   	ri->avail_index = attr.available_index;
> +	ri->used_index = attr.used_index;
>   	ri->ready = mvq->ready;
>   	ri->num_ent = mvq->num_ent;
>   	ri->desc_addr = mvq->desc_addr;
> @@ -1646,6 +1652,7 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev)
>   			continue;
>   
>   		mvq->avail_idx = ri->avail_index;
> +		mvq->used_idx = ri->used_index;
>   		mvq->ready = ri->ready;
>   		mvq->num_ent = ri->num_ent;
>   		mvq->desc_addr = ri->desc_addr;
