Date:   Tue, 23 Mar 2021 11:09:20 +0800
From:   Jason Wang <jasowang@...hat.com>
To:     Xie Yongji <xieyongji@...edance.com>, mst@...hat.com,
        stefanha@...hat.com, sgarzare@...hat.com, parav@...dia.com,
        bob.liu@...cle.com, hch@...radead.org, rdunlap@...radead.org,
        willy@...radead.org, viro@...iv.linux.org.uk, axboe@...nel.dk,
        bcrl@...ck.org, corbet@....net, mika.penttila@...tfour.com,
        dan.carpenter@...cle.com
Cc:     virtualization@...ts.linux-foundation.org, netdev@...r.kernel.org,
        kvm@...r.kernel.org, linux-fsdevel@...r.kernel.org
Subject: Re: [PATCH v5 06/11] vdpa: factor out vhost_vdpa_pa_map()


On 2021/3/15 1:37 PM, Xie Yongji wrote:
> The upcoming patch is going to support VA mapping. So let's
> first factor out the PA mapping logic to make the code
> more readable.
>
> Suggested-by: Jason Wang <jasowang@...hat.com>
> Signed-off-by: Xie Yongji <xieyongji@...edance.com>


Acked-by: Jason Wang <jasowang@...hat.com>

While at it, I think it's better to factor out the unmap() part as well,
since the unpinning and page-dirty handling are not needed for a VA device.
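
A rough sketch of what that split could look like (the helper names here are
just illustrative, not from this series; the PA path keeps the dirty/unpin
logic of the existing unmap code, while the VA path only drops the iotlb
entries):

static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		/* PA mappings were pinned at map time, so dirty and unpin. */
		pinned = map->size >> PAGE_SHIFT;
		for (pfn = map->addr >> PAGE_SHIFT;
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;

	/* Nothing was pinned for a VA device, just drop the entries. */
	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL)
		vhost_iotlb_map_free(iotlb, map);
}

Then vhost_vdpa_unmap() could simply call one or the other depending on
whether the device uses VA.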

Thanks


> ---
>   drivers/vhost/vdpa.c | 46 ++++++++++++++++++++++++++++------------------
>   1 file changed, 28 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
> index b24ec69a374b..7c83fbf3edac 100644
> --- a/drivers/vhost/vdpa.c
> +++ b/drivers/vhost/vdpa.c
> @@ -579,37 +579,28 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
>   	}
>   }
>   
> -static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
> -					   struct vhost_iotlb_msg *msg)
> +static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
> +			     u64 iova, u64 size, u64 uaddr, u32 perm)
>   {
>   	struct vhost_dev *dev = &v->vdev;
> -	struct vhost_iotlb *iotlb = dev->iotlb;
>   	struct page **page_list;
>   	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
>   	unsigned int gup_flags = FOLL_LONGTERM;
>   	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
>   	unsigned long lock_limit, sz2pin, nchunks, i;
> -	u64 iova = msg->iova;
> +	u64 start = iova;
>   	long pinned;
>   	int ret = 0;
>   
> -	if (msg->iova < v->range.first ||
> -	    msg->iova + msg->size - 1 > v->range.last)
> -		return -EINVAL;
> -
> -	if (vhost_iotlb_itree_first(iotlb, msg->iova,
> -				    msg->iova + msg->size - 1))
> -		return -EEXIST;
> -
>   	/* Limit the use of memory for bookkeeping */
>   	page_list = (struct page **) __get_free_page(GFP_KERNEL);
>   	if (!page_list)
>   		return -ENOMEM;
>   
> -	if (msg->perm & VHOST_ACCESS_WO)
> +	if (perm & VHOST_ACCESS_WO)
>   		gup_flags |= FOLL_WRITE;
>   
> -	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
> +	npages = PAGE_ALIGN(size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
>   	if (!npages) {
>   		ret = -EINVAL;
>   		goto free;
> @@ -623,7 +614,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>   		goto unlock;
>   	}
>   
> -	cur_base = msg->uaddr & PAGE_MASK;
> +	cur_base = uaddr & PAGE_MASK;
>   	iova &= PAGE_MASK;
>   	nchunks = 0;
>   
> @@ -654,7 +645,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>   				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
>   				ret = vhost_vdpa_map(v, iova, csize,
>   						     map_pfn << PAGE_SHIFT,
> -						     msg->perm);
> +						     perm);
>   				if (ret) {
>   					/*
>   					 * Unpin the pages that are left unmapped
> @@ -683,7 +674,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>   
>   	/* Pin the rest chunk */
>   	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
> -			     map_pfn << PAGE_SHIFT, msg->perm);
> +			     map_pfn << PAGE_SHIFT, perm);
>   out:
>   	if (ret) {
>   		if (nchunks) {
> @@ -702,13 +693,32 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>   			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
>   				unpin_user_page(pfn_to_page(pfn));
>   		}
> -		vhost_vdpa_unmap(v, msg->iova, msg->size);
> +		vhost_vdpa_unmap(v, start, size);
>   	}
>   unlock:
>   	mmap_read_unlock(dev->mm);
>   free:
>   	free_page((unsigned long)page_list);
>   	return ret;
> +
> +}
> +
> +static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
> +					   struct vhost_iotlb_msg *msg)
> +{
> +	struct vhost_dev *dev = &v->vdev;
> +	struct vhost_iotlb *iotlb = dev->iotlb;
> +
> +	if (msg->iova < v->range.first ||
> +	    msg->iova + msg->size - 1 > v->range.last)
> +		return -EINVAL;
> +
> +	if (vhost_iotlb_itree_first(iotlb, msg->iova,
> +				    msg->iova + msg->size - 1))
> +		return -EEXIST;
> +
> +	return vhost_vdpa_pa_map(v, msg->iova, msg->size, msg->uaddr,
> +				 msg->perm);
>   }
>   
>   static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
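
For context on the "upcoming patch" mentioned in the commit message: with
vhost_vdpa_pa_map() factored out like this, the dispatch in
vhost_vdpa_process_iotlb_update() could presumably end up looking roughly
like the following once a VA path exists (the use_va flag and
vhost_vdpa_va_map() are placeholders for whatever the follow-up patch
actually introduces):

static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;

	if (msg->iova < v->range.first ||
	    msg->iova + msg->size - 1 > v->range.last)
		return -EINVAL;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	/* Hypothetical: VA devices map user VAs directly, no pinning. */
	if (v->vdpa->use_va)
		return vhost_vdpa_va_map(v, msg->iova, msg->size,
					 msg->uaddr, msg->perm);

	return vhost_vdpa_pa_map(v, msg->iova, msg->size, msg->uaddr,
				 msg->perm);
}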
