Date:   Wed, 20 Jul 2022 14:03:40 +0800
From:   Jason Wang <jasowang@...hat.com>
To:     Xie Yongji <xieyongji@...edance.com>
Cc:     mst <mst@...hat.com>, Liu Xiaodong <xiaodong.liu@...el.com>,
        Maxime Coquelin <maxime.coquelin@...hat.com>,
        Stefan Hajnoczi <stefanha@...hat.com>,
        songmuchun@...edance.com,
        virtualization <virtualization@...ts.linux-foundation.org>,
        linux-kernel <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v3 3/5] vduse: Support using userspace pages as bounce buffer

On Wed, Jul 20, 2022 at 12:42 PM Xie Yongji <xieyongji@...edance.com> wrote:
>
> Introduce two APIs, vduse_domain_add_user_bounce_pages()
> and vduse_domain_remove_user_bounce_pages(), to support
> adding and removing userspace pages for the bounce buffer.
> When pages are added, in-flight DMA data is copied from the
> kernel bounce pages to the userspace bounce pages; when they
> are removed, it is copied back.
>
> Signed-off-by: Xie Yongji <xieyongji@...edance.com>

Acked-by: Jason Wang <jasowang@...hat.com>
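
For anyone following along, here is a minimal caller-side sketch of how
pinned userspace pages could be handed to the new API. The helper name
and the surrounding plumbing are hypothetical, not part of this series:

static struct page **example_reg_umem(struct vduse_iova_domain *domain,
				      unsigned long uaddr, size_t size)
{
	long npages = size >> PAGE_SHIFT;
	struct page **pages;
	long pinned;
	int ret;

	pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	/* Pin the userspace buffer for long-term I/O use. */
	pinned = pin_user_pages_fast(uaddr, npages,
				     FOLL_WRITE | FOLL_LONGTERM, pages);
	if (pinned != npages) {
		ret = pinned < 0 ? pinned : -EFAULT;
		goto err_unpin;
	}

	/* The domain takes its own page references via get_page(). */
	ret = vduse_domain_add_user_bounce_pages(domain, pages, pinned);
	if (ret)
		goto err_unpin;

	/* Keep the array around; it is needed to unpin at teardown. */
	return pages;

err_unpin:
	if (pinned > 0)
		unpin_user_pages(pages, pinned);
	kvfree(pages);
	return ERR_PTR(ret);
}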

> ---
>  drivers/vdpa/vdpa_user/iova_domain.c | 96 +++++++++++++++++++++++++---
>  drivers/vdpa/vdpa_user/iova_domain.h |  8 +++
>  2 files changed, 96 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
> index 50d7c08d5450..e682bc7ee6c9 100644
> --- a/drivers/vdpa/vdpa_user/iova_domain.c
> +++ b/drivers/vdpa/vdpa_user/iova_domain.c
> @@ -178,8 +178,9 @@ static void vduse_domain_bounce(struct vduse_iova_domain *domain,
>                             map->orig_phys == INVALID_PHYS_ADDR))
>                         return;
>
> -               addr = page_address(map->bounce_page) + offset;
> -               do_bounce(map->orig_phys + offset, addr, sz, dir);
> +               addr = kmap_local_page(map->bounce_page);
> +               do_bounce(map->orig_phys + offset, addr + offset, sz, dir);
> +               kunmap_local(addr);
>                 size -= sz;
>                 iova += sz;
>         }
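
A note on the kmap_local_page() change above: now that a bounce page can
be a userspace page, it may lack a permanent kernel mapping (e.g. a
highmem page on 32-bit), so page_address() is no longer safe here. An
illustrative sketch of the temporary-mapping pattern being used:

static void copy_to_any_page(struct page *page, unsigned int offset,
			     const void *src, size_t len)
{
	/* Map the page just for the duration of the copy. */
	void *addr = kmap_local_page(page);

	memcpy(addr + offset, src, len);
	kunmap_local(addr);
}
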
> @@ -210,20 +211,23 @@ static struct page *
>  vduse_domain_get_bounce_page(struct vduse_iova_domain *domain, u64 iova)
>  {
>         struct vduse_bounce_map *map;
> -       struct page *page;
> +       struct page *page = NULL;
>
> +       read_lock(&domain->bounce_lock);
>         map = &domain->bounce_maps[iova >> PAGE_SHIFT];
> -       if (!map->bounce_page)
> -               return NULL;
> +       if (domain->user_bounce_pages || !map->bounce_page)
> +               goto out;
>
>         page = map->bounce_page;
>         get_page(page);
> +out:
> +       read_unlock(&domain->bounce_lock);
>
>         return page;
>  }
>
>  static void
> -vduse_domain_free_bounce_pages(struct vduse_iova_domain *domain)
> +vduse_domain_free_kernel_bounce_pages(struct vduse_iova_domain *domain)
>  {
>         struct vduse_bounce_map *map;
>         unsigned long pfn, bounce_pfns;
> @@ -243,6 +247,73 @@ vduse_domain_free_bounce_pages(struct vduse_iova_domain *domain)
>         }
>  }
>
> +int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
> +                                      struct page **pages, int count)
> +{
> +       struct vduse_bounce_map *map;
> +       int i, ret;
> +
> +       /* Now we don't support partial mapping */
> +       if (count != (domain->bounce_size >> PAGE_SHIFT))
> +               return -EINVAL;
> +
> +       write_lock(&domain->bounce_lock);
> +       ret = -EEXIST;
> +       if (domain->user_bounce_pages)
> +               goto out;
> +
> +       for (i = 0; i < count; i++) {
> +               map = &domain->bounce_maps[i];
> +               if (map->bounce_page) {
> +                       /* Copy kernel page to user page if it's in use */
> +                       if (map->orig_phys != INVALID_PHYS_ADDR)
> +                               memcpy_to_page(pages[i], 0,
> +                                              page_address(map->bounce_page),
> +                                              PAGE_SIZE);
> +                       __free_page(map->bounce_page);
> +               }
> +               map->bounce_page = pages[i];
> +               get_page(pages[i]);
> +       }
> +       domain->user_bounce_pages = true;
> +       ret = 0;
> +out:
> +       write_unlock(&domain->bounce_lock);
> +
> +       return ret;
> +}
> +
> +void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
> +{
> +       struct vduse_bounce_map *map;
> +       unsigned long i, count;
> +
> +       write_lock(&domain->bounce_lock);
> +       if (!domain->user_bounce_pages)
> +               goto out;
> +
> +       count = domain->bounce_size >> PAGE_SHIFT;
> +       for (i = 0; i < count; i++) {
> +               struct page *page = NULL;
> +
> +               map = &domain->bounce_maps[i];
> +               if (WARN_ON(!map->bounce_page))
> +                       continue;
> +
> +               /* Copy user page to kernel page if it's in use */
> +               if (map->orig_phys != INVALID_PHYS_ADDR) {
> +                       page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
> +                       memcpy_from_page(page_address(page),
> +                                        map->bounce_page, 0, PAGE_SIZE);
> +               }
> +               put_page(map->bounce_page);
> +               map->bounce_page = page;
> +       }
> +       domain->user_bounce_pages = false;
> +out:
> +       write_unlock(&domain->bounce_lock);
> +}
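
And the matching teardown, again as a hypothetical sketch (helper name
not part of this series): once the domain has switched back to kernel
bounce pages, the caller drops its long-term pins, marking the pages
dirty since the device may have written through the bounce buffer:

static void example_dereg_umem(struct vduse_iova_domain *domain,
			       struct page **pages, long npages)
{
	vduse_domain_remove_user_bounce_pages(domain);
	/* Drop the long-term pins; the device may have dirtied the pages. */
	unpin_user_pages_dirty_lock(pages, npages, true);
	kvfree(pages);
}
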
> +
>  void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain)
>  {
>         if (!domain->bounce_map)
> @@ -318,13 +389,18 @@ dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
>         if (vduse_domain_init_bounce_map(domain))
>                 goto err;
>
> +       read_lock(&domain->bounce_lock);
>         if (vduse_domain_map_bounce_page(domain, (u64)iova, (u64)size, pa))
> -               goto err;
> +               goto err_unlock;
>
>         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
>                 vduse_domain_bounce(domain, iova, size, DMA_TO_DEVICE);
>
> +       read_unlock(&domain->bounce_lock);
> +
>         return iova;
> +err_unlock:
> +       read_unlock(&domain->bounce_lock);
>  err:
>         vduse_domain_free_iova(iovad, iova, size);
>         return DMA_MAPPING_ERROR;
> @@ -336,10 +412,12 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
>  {
>         struct iova_domain *iovad = &domain->stream_iovad;
>
> +       read_lock(&domain->bounce_lock);
>         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
>                 vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);
>
>         vduse_domain_unmap_bounce_page(domain, (u64)dma_addr, (u64)size);
> +       read_unlock(&domain->bounce_lock);
>         vduse_domain_free_iova(iovad, dma_addr, size);
>  }
>
> @@ -447,7 +525,8 @@ static int vduse_domain_release(struct inode *inode, struct file *file)
>
>         spin_lock(&domain->iotlb_lock);
>         vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
> -       vduse_domain_free_bounce_pages(domain);
> +       vduse_domain_remove_user_bounce_pages(domain);
> +       vduse_domain_free_kernel_bounce_pages(domain);
>         spin_unlock(&domain->iotlb_lock);
>         put_iova_domain(&domain->stream_iovad);
>         put_iova_domain(&domain->consistent_iovad);
> @@ -507,6 +586,7 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
>                 goto err_file;
>
>         domain->file = file;
> +       rwlock_init(&domain->bounce_lock);
>         spin_lock_init(&domain->iotlb_lock);
>         init_iova_domain(&domain->stream_iovad,
>                         PAGE_SIZE, IOVA_START_PFN);
> diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
> index 2722d9b8e21a..4e0e50e7ac15 100644
> --- a/drivers/vdpa/vdpa_user/iova_domain.h
> +++ b/drivers/vdpa/vdpa_user/iova_domain.h
> @@ -14,6 +14,7 @@
>  #include <linux/iova.h>
>  #include <linux/dma-mapping.h>
>  #include <linux/vhost_iotlb.h>
> +#include <linux/rwlock.h>
>
>  #define IOVA_START_PFN 1
>
> @@ -34,6 +35,8 @@ struct vduse_iova_domain {
>         struct vhost_iotlb *iotlb;
>         spinlock_t iotlb_lock;
>         struct file *file;
> +       bool user_bounce_pages;
> +       rwlock_t bounce_lock;
>  };
>
>  int vduse_domain_set_map(struct vduse_iova_domain *domain,
> @@ -61,6 +64,11 @@ void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
>
>  void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain);
>
> +int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
> +                                      struct page **pages, int count);
> +
> +void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain);
> +
>  void vduse_domain_destroy(struct vduse_iova_domain *domain);
>
>  struct vduse_iova_domain *vduse_domain_create(unsigned long iova_limit,
> --
> 2.20.1
>
