Message-ID: <CAJaqyWdu9O_q7dzXk98XcUE=wT1J3gA89j_3DAEJ7y1p_V+c5w@mail.gmail.com>
Date: Wed, 17 Sep 2025 18:34:14 +0200
From: Eugenio Perez Martin <eperezma@...hat.com>
To: Jason Wang <jasowang@...hat.com>
Cc: "Michael S . Tsirkin" <mst@...hat.com>, Stefano Garzarella <sgarzare@...hat.com>, 
	Xuan Zhuo <xuanzhuo@...ux.alibaba.com>, linux-kernel@...r.kernel.org, 
	Maxime Coquelin <mcoqueli@...hat.com>, Yongji Xie <xieyongji@...edance.com>, 
	Cindy Lu <lulu@...hat.com>, Laurent Vivier <lvivier@...hat.com>, virtualization@...ts.linux.dev
Subject: Re: [PATCH v2 5/7] vduse: create vduse_as to make it an array

On Wed, Sep 17, 2025 at 10:46 AM Jason Wang <jasowang@...hat.com> wrote:
>
> On Tue, Sep 16, 2025 at 9:09 PM Eugenio Pérez <eperezma@...hat.com> wrote:
> >
> > This is a first step so we can support more than one address space.
> > No change in the code flow is intended.
> >
> > Acked-by: Jason Wang <jasowang@...hat.com>
>
> Sorry, I think I found something new.
>
> > Signed-off-by: Eugenio Pérez <eperezma@...hat.com>
> > ---
> >  drivers/vdpa/vdpa_user/vduse_dev.c | 114 +++++++++++++++--------------
> >  1 file changed, 59 insertions(+), 55 deletions(-)
> >
> > diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
> > index 9c12ae72abc2..b45b1d22784f 100644
> > --- a/drivers/vdpa/vdpa_user/vduse_dev.c
> > +++ b/drivers/vdpa/vdpa_user/vduse_dev.c
> > @@ -86,6 +86,12 @@ struct vduse_umem {
> >         struct mm_struct *mm;
> >  };
> >
> > +struct vduse_as {
> > +       struct vduse_iova_domain *domain;
> > +       struct vduse_umem *umem;
> > +       struct mutex mem_lock;
> > +};
> > +
> >  struct vduse_vq_group_int {
> >         struct vduse_dev *dev;
>
> I would expect this to have an indirection for the as, e.g. unsigned int as;
>

This is a change from the previous commit, and *dev is needed anyway
for taking the rwlock protecting the vq_group -> asid relation, as
functions like vduse_dev_sync_single_for_cpu could run at the same
time as VHOST_VDPA_SET_GROUP_ASID.

I'm ok with adding the "unsigned int as" indirection, but it involves
an extra memory walk that is not needed.
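
To make the trade-off concrete, the two shapes we are discussing would
look roughly like this (just a sketch; vduse_vq_group_int_alt is only
an illustrative name, not something any patch adds):

struct vduse_vq_group_int {
	struct vduse_dev *dev;	/* back-pointer kept by this patch; the
				 * group's asid is then resolved under the
				 * rwlock protecting the vq_group -> asid
				 * relation
				 */
};

struct vduse_vq_group_int_alt {
	unsigned int as;	/* direct index into the as array, but it has
				 * to be kept coherent with
				 * VHOST_VDPA_SET_GROUP_ASID updates
				 */
};

With the back-pointer we pay one extra dereference on the mapping
paths, but the asid lookup and the SET_GROUP_ASID update naturally
serialize on the same lock.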

> >  };
> > @@ -94,7 +100,7 @@ struct vduse_dev {
> >         struct vduse_vdpa *vdev;
> >         struct device *dev;
> >         struct vduse_virtqueue **vqs;
> > -       struct vduse_iova_domain *domain;
> > +       struct vduse_as as;
>
> This needs to be an array per title:
>
> "vduse: create vduse_as to make it an array"
>

I meant "to make it an array in next patches". Would it work if I just
change the patch subject to:

"create vduse_as to contain per-as members"

And then specify that it will be converted to an array later in the
patch message? Or do you prefer me just to squash the patches?
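
For reference, the end state I'm aiming at looks roughly like this
(only a sketch; the nas member and the vduse_get_domain() helper are
made-up names for illustration, not necessarily what the later patches
add):

struct vduse_dev {
	...
	struct vduse_as *as;	/* one entry per address space, indexed by asid */
	u32 nas;
	...
};

static struct vduse_iova_domain *vduse_get_domain(struct vduse_dev *dev,
						  unsigned int asid)
{
	if (asid >= dev->nas)
		return NULL;

	return dev->as[asid].domain;
}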

> >         char *name;
> >         struct mutex lock;
> >         spinlock_t msg_lock;
> > @@ -122,9 +128,7 @@ struct vduse_dev {
> >         u32 vq_num;
> >         u32 vq_align;
> >         u32 ngroups;
> > -       struct vduse_umem *umem;
> >         struct vduse_vq_group_int *groups;
> > -       struct mutex mem_lock;
> >         unsigned int bounce_size;
> >         rwlock_t domain_lock;
> >  };
> > @@ -439,7 +443,7 @@ static __poll_t vduse_dev_poll(struct file *file, poll_table *wait)
> >  static void vduse_dev_reset(struct vduse_dev *dev)
> >  {
> >         int i;
> > -       struct vduse_iova_domain *domain = dev->domain;
> > +       struct vduse_iova_domain *domain = dev->as.domain;
>
> This should be an iteration over all address spaces?
>

Yes, it will be in the next patches.

The rest of the comments actually have the same reply: this patch just
prepares the struct so it can become an array, keeping each patch as
small as possible.
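
So, for example, the reset path would eventually become something
along these lines (sketch only, assuming the dev->as[] / nas layout
from above and the existing vduse_domain_reset_bounce_map() call):

static void vduse_dev_reset(struct vduse_dev *dev)
{
	unsigned int asid;

	for (asid = 0; asid < dev->nas; asid++) {
		struct vduse_iova_domain *domain = dev->as[asid].domain;

		/* The coherent mappings are handled in vduse_dev_free_coherent() */
		if (domain && domain->bounce_map)
			vduse_domain_reset_bounce_map(domain);
	}

	/* the rest of the per-device and per-vq reset stays as today */
	...
}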

> >
> >         /* The coherent mappings are handled in vduse_dev_free_coherent() */
> >         if (domain && domain->bounce_map)
> > @@ -788,13 +792,13 @@ static int vduse_vdpa_set_map(struct vdpa_device *vdpa,
> >         struct vduse_dev *dev = vdpa_to_vduse(vdpa);
> >         int ret;
> >
> > -       ret = vduse_domain_set_map(dev->domain, iotlb);
> > +       ret = vduse_domain_set_map(dev->as.domain, iotlb);
>
> The as should be indexed by asid here, or I think I'm missing
> something. Or at least fail with asid != 0?
>
> >         if (ret)
> >                 return ret;
> >
> >         ret = vduse_dev_update_iotlb(dev, 0ULL, ULLONG_MAX);
> >         if (ret) {
> > -               vduse_domain_clear_map(dev->domain, iotlb);
> > +               vduse_domain_clear_map(dev->as.domain, iotlb);
> >                 return ret;
> >         }
> >
> > @@ -846,7 +850,7 @@ static void vduse_dev_sync_single_for_device(union virtio_map token,
> >                                              enum dma_data_direction dir)
> >  {
> >         struct vduse_dev *vdev = token.group->dev;
> > -       struct vduse_iova_domain *domain = vdev->domain;
> > +       struct vduse_iova_domain *domain = vdev->as.domain;
>
> The address space should be fetched from the virtqueue group instead
> of the hard-coded one.
>
> >
> >         vduse_domain_sync_single_for_device(domain, dma_addr, size, dir);
> >  }
> > @@ -856,7 +860,7 @@ static void vduse_dev_sync_single_for_cpu(union virtio_map token,
> >                                              enum dma_data_direction dir)
> >  {
> >         struct vduse_dev *vdev = token.group->dev;
> > -       struct vduse_iova_domain *domain = vdev->domain;
> > +       struct vduse_iova_domain *domain = vdev->as.domain;
> >
> >         vduse_domain_sync_single_for_cpu(domain, dma_addr, size, dir);
> >  }
> > @@ -867,7 +871,7 @@ static dma_addr_t vduse_dev_map_page(union virtio_map token, struct page *page,
> >                                      unsigned long attrs)
> >  {
> >         struct vduse_dev *vdev = token.group->dev;
> > -       struct vduse_iova_domain *domain = vdev->domain;
> > +       struct vduse_iova_domain *domain = vdev->as.domain;
> >
> >         return vduse_domain_map_page(domain, page, offset, size, dir, attrs);
> >  }
> > @@ -877,7 +881,7 @@ static void vduse_dev_unmap_page(union virtio_map token, dma_addr_t dma_addr,
> >                                  unsigned long attrs)
> >  {
> >         struct vduse_dev *vdev = token.group->dev;
> > -       struct vduse_iova_domain *domain = vdev->domain;
> > +       struct vduse_iova_domain *domain = vdev->as.domain;
> >
> >         return vduse_domain_unmap_page(domain, dma_addr, size, dir, attrs);
> >  }
> > @@ -886,7 +890,7 @@ static void *vduse_dev_alloc_coherent(union virtio_map token, size_t size,
> >                                       dma_addr_t *dma_addr, gfp_t flag)
> >  {
> >         struct vduse_dev *vdev = token.group->dev;
> > -       struct vduse_iova_domain *domain = vdev->domain;
> > +       struct vduse_iova_domain *domain = vdev->as.domain;
> >         unsigned long iova;
> >         void *addr;
> >
> > @@ -906,7 +910,7 @@ static void vduse_dev_free_coherent(union virtio_map token, size_t size,
> >                                     unsigned long attrs)
> >  {
> >         struct vduse_dev *vdev = token.group->dev;
> > -       struct vduse_iova_domain *domain = vdev->domain;
> > +       struct vduse_iova_domain *domain = vdev->as.domain;
> >
> >         vduse_domain_free_coherent(domain, size, vaddr, dma_addr, attrs);
> >  }
> > @@ -914,7 +918,7 @@ static void vduse_dev_free_coherent(union virtio_map token, size_t size,
> >  static bool vduse_dev_need_sync(union virtio_map token, dma_addr_t dma_addr)
> >  {
> >         struct vduse_dev *vdev = token.group->dev;
> > -       struct vduse_iova_domain *domain = vdev->domain;
> > +       struct vduse_iova_domain *domain = vdev->as.domain;
> >
> >         return dma_addr < domain->bounce_size;
> >  }
> > @@ -929,7 +933,7 @@ static int vduse_dev_mapping_error(union virtio_map token, dma_addr_t dma_addr)
> >  static size_t vduse_dev_max_mapping_size(union virtio_map token)
> >  {
> >         struct vduse_dev *vdev = token.group->dev;
> > -       struct vduse_iova_domain *domain = vdev->domain;
> > +       struct vduse_iova_domain *domain = vdev->as.domain;
> >
> >         return domain->bounce_size;
> >  }
> > @@ -1076,29 +1080,29 @@ static int vduse_dev_dereg_umem(struct vduse_dev *dev,
> >  {
> >         int ret;
> >
> > -       mutex_lock(&dev->mem_lock);
> > +       mutex_lock(&dev->as.mem_lock);
> >         ret = -ENOENT;
> > -       if (!dev->umem)
> > +       if (!dev->as.umem)
> >                 goto unlock;
> >
> >         ret = -EINVAL;
> > -       if (!dev->domain)
> > +       if (!dev->as.domain)
> >                 goto unlock;
> >
> > -       if (dev->umem->iova != iova || size != dev->domain->bounce_size)
> > +       if (dev->as.umem->iova != iova || size != dev->as.domain->bounce_size)
>
> I think it would be better to assume ASID = 0 here to ease the future extension.
>
> >                 goto unlock;
> >
> > -       vduse_domain_remove_user_bounce_pages(dev->domain);
> > -       unpin_user_pages_dirty_lock(dev->umem->pages,
> > -                                   dev->umem->npages, true);
> > -       atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm);
> > -       mmdrop(dev->umem->mm);
> > -       vfree(dev->umem->pages);
> > -       kfree(dev->umem);
> > -       dev->umem = NULL;
> > +       vduse_domain_remove_user_bounce_pages(dev->as.domain);
> > +       unpin_user_pages_dirty_lock(dev->as.umem->pages,
> > +                                   dev->as.umem->npages, true);
> > +       atomic64_sub(dev->as.umem->npages, &dev->as.umem->mm->pinned_vm);
> > +       mmdrop(dev->as.umem->mm);
> > +       vfree(dev->as.umem->pages);
> > +       kfree(dev->as.umem);
> > +       dev->as.umem = NULL;
> >         ret = 0;
> >  unlock:
> > -       mutex_unlock(&dev->mem_lock);
> > +       mutex_unlock(&dev->as.mem_lock);
> >         return ret;
> >  }
> >
> > @@ -1111,14 +1115,14 @@ static int vduse_dev_reg_umem(struct vduse_dev *dev,
> >         unsigned long npages, lock_limit;
> >         int ret;
> >
> > -       if (!dev->domain || !dev->domain->bounce_map ||
> > -           size != dev->domain->bounce_size ||
> > +       if (!dev->as.domain || !dev->as.domain->bounce_map ||
> > +           size != dev->as.domain->bounce_size ||
> >             iova != 0 || uaddr & ~PAGE_MASK)
> >                 return -EINVAL;
> >
> > -       mutex_lock(&dev->mem_lock);
> > +       mutex_lock(&dev->as.mem_lock);
> >         ret = -EEXIST;
> > -       if (dev->umem)
> > +       if (dev->as.umem)
> >                 goto unlock;
> >
> >         ret = -ENOMEM;
> > @@ -1142,7 +1146,7 @@ static int vduse_dev_reg_umem(struct vduse_dev *dev,
> >                 goto out;
> >         }
> >
> > -       ret = vduse_domain_add_user_bounce_pages(dev->domain,
> > +       ret = vduse_domain_add_user_bounce_pages(dev->as.domain,
> >                                                  page_list, pinned);
> >         if (ret)
> >                 goto out;
> > @@ -1155,7 +1159,7 @@ static int vduse_dev_reg_umem(struct vduse_dev *dev,
> >         umem->mm = current->mm;
> >         mmgrab(current->mm);
> >
> > -       dev->umem = umem;
> > +       dev->as.umem = umem;
> >  out:
> >         if (ret && pinned > 0)
> >                 unpin_user_pages(page_list, pinned);
> > @@ -1166,7 +1170,7 @@ static int vduse_dev_reg_umem(struct vduse_dev *dev,
> >                 vfree(page_list);
> >                 kfree(umem);
> >         }
> > -       mutex_unlock(&dev->mem_lock);
> > +       mutex_unlock(&dev->as.mem_lock);
> >         return ret;
> >  }
> >
> > @@ -1212,12 +1216,12 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
> >                         break;
> >
> >                 read_lock(&dev->domain_lock);
> > -               if (!dev->domain) {
> > +               if (!dev->as.domain) {
> >                         read_unlock(&dev->domain_lock);
> >                         break;
> >                 }
> > -               spin_lock(&dev->domain->iotlb_lock);
> > -               map = vhost_iotlb_itree_first(dev->domain->iotlb,
> > +               spin_lock(&dev->as.domain->iotlb_lock);
> > +               map = vhost_iotlb_itree_first(dev->as.domain->iotlb,
> >                                               entry.start, entry.last);
> >                 if (map) {
> >                         map_file = (struct vdpa_map_file *)map->opaque;
> > @@ -1227,7 +1231,7 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
> >                         entry.last = map->last;
> >                         entry.perm = map->perm;
> >                 }
> > -               spin_unlock(&dev->domain->iotlb_lock);
> > +               spin_unlock(&dev->as.domain->iotlb_lock);
> >                 read_unlock(&dev->domain_lock);
> >                 ret = -EINVAL;
> >                 if (!f)
> > @@ -1430,22 +1434,22 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
> >                         break;
> >
> >                 read_lock(&dev->domain_lock);
> > -               if (!dev->domain) {
> > +               if (!dev->as.domain) {
> >                         read_unlock(&dev->domain_lock);
> >                         break;
> >                 }
> > -               spin_lock(&dev->domain->iotlb_lock);
> > -               map = vhost_iotlb_itree_first(dev->domain->iotlb,
> > +               spin_lock(&dev->as.domain->iotlb_lock);
> > +               map = vhost_iotlb_itree_first(dev->as.domain->iotlb,
> >                                               info.start, info.last);
> >                 if (map) {
> >                         info.start = map->start;
> >                         info.last = map->last;
> >                         info.capability = 0;
> > -                       if (dev->domain->bounce_map && map->start == 0 &&
> > -                           map->last == dev->domain->bounce_size - 1)
> > +                       if (dev->as.domain->bounce_map && map->start == 0 &&
> > +                           map->last == dev->as.domain->bounce_size - 1)
> >                                 info.capability |= VDUSE_IOVA_CAP_UMEM;
> >                 }
> > -               spin_unlock(&dev->domain->iotlb_lock);
> > +               spin_unlock(&dev->as.domain->iotlb_lock);
> >                 read_unlock(&dev->domain_lock);
> >                 if (!map)
> >                         break;
> > @@ -1470,8 +1474,8 @@ static int vduse_dev_release(struct inode *inode, struct file *file)
> >         struct vduse_dev *dev = file->private_data;
> >
> >         write_lock(&dev->domain_lock);
> > -       if (dev->domain)
> > -               vduse_dev_dereg_umem(dev, 0, dev->domain->bounce_size);
> > +       if (dev->as.domain)
> > +               vduse_dev_dereg_umem(dev, 0, dev->as.domain->bounce_size);
> >         write_unlock(&dev->domain_lock);
> >         spin_lock(&dev->msg_lock);
> >         /* Make sure the inflight messages can processed after reconncection */
> > @@ -1690,7 +1694,7 @@ static struct vduse_dev *vduse_dev_create(void)
> >                 return NULL;
> >
> >         mutex_init(&dev->lock);
> > -       mutex_init(&dev->mem_lock);
> > +       mutex_init(&dev->as.mem_lock);
> >         rwlock_init(&dev->domain_lock);
> >         spin_lock_init(&dev->msg_lock);
> >         INIT_LIST_HEAD(&dev->send_list);
> > @@ -1741,8 +1745,8 @@ static int vduse_destroy_dev(char *name)
> >         idr_remove(&vduse_idr, dev->minor);
> >         kvfree(dev->config);
> >         vduse_dev_deinit_vqs(dev);
> > -       if (dev->domain)
> > -               vduse_domain_destroy(dev->domain);
> > +       if (dev->as.domain)
> > +               vduse_domain_destroy(dev->as.domain);
> >         kfree(dev->name);
> >         kfree(dev->groups);
> >         vduse_dev_destroy(dev);
> > @@ -1858,7 +1862,7 @@ static ssize_t bounce_size_store(struct device *device,
> >
> >         ret = -EPERM;
> >         write_lock(&dev->domain_lock);
> > -       if (dev->domain)
> > +       if (dev->as.domain)
> >                 goto unlock;
> >
> >         ret = kstrtouint(buf, 10, &bounce_size);
> > @@ -2109,11 +2113,11 @@ static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
> >                 return ret;
> >
> >         write_lock(&dev->domain_lock);
> > -       if (!dev->domain)
> > -               dev->domain = vduse_domain_create(VDUSE_IOVA_SIZE - 1,
> > +       if (!dev->as.domain)
> > +               dev->as.domain = vduse_domain_create(VDUSE_IOVA_SIZE - 1,
> >                                                   dev->bounce_size);
> >         write_unlock(&dev->domain_lock);
> > -       if (!dev->domain) {
> > +       if (!dev->as.domain) {
> >                 put_device(&dev->vdev->vdpa.dev);
> >                 return -ENOMEM;
> >         }
> > @@ -2122,8 +2126,8 @@ static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
> >         if (ret) {
> >                 put_device(&dev->vdev->vdpa.dev);
> >                 write_lock(&dev->domain_lock);
> > -               vduse_domain_destroy(dev->domain);
> > -               dev->domain = NULL;
> > +               vduse_domain_destroy(dev->as.domain);
> > +               dev->as.domain = NULL;
> >                 write_unlock(&dev->domain_lock);
> >                 return ret;
> >         }
> > --
> > 2.51.0
> >
>
> Thanks
>

