Message-ID: <CACGkMEuY359QtVeTY40Z_L1zhG87rB2te=qc7YM6gZxcKM=m7w@mail.gmail.com>
Date: Wed, 17 Sep 2025 16:36:49 +0800
From: Jason Wang <jasowang@...hat.com>
To: Eugenio Pérez <eperezma@...hat.com>
Cc: "Michael S . Tsirkin" <mst@...hat.com>, Stefano Garzarella <sgarzare@...hat.com>, 
	Xuan Zhuo <xuanzhuo@...ux.alibaba.com>, linux-kernel@...r.kernel.org, 
	Maxime Coquelin <mcoqueli@...hat.com>, Yongji Xie <xieyongji@...edance.com>, 
	Cindy Lu <lulu@...hat.com>, Laurent Vivier <lvivier@...hat.com>, virtualization@...ts.linux.dev
Subject: Re: [PATCH v2 4/7] vduse: return internal vq group struct as map token

On Tue, Sep 16, 2025 at 9:09 PM Eugenio Pérez <eperezma@...hat.com> wrote:
>
> Return the internal struct that represents the vq group as the virtqueue
> map token, instead of the device.  This allows the map functions to
> access per-group information.
>
> At this moment all the virtqueues share the same vq group, which can only
> point to ASID 0.  This change prepares the infrastructure for actual
> per-group address space handling.
>
> Signed-off-by: Eugenio Pérez <eperezma@...hat.com>
> ---
> RFC v3:
> * Make the vq groups a dynamic array to support an arbitrary number of
>   them.
> ---
>  drivers/vdpa/vdpa_user/vduse_dev.c | 52 ++++++++++++++++++++++++------
>  include/linux/virtio.h             |  6 ++--
>  2 files changed, 46 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
> index 42f8807911d4..9c12ae72abc2 100644
> --- a/drivers/vdpa/vdpa_user/vduse_dev.c
> +++ b/drivers/vdpa/vdpa_user/vduse_dev.c
> @@ -23,6 +23,7 @@
>  #include <linux/uio.h>
>  #include <linux/vdpa.h>
>  #include <linux/nospec.h>
> +#include <linux/virtio.h>
>  #include <linux/vmalloc.h>
>  #include <linux/sched/mm.h>
>  #include <uapi/linux/vduse.h>
> @@ -85,6 +86,10 @@ struct vduse_umem {
>         struct mm_struct *mm;
>  };
>
> +struct vduse_vq_group_int {
> +       struct vduse_dev *dev;
> +};

I remember we had some discussion about this, and the conclusion was to
pick a better name.

Maybe just vduse_vq_group?

And to be conceptually correct, we need to use iova_domain here
instead of the vduse_dev. More below.
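
E.g. something along these lines, just as a sketch of the idea (untested,
the field name is only for illustration):

    struct vduse_vq_group {
            /* iova domain used to do the mappings for this group */
            struct vduse_iova_domain *domain;
    };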

> +
>  struct vduse_dev {
>         struct vduse_vdpa *vdev;
>         struct device *dev;
> @@ -118,6 +123,7 @@ struct vduse_dev {
>         u32 vq_align;
>         u32 ngroups;
>         struct vduse_umem *umem;
> +       struct vduse_vq_group_int *groups;
>         struct mutex mem_lock;
>         unsigned int bounce_size;
>         rwlock_t domain_lock;
> @@ -602,6 +608,15 @@ static u32 vduse_get_vq_group(struct vdpa_device *vdpa, u16 idx)
>         return dev->vqs[idx]->vq_group;
>  }
>
> +static union virtio_map vduse_get_vq_map(struct vdpa_device *vdpa, u16 idx)
> +{
> +       struct vduse_dev *dev = vdpa_to_vduse(vdpa);
> +       u32 vq_group = dev->vqs[idx]->vq_group;
> +       union virtio_map ret = { .group = &dev->groups[vq_group] };
> +
> +       return ret;
> +}
> +
>  static int vduse_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 idx,
>                                 struct vdpa_vq_state *state)
>  {
> @@ -822,6 +837,7 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
>         .get_vq_affinity        = vduse_vdpa_get_vq_affinity,
>         .reset                  = vduse_vdpa_reset,
>         .set_map                = vduse_vdpa_set_map,
> +       .get_vq_map             = vduse_get_vq_map,
>         .free                   = vduse_vdpa_free,
>  };
>
> @@ -829,7 +845,8 @@ static void vduse_dev_sync_single_for_device(union virtio_map token,
>                                              dma_addr_t dma_addr, size_t size,
>                                              enum dma_data_direction dir)
>  {
> -       struct vduse_iova_domain *domain = token.iova_domain;
> +       struct vduse_dev *vdev = token.group->dev;
> +       struct vduse_iova_domain *domain = vdev->domain;

If we really want to do this, we need to move the iova_domain into the group.
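
I.e. the map helpers could then take the domain directly from the group,
roughly like this (sketch only, assuming the group gains a "domain" member
as sketched above):

    static dma_addr_t vduse_dev_map_page(union virtio_map token, struct page *page,
                                         unsigned long offset, size_t size,
                                         enum dma_data_direction dir,
                                         unsigned long attrs)
    {
            /* per-group iova domain instead of going through the device */
            struct vduse_iova_domain *domain = token.group->domain;

            return vduse_domain_map_page(domain, page, offset, size, dir, attrs);
    }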

>
>         vduse_domain_sync_single_for_device(domain, dma_addr, size, dir);
>  }
> @@ -838,7 +855,8 @@ static void vduse_dev_sync_single_for_cpu(union virtio_map token,
>                                              dma_addr_t dma_addr, size_t size,
>                                              enum dma_data_direction dir)
>  {
> -       struct vduse_iova_domain *domain = token.iova_domain;
> +       struct vduse_dev *vdev = token.group->dev;
> +       struct vduse_iova_domain *domain = vdev->domain;
>
>         vduse_domain_sync_single_for_cpu(domain, dma_addr, size, dir);
>  }
> @@ -848,7 +866,8 @@ static dma_addr_t vduse_dev_map_page(union virtio_map token, struct page *page,
>                                      enum dma_data_direction dir,
>                                      unsigned long attrs)
>  {
> -       struct vduse_iova_domain *domain = token.iova_domain;
> +       struct vduse_dev *vdev = token.group->dev;
> +       struct vduse_iova_domain *domain = vdev->domain;
>
>         return vduse_domain_map_page(domain, page, offset, size, dir, attrs);
>  }
> @@ -857,7 +876,8 @@ static void vduse_dev_unmap_page(union virtio_map token, dma_addr_t dma_addr,
>                                  size_t size, enum dma_data_direction dir,
>                                  unsigned long attrs)
>  {
> -       struct vduse_iova_domain *domain = token.iova_domain;
> +       struct vduse_dev *vdev = token.group->dev;
> +       struct vduse_iova_domain *domain = vdev->domain;
>
>         return vduse_domain_unmap_page(domain, dma_addr, size, dir, attrs);
>  }
> @@ -865,7 +885,8 @@ static void vduse_dev_unmap_page(union virtio_map token, dma_addr_t dma_addr,
>  static void *vduse_dev_alloc_coherent(union virtio_map token, size_t size,
>                                       dma_addr_t *dma_addr, gfp_t flag)
>  {
> -       struct vduse_iova_domain *domain = token.iova_domain;
> +       struct vduse_dev *vdev = token.group->dev;
> +       struct vduse_iova_domain *domain = vdev->domain;
>         unsigned long iova;
>         void *addr;
>
> @@ -884,14 +905,16 @@ static void vduse_dev_free_coherent(union virtio_map token, size_t size,
>                                     void *vaddr, dma_addr_t dma_addr,
>                                     unsigned long attrs)
>  {
> -       struct vduse_iova_domain *domain = token.iova_domain;
> +       struct vduse_dev *vdev = token.group->dev;
> +       struct vduse_iova_domain *domain = vdev->domain;
>
>         vduse_domain_free_coherent(domain, size, vaddr, dma_addr, attrs);
>  }
>
>  static bool vduse_dev_need_sync(union virtio_map token, dma_addr_t dma_addr)
>  {
> -       struct vduse_iova_domain *domain = token.iova_domain;
> +       struct vduse_dev *vdev = token.group->dev;
> +       struct vduse_iova_domain *domain = vdev->domain;
>
>         return dma_addr < domain->bounce_size;
>  }
> @@ -905,7 +928,8 @@ static int vduse_dev_mapping_error(union virtio_map token, dma_addr_t dma_addr)
>
>  static size_t vduse_dev_max_mapping_size(union virtio_map token)
>  {
> -       struct vduse_iova_domain *domain = token.iova_domain;
> +       struct vduse_dev *vdev = token.group->dev;
> +       struct vduse_iova_domain *domain = vdev->domain;
>
>         return domain->bounce_size;
>  }
> @@ -1720,6 +1744,7 @@ static int vduse_destroy_dev(char *name)
>         if (dev->domain)
>                 vduse_domain_destroy(dev->domain);
>         kfree(dev->name);
> +       kfree(dev->groups);
>         vduse_dev_destroy(dev);
>         module_put(THIS_MODULE);
>
> @@ -1885,7 +1910,15 @@ static int vduse_create_dev(struct vduse_dev_config *config,
>         dev->device_features = config->features;
>         dev->device_id = config->device_id;
>         dev->vendor_id = config->vendor_id;
> +
>         dev->ngroups = (dev->api_version < 1) ? 1 : (config->ngroups ?: 1);
> +       dev->groups = kcalloc(dev->ngroups, sizeof(dev->groups[0]),
> +                             GFP_KERNEL);
> +       if (!dev->groups)
> +               goto err_vq_groups;
> +       for (u32 i = 0; i < dev->ngroups; ++i)
> +               dev->groups[i].dev = dev;
> +
>         dev->name = kstrdup(config->name, GFP_KERNEL);
>         if (!dev->name)
>                 goto err_str;
> @@ -1922,6 +1955,8 @@ static int vduse_create_dev(struct vduse_dev_config *config,
>  err_idr:
>         kfree(dev->name);
>  err_str:
> +       kfree(dev->groups);
> +err_vq_groups:
>         vduse_dev_destroy(dev);
>  err:
>         return ret;
> @@ -2083,7 +2118,6 @@ static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
>                 return -ENOMEM;
>         }
>
> -       dev->vdev->vdpa.vmap.iova_domain = dev->domain;
>         ret = _vdpa_register_device(&dev->vdev->vdpa, dev->vq_num);
>         if (ret) {
>                 put_device(&dev->vdev->vdpa.dev);
> diff --git a/include/linux/virtio.h b/include/linux/virtio.h
> index 96c66126c074..5f8db75f7833 100644
> --- a/include/linux/virtio.h
> +++ b/include/linux/virtio.h
> @@ -41,13 +41,13 @@ struct virtqueue {
>         void *priv;
>  };
>
> -struct vduse_iova_domain;
> +struct vduse_vq_group_int;
>
>  union virtio_map {
>         /* Device that performs DMA */
>         struct device *dma_dev;
> -       /* VDUSE specific mapping data */
> -       struct vduse_iova_domain *iova_domain;
> +       /* VDUSE specific virtqueue group for doing map */
> +       struct vduse_vq_group_int *group;
>  };
>
>  int virtqueue_add_outbuf(struct virtqueue *vq,
> --
> 2.51.0
>

Thanks

