Message-ID: <CACGkMEsVE83Y5j8-aUBEBRqVc0xG=ChtqojR0B-c9oWuOut9HA@mail.gmail.com>
Date: Mon, 22 Sep 2025 09:30:33 +0800
From: Jason Wang <jasowang@...hat.com>
To: Eugenio Pérez <eperezma@...hat.com>
Cc: "Michael S . Tsirkin" <mst@...hat.com>, Yongji Xie <xieyongji@...edance.com>,
Maxime Coquelin <mcoqueli@...hat.com>, linux-kernel@...r.kernel.org,
Xuan Zhuo <xuanzhuo@...ux.alibaba.com>, virtualization@...ts.linux.dev,
Cindy Lu <lulu@...hat.com>, Laurent Vivier <lvivier@...hat.com>,
Stefano Garzarella <sgarzare@...hat.com>
Subject: Re: [PATCH v3 4/6] vduse: return internal vq group struct as map token

On Fri, Sep 19, 2025 at 5:34 PM Eugenio Pérez <eperezma@...hat.com> wrote:
>
> Return the internal struct that represents the vq group as virtqueue map
> token, instead of the device. This allows the map functions to access
> the information per group.
>
> At the moment, all the virtqueues share the same vq group, which can
> only point to ASID 0. This change prepares the infrastructure for
> actual per-group address space handling.
>
> Signed-off-by: Eugenio Pérez <eperezma@...hat.com>
> ---
> v3:
> * Adapt all virtio_map_ops callbacks to handle empty tokens in case of
> invalid groups.
> * Make setting status DRIVER_OK fail if vq group is not valid.
> * Remove the _int name suffix from struct vduse_vq_group.
>
> RFC v3:
> * Make the vq groups a dynamic array to support an arbitrary number of
> them.
> ---
> drivers/vdpa/vdpa_user/vduse_dev.c | 111 ++++++++++++++++++++++++++---
> include/linux/virtio.h | 6 +-
> 2 files changed, 105 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
> index 7ddd81456f7b..6fdabc3c910e 100644
> --- a/drivers/vdpa/vdpa_user/vduse_dev.c
> +++ b/drivers/vdpa/vdpa_user/vduse_dev.c
> @@ -23,6 +23,7 @@
> #include <linux/uio.h>
> #include <linux/vdpa.h>
> #include <linux/nospec.h>
> +#include <linux/virtio.h>
> #include <linux/vmalloc.h>
> #include <linux/sched/mm.h>
> #include <uapi/linux/vduse.h>
> @@ -88,6 +89,10 @@ struct vduse_umem {
> struct mm_struct *mm;
> };
>
> +struct vduse_vq_group {
> + struct vduse_dev *dev;
> +};
> +
> struct vduse_dev {
> struct vduse_vdpa *vdev;
> struct device *dev;
> @@ -121,6 +126,7 @@ struct vduse_dev {
> u32 vq_align;
> u32 ngroups;
> struct vduse_umem *umem;
> + struct vduse_vq_group *groups;
> struct mutex mem_lock;
> unsigned int bounce_size;
> rwlock_t domain_lock;
> @@ -308,6 +314,13 @@ static int vduse_dev_set_status(struct vduse_dev *dev, u8 status)
> msg.req.type = VDUSE_SET_STATUS;
> msg.req.s.status = status;
>
> + if (dev->api_version >= VDUSE_API_VERSION_1 &&
> + !(dev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
> + status & VIRTIO_CONFIG_S_DRIVER_OK)
I don't understand why the INVALID check is only performed in the 0->1
DRIVER_OK transition here; see the sketch below this hunk.
> + for (u32 i = 0; i < dev->vq_num; ++i)
> + if (dev->vqs[i]->vq_group == VQ_GROUP_INVALID)
> + return -EIO;
> +
> return vduse_dev_msg_sync(dev, &msg);
> }
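For example, something like this (untested) would reject an invalid
group on every attempt to set DRIVER_OK, regardless of the previous
status:

	if (dev->api_version >= VDUSE_API_VERSION_1 &&
	    (status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		for (u32 i = 0; i < dev->vq_num; ++i)
			if (dev->vqs[i]->vq_group == VQ_GROUP_INVALID)
				return -EIO;
	}

Or is there a reason the groups can only become invalid before
DRIVER_OK is set for the first time?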
>
> @@ -612,6 +625,20 @@ static s64 vduse_get_vq_group(struct vdpa_device *vdpa, u16 idx)
> return dev->vqs[idx]->vq_group;
> }
>
> +static union virtio_map vduse_get_vq_map(struct vdpa_device *vdpa, u16 idx)
> +{
> + struct vduse_dev *dev = vdpa_to_vduse(vdpa);
> + s64 vq_group = vduse_get_vq_group(vdpa, idx);
> + union virtio_map ret;
> +
> + if (vq_group < 0)
> + ret.group = NULL;
> + else
> + ret.group = &dev->groups[vq_group];
> +
> + return ret;
> +}
> +
> static int vduse_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 idx,
> struct vdpa_vq_state *state)
> {
> @@ -832,6 +859,7 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
> .get_vq_affinity = vduse_vdpa_get_vq_affinity,
> .reset = vduse_vdpa_reset,
> .set_map = vduse_vdpa_set_map,
> + .get_vq_map = vduse_get_vq_map,
> .free = vduse_vdpa_free,
> };
>
> @@ -839,7 +867,14 @@ static void vduse_dev_sync_single_for_device(union virtio_map token,
> dma_addr_t dma_addr, size_t size,
> enum dma_data_direction dir)
> {
> - struct vduse_iova_domain *domain = token.iova_domain;
> + struct vduse_dev *vdev;
> + struct vduse_iova_domain *domain;
> +
> + if (!token.group)
> + return;
> +
> + vdev = token.group->dev;
> + domain = vdev->domain;
>
> vduse_domain_sync_single_for_device(domain, dma_addr, size, dir);
> }
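Nit: every map op below now open-codes the same NULL check plus the
same two dereferences. A small helper (name just a suggestion,
untested) could cut the duplication:

	static struct vduse_iova_domain *
	vduse_token_to_domain(union virtio_map token)
	{
		return token.group ? token.group->dev->domain : NULL;
	}

Then each callback only needs a single NULL test on the returned
domain.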
> @@ -848,7 +883,14 @@ static void vduse_dev_sync_single_for_cpu(union virtio_map token,
> dma_addr_t dma_addr, size_t size,
> enum dma_data_direction dir)
> {
> - struct vduse_iova_domain *domain = token.iova_domain;
> + struct vduse_dev *vdev;
> + struct vduse_iova_domain *domain;
> +
> + if (!token.group)
> + return;
> +
> + vdev = token.group->dev;
> + domain = vdev->domain;
>
> vduse_domain_sync_single_for_cpu(domain, dma_addr, size, dir);
> }
> @@ -858,7 +900,14 @@ static dma_addr_t vduse_dev_map_page(union virtio_map token, struct page *page,
> enum dma_data_direction dir,
> unsigned long attrs)
> {
> - struct vduse_iova_domain *domain = token.iova_domain;
> + struct vduse_dev *vdev;
> + struct vduse_iova_domain *domain;
> +
> + if (!token.group)
> + return DMA_MAPPING_ERROR;
> +
> + vdev = token.group->dev;
> + domain = vdev->domain;
>
> return vduse_domain_map_page(domain, page, offset, size, dir, attrs);
> }
> @@ -867,7 +916,14 @@ static void vduse_dev_unmap_page(union virtio_map token, dma_addr_t dma_addr,
> size_t size, enum dma_data_direction dir,
> unsigned long attrs)
> {
> - struct vduse_iova_domain *domain = token.iova_domain;
> + struct vduse_dev *vdev;
> + struct vduse_iova_domain *domain;
> +
> + if (!token.group)
> + return;
> +
> + vdev = token.group->dev;
> + domain = vdev->domain;
>
> return vduse_domain_unmap_page(domain, dma_addr, size, dir, attrs);
> }
> @@ -875,11 +931,17 @@ static void vduse_dev_unmap_page(union virtio_map token, dma_addr_t dma_addr,
> static void *vduse_dev_alloc_coherent(union virtio_map token, size_t size,
> dma_addr_t *dma_addr, gfp_t flag)
> {
> - struct vduse_iova_domain *domain = token.iova_domain;
> + struct vduse_dev *vdev;
> + struct vduse_iova_domain *domain;
> unsigned long iova;
> void *addr;
>
> *dma_addr = DMA_MAPPING_ERROR;
> + if (!token.group)
> + return NULL;
> +
> + vdev = token.group->dev;
> + domain = vdev->domain;
> addr = vduse_domain_alloc_coherent(domain, size,
> (dma_addr_t *)&iova, flag);
> if (!addr)
> @@ -894,14 +956,28 @@ static void vduse_dev_free_coherent(union virtio_map token, size_t size,
> void *vaddr, dma_addr_t dma_addr,
> unsigned long attrs)
> {
> - struct vduse_iova_domain *domain = token.iova_domain;
> + struct vduse_dev *vdev;
> + struct vduse_iova_domain *domain;
> +
> + if (!token.group)
> + return;
> +
> + vdev = token.group->dev;
> + domain = vdev->domain;
>
> vduse_domain_free_coherent(domain, size, vaddr, dma_addr, attrs);
> }
>
> static bool vduse_dev_need_sync(union virtio_map token, dma_addr_t dma_addr)
> {
> - struct vduse_iova_domain *domain = token.iova_domain;
> + struct vduse_dev *vdev;
> + struct vduse_iova_domain *domain;
> +
> + if (!token.group)
> + return false;
> +
> + vdev = token.group->dev;
> + domain = vdev->domain;
>
> return dma_addr < domain->bounce_size;
> }
> @@ -915,7 +991,14 @@ static int vduse_dev_mapping_error(union virtio_map token, dma_addr_t dma_addr)
>
> static size_t vduse_dev_max_mapping_size(union virtio_map token)
> {
> - struct vduse_iova_domain *domain = token.iova_domain;
> + struct vduse_dev *vdev;
> + struct vduse_iova_domain *domain;
> +
> + if (!token.group)
> + return 0;
> +
> + vdev = token.group->dev;
> + domain = vdev->domain;
>
> return domain->bounce_size;
> }
> @@ -1733,6 +1816,7 @@ static int vduse_destroy_dev(char *name)
> if (dev->domain)
> vduse_domain_destroy(dev->domain);
> kfree(dev->name);
> + kfree(dev->groups);
> vduse_dev_destroy(dev);
> module_put(THIS_MODULE);
>
> @@ -1899,7 +1983,15 @@ static int vduse_create_dev(struct vduse_dev_config *config,
> dev->device_features = config->features;
> dev->device_id = config->device_id;
> dev->vendor_id = config->vendor_id;
> +
Unnecessary change?
> dev->ngroups = (dev->api_version < 1) ? 1 : (config->ngroups ?: 1);
> + dev->groups = kcalloc(dev->ngroups, sizeof(dev->groups[0]),
> + GFP_KERNEL);
> + if (!dev->groups)
> + goto err_vq_groups;
> + for (u32 i = 0; i < dev->ngroups; ++i)
> + dev->groups[i].dev = dev;
> +
> dev->name = kstrdup(config->name, GFP_KERNEL);
> if (!dev->name)
> goto err_str;
> @@ -1936,6 +2028,8 @@ static int vduse_create_dev(struct vduse_dev_config *config,
> err_idr:
> kfree(dev->name);
> err_str:
> + kfree(dev->groups);
> +err_vq_groups:
> vduse_dev_destroy(dev);
> err:
> return ret;
> @@ -2097,7 +2191,6 @@ static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
> return -ENOMEM;
> }
>
> - dev->vdev->vdpa.vmap.iova_domain = dev->domain;
> ret = _vdpa_register_device(&dev->vdev->vdpa, dev->vq_num);
> if (ret) {
> put_device(&dev->vdev->vdpa.dev);
> diff --git a/include/linux/virtio.h b/include/linux/virtio.h
> index 96c66126c074..302109029700 100644
> --- a/include/linux/virtio.h
> +++ b/include/linux/virtio.h
> @@ -41,13 +41,13 @@ struct virtqueue {
> void *priv;
> };
>
> -struct vduse_iova_domain;
> +struct vduse_vq_group;
>
> union virtio_map {
> /* Device that performs DMA */
> struct device *dma_dev;
> - /* VDUSE specific mapping data */
> - struct vduse_iova_domain *iova_domain;
> + /* VDUSE specific virtqueue group for doing map */
> + struct vduse_vq_group *group;
> };
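One more note on the header change: the union is untagged, so which
member is live is implied by the transport that produced the token.
VDUSE now always goes through .group, roughly:

	ret.group = &dev->groups[vq_group];	/* producer: vduse_get_vq_map() */
	...
	domain = token.group->dev->domain;	/* consumers: the map ops */

It might be worth extending the comment in virtio.h to spell out that
the core must treat the token as opaque.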
>
> int virtqueue_add_outbuf(struct virtqueue *vq,
> --
> 2.51.0
>
Thanks