Message-ID: <20250807115752.1663383-5-eperezma@redhat.com>
Date: Thu, 7 Aug 2025 13:57:49 +0200
From: Eugenio Pérez <eperezma@...hat.com>
To: "Michael S. Tsirkin" <mst@...hat.com>
Cc: Cindy Lu <lulu@...hat.com>,
Eugenio Pérez <eperezma@...hat.com>,
Yongji Xie <xieyongji@...edance.com>,
Stefano Garzarella <sgarzare@...hat.com>,
virtualization@...ts.linux.dev,
Laurent Vivier <lvivier@...hat.com>,
linux-kernel@...r.kernel.org,
Xuan Zhuo <xuanzhuo@...ux.alibaba.com>,
jasowang@...hat.com,
Maxime Coquelin <mcoqueli@...hat.com>
Subject: [RFC v2 4/7] vduse: return internal vq group struct as map token

Return the internal struct that represents the vq group as the
virtqueue map token, instead of the device. This allows the DMA
functions to access per-group information.

At this moment all the virtqueues share the same vq group, which can
only point to ASID 0. This change prepares the infrastructure for
actual per-group address space handling.

Signed-off-by: Eugenio Pérez <eperezma@...hat.com>
---
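Note for reviewers: a minimal sketch of the token round trip this
patch creates. Only vduse_get_vq_map_token() and struct
vduse_vq_group_int come from this patch; the calling context below is
an assumption for illustration:

	/* The config op now returns the vq group, not the device. */
	void *token = vduse_get_vq_map_token(&dev->vdev->vdpa, idx);

	/* Every DMA callback recovers the device (and, later in the
	 * series, the per-group address space) from that token.
	 */
	struct vduse_vq_group_int *group = token;
	struct vduse_iova_domain *domain = group->dev->domain;

The token stays valid for the lifetime of the vq group and leaves room
for each group to carry its own ASID later.
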
 drivers/vdpa/vdpa_user/vduse_dev.c | 44 ++++++++++++++++++++----------
 1 file changed, 30 insertions(+), 14 deletions(-)

diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index d1f6d00a9c71..a7a2749f5818 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -90,6 +90,10 @@ struct vduse_umem {
 	struct mm_struct *mm;
 };
 
+struct vduse_vq_group_int {
+	struct vduse_dev *dev;
+};
+
 struct vduse_dev {
 	struct vduse_vdpa *vdev;
 	struct device *dev;
@@ -123,6 +127,7 @@ struct vduse_dev {
 	u32 vq_align;
 	u32 ngroups;
 	struct vduse_umem *umem;
+	struct vduse_vq_group_int groups[VDUSE_MAX_VQ_GROUPS];
 	struct mutex mem_lock;
 	unsigned int bounce_size;
 	struct mutex domain_lock;
@@ -614,6 +619,14 @@ static u32 vduse_get_vq_desc_group(struct vdpa_device *vdpa, u16 idx)
 	return dev->vqs[idx]->vq_desc_group;
 }
 
+static void *vduse_get_vq_map_token(struct vdpa_device *vdpa, u16 idx)
+{
+	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+	u32 vq_group = dev->vqs[idx]->vq_group;
+
+	return &dev->groups[vq_group];
+}
+
 static int vduse_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 idx,
 				   struct vdpa_vq_state *state)
 {
@@ -870,6 +883,7 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
 	.get_vq_affinity	= vduse_vdpa_get_vq_affinity,
 	.reset			= vduse_vdpa_reset,
 	.set_map		= vduse_vdpa_set_map,
+	.get_vq_map_token	= vduse_get_vq_map_token,
 	.free			= vduse_vdpa_free,
 };
 
@@ -877,8 +891,8 @@ static void vduse_dev_sync_single_for_device(void *token,
 					     dma_addr_t dma_addr, size_t size,
 					     enum dma_data_direction dir)
 {
-	struct device *dev = token;
-	struct vduse_dev *vdev = dev_to_vduse(dev);
+	struct vduse_vq_group_int *group = token;
+	struct vduse_dev *vdev = group->dev;
 	struct vduse_iova_domain *domain = vdev->domain;
 
 	vduse_domain_sync_single_for_device(domain, dma_addr, size, dir);
@@ -888,8 +902,8 @@ static void vduse_dev_sync_single_for_cpu(void *token,
 					  dma_addr_t dma_addr, size_t size,
 					  enum dma_data_direction dir)
 {
-	struct device *dev = token;
-	struct vduse_dev *vdev = dev_to_vduse(dev);
+	struct vduse_vq_group_int *group = token;
+	struct vduse_dev *vdev = group->dev;
 	struct vduse_iova_domain *domain = vdev->domain;
 
 	vduse_domain_sync_single_for_cpu(domain, dma_addr, size, dir);
@@ -900,8 +914,8 @@ static dma_addr_t vduse_dev_map_page(void *token, struct page *page,
 				     enum dma_data_direction dir,
 				     unsigned long attrs)
 {
-	struct device *dev = token;
-	struct vduse_dev *vdev = dev_to_vduse(dev);
+	struct vduse_vq_group_int *group = token;
+	struct vduse_dev *vdev = group->dev;
 	struct vduse_iova_domain *domain = vdev->domain;
 
 	return vduse_domain_map_page(domain, page, offset, size, dir, attrs);
@@ -911,8 +925,8 @@ static void vduse_dev_unmap_page(void *token, dma_addr_t dma_addr,
 				 size_t size, enum dma_data_direction dir,
 				 unsigned long attrs)
 {
-	struct device *dev = token;
-	struct vduse_dev *vdev = dev_to_vduse(dev);
+	struct vduse_vq_group_int *group = token;
+	struct vduse_dev *vdev = group->dev;
 	struct vduse_iova_domain *domain = vdev->domain;
 
 	return vduse_domain_unmap_page(domain, dma_addr, size, dir, attrs);
@@ -921,8 +935,8 @@ static void vduse_dev_unmap_page(void *token, dma_addr_t dma_addr,
 static void *vduse_dev_alloc_coherent(void *token, size_t size,
 				      dma_addr_t *dma_addr, gfp_t flag)
 {
-	struct device *dev = token;
-	struct vduse_dev *vdev = dev_to_vduse(dev);
+	struct vduse_vq_group_int *group = token;
+	struct vduse_dev *vdev = group->dev;
 	struct vduse_iova_domain *domain = vdev->domain;
 	unsigned long iova;
 	void *addr;
@@ -942,8 +956,8 @@ static void vduse_dev_free_coherent(void *token, size_t size,
 				    void *vaddr, dma_addr_t dma_addr,
 				    unsigned long attrs)
 {
-	struct device *dev = token;
-	struct vduse_dev *vdev = dev_to_vduse(dev);
+	struct vduse_vq_group_int *group = token;
+	struct vduse_dev *vdev = group->dev;
 	struct vduse_iova_domain *domain = vdev->domain;
 
 	vduse_domain_free_coherent(domain, size, vaddr, dma_addr, attrs);
@@ -951,8 +965,8 @@ static void vduse_dev_free_coherent(void *token, size_t size,
 
 static size_t vduse_dev_max_mapping_size(void *token)
 {
-	struct device *dev = token;
-	struct vduse_dev *vdev = dev_to_vduse(dev);
+	struct vduse_vq_group_int *group = token;
+	struct vduse_dev *vdev = group->dev;
 	struct vduse_iova_domain *domain = vdev->domain;
 
 	return domain->bounce_size;
@@ -1925,6 +1939,8 @@ static int vduse_create_dev(struct vduse_dev_config *config,
 	} else {
 		dev->ngroups = 1;
 	}
+	for (u32 i = 0; i < dev->ngroups; ++i)
+		dev->groups[i].dev = dev;
 	dev->name = kstrdup(config->name, GFP_KERNEL);
 	if (!dev->name)
 		goto err_str;
--
2.50.1