Message-ID: <20250807115752.1663383-3-eperezma@redhat.com>
Date: Thu,  7 Aug 2025 13:57:47 +0200
From: Eugenio Pérez <eperezma@...hat.com>
To: "Michael S. Tsirkin" <mst@...hat.com>
Cc: Cindy Lu <lulu@...hat.com>,
	Eugenio Pérez <eperezma@...hat.com>,
	Yongji Xie <xieyongji@...edance.com>,
	Stefano Garzarella <sgarzare@...hat.com>,
	virtualization@...ts.linux.dev,
	Laurent Vivier <lvivier@...hat.com>,
	linux-kernel@...r.kernel.org,
	Xuan Zhuo <xuanzhuo@...ux.alibaba.com>,
	jasowang@...hat.com,
	Maxime Coquelin <mcoqueli@...hat.com>
Subject: [RFC v2 2/7] vduse: add vq group support

This allows separating the different virtqueues into groups that share
the same address space.  The VDUSE device is asked for the group of
each vq once the driver sets FEATURES_OK, as the groups are needed for
the DMA API.

Allocate 3 vq groups, since net is the device that needs the most groups:
* Dataplane (guest passthrough)
* CVQ
* Shadowed vrings.

Future versions of the series can include dynamic allocation of the
groups array so VDUSE can declare more groups.
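
For illustration only (not part of this patch): a userspace device that
wants the three groups above can declare them at creation time through
the new ngroups field.  A minimal sketch, assuming VDUSE_API_VERSION >= 1
has already been negotiated and using hypothetical control_fd (opened on
/dev/vduse/control) and config_size values:

/*
 * Sketch only: create a VDUSE device that declares 3 vq groups.
 * Needs <stdlib.h>, <errno.h>, <sys/ioctl.h> and <linux/vduse.h>.
 */
static int create_dev_with_groups(int control_fd, __u32 config_size)
{
	struct vduse_dev_config *cfg;
	int ret;

	cfg = calloc(1, sizeof(*cfg) + config_size);
	if (!cfg)
		return -ENOMEM;

	/* fill name, device_id, features, vq_num, vq_align, config... */
	cfg->config_size = config_size;
	cfg->ngroups = 3;	/* dataplane, CVQ and shadowed vrings */

	ret = ioctl(control_fd, VDUSE_CREATE_DEV, cfg);
	free(cfg);
	return ret;
}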

Signed-off-by: Eugenio Pérez <eperezma@...hat.com>
---
v2:
* Cache the group information in the kernel, as we need it to provide
  the vq map tokens properly.
* Add the descriptors vq group to optimize SVQ forwarding and to support
  indirect descriptors out of the box.
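
Also for illustration (not part of the patch): with this change the
kernel asks the userspace device for the group of each vq via the two
new request types and caches the answers.  A minimal sketch of the
device side, assuming a hypothetical dev_fd opened on /dev/vduse/<name>
and a net-like layout where only the CVQ sits in its own group:

/*
 * Sketch only: reply to the new vq group requests on the device fd.
 * Needs <unistd.h> and <linux/vduse.h>.
 */
static void handle_one_request(int dev_fd, __u32 cvq_index)
{
	struct vduse_dev_request req;
	struct vduse_dev_response resp = { 0 };

	if (read(dev_fd, &req, sizeof(req)) != sizeof(req))
		return;

	resp.request_id = req.request_id;
	resp.result = VDUSE_REQ_RESULT_OK;

	switch (req.type) {
	case VDUSE_GET_VQ_GROUP:
		/* e.g. CVQ in group 1, data vqs in group 0 */
		resp.vq_group.num = req.vq_group.index == cvq_index;
		break;
	case VDUSE_GET_VRING_DESC_GROUP:
		/*
		 * The descriptor area may use yet another group, e.g.
		 * group 2 for the data vqs so it can be shadowed on its own.
		 */
		resp.vq_group.num = req.vq_group.index == cvq_index ? 1 : 2;
		break;
	default:
		/* the other request types are handled as before */
		break;
	}

	write(dev_fd, &resp, sizeof(resp));
}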
---
 drivers/vdpa/vdpa_user/vduse_dev.c | 71 +++++++++++++++++++++++++++++-
 include/uapi/linux/vduse.h         | 19 +++++++-
 2 files changed, 88 insertions(+), 2 deletions(-)

diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index d858c4389cc1..d1f6d00a9c71 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -46,6 +46,11 @@
 #define VDUSE_IOVA_SIZE (VDUSE_MAX_BOUNCE_SIZE + 128 * 1024 * 1024)
 #define VDUSE_MSG_DEFAULT_TIMEOUT 30
 
+/*
+ * 3 is enough for the worst case (net): dataplane, CVQ and shadowed vrings.
+ */
+#define VDUSE_MAX_VQ_GROUPS 3
+
 #define IRQ_UNBOUND -1
 
 struct vduse_virtqueue {
@@ -58,6 +63,8 @@ struct vduse_virtqueue {
 	struct vdpa_vq_state state;
 	bool ready;
 	bool kicked;
+	u32 vq_group;
+	u32 vq_desc_group;
 	spinlock_t kick_lock;
 	spinlock_t irq_lock;
 	struct eventfd_ctx *kickfd;
@@ -114,6 +121,7 @@ struct vduse_dev {
 	u8 status;
 	u32 vq_num;
 	u32 vq_align;
+	u32 ngroups;
 	struct vduse_umem *umem;
 	struct mutex mem_lock;
 	unsigned int bounce_size;
@@ -592,6 +600,20 @@ static int vduse_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 idx,
 	return 0;
 }
 
+static u32 vduse_get_vq_group(struct vdpa_device *vdpa, u16 idx)
+{
+	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+	return dev->vqs[idx]->vq_group;
+}
+
+static u32 vduse_get_vq_desc_group(struct vdpa_device *vdpa, u16 idx)
+{
+	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+	return dev->vqs[idx]->vq_desc_group;
+}
+
 static int vduse_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 idx,
 				struct vdpa_vq_state *state)
 {
@@ -678,13 +700,48 @@ static u8 vduse_vdpa_get_status(struct vdpa_device *vdpa)
 	return dev->status;
 }
 
+static int vduse_fill_vq_groups(struct vduse_dev *dev)
+{
+	if (dev->api_version < VDUSE_API_VERSION_1)
+		return 0;
+
+	for (int i = 0; i < dev->vdev->vdpa.nvqs; ++i) {
+		struct vduse_dev_msg msg = { 0 };
+		int ret;
+
+		msg.req.type = VDUSE_GET_VQ_GROUP;
+		msg.req.vq_group.index = i;
+		ret = vduse_dev_msg_sync(dev, &msg);
+		if (ret)
+			return ret;
+
+		dev->vqs[i]->vq_group = msg.resp.vq_group.num;
+
+		msg.req.type = VDUSE_GET_VRING_DESC_GROUP;
+		ret = vduse_dev_msg_sync(dev, &msg);
+		if (ret)
+			return ret;
+
+		dev->vqs[i]->vq_desc_group = msg.resp.vq_group.num;
+	}
+
+	return 0;
+}
+
 static void vduse_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
 {
 	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+	u8 previous_status = dev->status;
 
 	if (vduse_dev_set_status(dev, status))
 		return;
 
+	if ((status ^ previous_status) &
+	    VIRTIO_CONFIG_S_FEATURES_OK &&
+	    status & VIRTIO_CONFIG_S_FEATURES_OK)
+		if (vduse_fill_vq_groups(dev))
+			return;
+
 	dev->status = status;
 }
 
@@ -789,6 +846,8 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
 	.set_vq_cb		= vduse_vdpa_set_vq_cb,
 	.set_vq_num             = vduse_vdpa_set_vq_num,
 	.get_vq_size		= vduse_vdpa_get_vq_size,
+	.get_vq_group		= vduse_get_vq_group,
+	.get_vq_desc_group	= vduse_get_vq_desc_group,
 	.set_vq_ready		= vduse_vdpa_set_vq_ready,
 	.get_vq_ready		= vduse_vdpa_get_vq_ready,
 	.set_vq_state		= vduse_vdpa_set_vq_state,
@@ -1856,6 +1915,16 @@ static int vduse_create_dev(struct vduse_dev_config *config,
 	dev->device_features = config->features;
 	dev->device_id = config->device_id;
 	dev->vendor_id = config->vendor_id;
+	if (dev->api_version >= VDUSE_API_VERSION_1) {
+		if (config->ngroups > VDUSE_MAX_VQ_GROUPS) {
+			pr_err("Not creating a VDUSE device with %u vq groups. Max: %u\n",
+				config->ngroups, VDUSE_MAX_VQ_GROUPS);
+			goto err;
+		}
+		dev->ngroups = config->ngroups ?: 1;
+	} else {
+		dev->ngroups = 1;
+	}
 	dev->name = kstrdup(config->name, GFP_KERNEL);
 	if (!dev->name)
 		goto err_str;
@@ -2016,7 +2085,7 @@ static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
 
 	vdev = vdpa_alloc_device(struct vduse_vdpa, vdpa, dev->dev,
 				 &vduse_vdpa_config_ops, &vduse_map_ops,
-				 1, 1, name, true);
+				 dev->ngroups, 1, name, true);
 	if (IS_ERR(vdev))
 		return PTR_ERR(vdev);
 
diff --git a/include/uapi/linux/vduse.h b/include/uapi/linux/vduse.h
index 9a56d0416bfe..b4b139dc76bb 100644
--- a/include/uapi/linux/vduse.h
+++ b/include/uapi/linux/vduse.h
@@ -31,6 +31,7 @@
  * @features: virtio features
  * @vq_num: the number of virtqueues
  * @vq_align: the allocation alignment of virtqueue's metadata
+ * @ngroups: number of vq groups that the VDUSE device declares
  * @reserved: for future use, needs to be initialized to zero
  * @config_size: the size of the configuration space
  * @config: the buffer of the configuration space
@@ -45,7 +46,8 @@ struct vduse_dev_config {
 	__u64 features;
 	__u32 vq_num;
 	__u32 vq_align;
-	__u32 reserved[13];
+	__u32 ngroups; /* if VDUSE_API_VERSION >= 1 */
+	__u32 reserved[12];
 	__u32 config_size;
 	__u8 config[];
 };
@@ -160,6 +162,16 @@ struct vduse_vq_state_packed {
 	__u16 last_used_idx;
 };
 
+/**
+ * struct vduse_vq_group - virtqueue group
+ * @index: Index of the virtqueue
+ * @num: Group that the virtqueue belongs to
+ */
+struct vduse_vq_group {
+	__u32 index;
+	__u32 num;
+};
+
 /**
  * struct vduse_vq_info - information of a virtqueue
  * @index: virtqueue index
@@ -182,6 +194,7 @@ struct vduse_vq_info {
 	union {
 		struct vduse_vq_state_split split;
 		struct vduse_vq_state_packed packed;
+		struct vduse_vq_group group;
 	};
 	__u8 ready;
 };
@@ -274,6 +287,8 @@ enum vduse_req_type {
 	VDUSE_GET_VQ_STATE,
 	VDUSE_SET_STATUS,
 	VDUSE_UPDATE_IOTLB,
+	VDUSE_GET_VQ_GROUP,
+	VDUSE_GET_VRING_DESC_GROUP,
 };
 
 /**
@@ -328,6 +343,7 @@ struct vduse_dev_request {
 		struct vduse_vq_state vq_state;
 		struct vduse_dev_status s;
 		struct vduse_iova_range iova;
+		struct vduse_vq_group vq_group; /* Only if vduse api version >= 1 */
 		__u32 padding[32];
 	};
 };
@@ -350,6 +366,7 @@ struct vduse_dev_response {
 	__u32 reserved[4];
 	union {
 		struct vduse_vq_state vq_state;
+		struct vduse_vq_group vq_group; /* Only if vduse api version >= 1 */
 		__u32 padding[32];
 	};
 };
-- 
2.50.1

