[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251217080451.38337-5-15927021679@163.com>
Date: Wed, 17 Dec 2025 16:03:56 +0800
From: 15927021679@....com
To: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
"David S . Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
Stanislav Fomichev <sdf@...ichev.me>
Cc: linux-kernel@...r.kernel.org,
netdev@...r.kernel.org,
xiongweimin <xiongweimin@...inos.cn>
Subject: [PATCH 03/14] examples/vhost_user_rdma: implement create and destroy completion queue commands
From: xiongweimin <xiongweimin@...inos.cn>
Add core functionality for managing RDMA Completion Queues (CQs):
1. CREATE_CQ command handler with resource allocation and initialization
2. DESTROY_CQ command with safe teardown procedures
3. Reference counting for lifecycle management
4. Concurrency control via spinlocks
5. Integration with device resource pools
Key features:
- Strict validation of CQ size against device capabilities
- Atomic state management with `is_dying` flag
- Virtual queue index reset during destruction
- Error logging for allocation failures
- Memory-safe buffer handling with CHK_IOVEC
Signed-off-by: xiongweimin <xiongweimin@...inos.cn>
Change-Id: Ie4b51c90f36a1ceadfe4dbc622dc6fcaaaaf4261
---
examples/vhost_user_rdma/vhost_rdma_ib.c | 59 +++++++++++++++++++++++-
examples/vhost_user_rdma/vhost_rdma_ib.h | 33 ++++++++++++-
2 files changed, 89 insertions(+), 3 deletions(-)
diff --git a/examples/vhost_user_rdma/vhost_rdma_ib.c b/examples/vhost_user_rdma/vhost_rdma_ib.c
index edb6e3fea3..5ec0de8ae7 100644
--- a/examples/vhost_user_rdma/vhost_rdma_ib.c
+++ b/examples/vhost_user_rdma/vhost_rdma_ib.c
@@ -563,13 +563,68 @@ vhost_rdma_query_port(__rte_unused struct vhost_rdma_device *dev,
return 0;
}
+/*
+ * Handle the CREATE_CQ control command.
+ *
+ * Validates the requested CQ depth against the device capabilities,
+ * allocates a CQ object from the device pool and reports the new CQ
+ * number back to the guest in the ack payload.
+ *
+ * Returns 0 on success, -EINVAL on an oversized request or short iovec,
+ * -ENOMEM when the CQ pool is exhausted.
+ */
+static int
+vhost_rdma_create_cq(struct vhost_rdma_device *dev,
+		     struct iovec *in,
+		     struct iovec *out)
+{
+	struct vhost_rdma_cmd_create_cq *create_cmd;
+	struct vhost_rdma_ack_create_cq *create_rsp;
+	struct vhost_rdma_cq *cq;
+	uint32_t cqn;
+
+	CHK_IOVEC(create_cmd, in);
+	/* cqe is guest-controlled: reject depths beyond the advertised max. */
+	if (create_cmd->cqe > dev->attr.max_cqe)
+		return -EINVAL;
+
+	CHK_IOVEC(create_rsp, out);
+
+	cq = vhost_rdma_pool_alloc(&dev->cq_pool, &cqn);
+	if (cq == NULL) {
+		RDMA_LOG_ERR("cq alloc failed");
+		/* Bail out: falling through would dereference NULL below. */
+		return -ENOMEM;
+	}
+	vhost_rdma_ref_init(cq);
+
+	rte_spinlock_init(&cq->cq_lock);
+	cq->is_dying = false;
+	cq->notify = 0;
+	cq->vq = &dev->cq_vqs[cqn];
+	cq->cqn = cqn;
+	create_rsp->cqn = cqn;
+
+	return 0;
+}
+
+/*
+ * Handle the DESTROY_CQ control command.
+ *
+ * Looks up the CQ by the guest-supplied cqn, marks it dying under the
+ * CQ lock, resets the notification virtqueue indices and drops the
+ * reference taken at creation time.
+ *
+ * Returns 0 on success, -EINVAL on a short iovec or unknown cqn.
+ */
+static int
+vhost_rdma_destroy_cq(struct vhost_rdma_device *dev, struct iovec *in, CTRL_NO_RSP)
+{
+	struct vhost_rdma_cmd_destroy_cq *destroy_cmd;
+	struct vhost_rdma_cq *cq;
+
+	CHK_IOVEC(destroy_cmd, in);
+
+	/* cqn comes from the guest: a stale/bogus index must not crash us. */
+	cq = vhost_rdma_pool_get(&dev->cq_pool, destroy_cmd->cqn);
+	if (cq == NULL) {
+		RDMA_LOG_ERR("destroy cq: invalid cqn %u", destroy_cmd->cqn);
+		return -EINVAL;
+	}
+
+	rte_spinlock_lock(&cq->cq_lock);
+	cq->is_dying = true;
+	cq->vq->last_avail_idx = 0;
+	cq->vq->last_used_idx = 0;
+	rte_spinlock_unlock(&cq->cq_lock);
+
+	vhost_rdma_drop_ref(cq, dev, cq);
+
+	return 0;
+}
+
/* Command handler table declaration */
struct {
int (*handler)(struct vhost_rdma_device *dev, struct iovec *in, struct iovec *out);
const char *name; /* Name of the command (for logging) */
} cmd_tbl[] = {
-	DEFINE_VIRTIO_RDMA_CMD(VHOST_RDMA_CTRL_ROCE_QUERY_DEVICE, vhost_rdma_query_device),
-	DEFINE_VIRTIO_RDMA_CMD(VHOST_RDMA_CTRL_ROCE_QUERY_PORT, vhost_rdma_query_port),
+	DEFINE_VIRTIO_RDMA_CMD(VHOST_RDMA_CTRL_ROCE_QUERY_DEVICE, vhost_rdma_query_device),
+	DEFINE_VIRTIO_RDMA_CMD(VHOST_RDMA_CTRL_ROCE_QUERY_PORT, vhost_rdma_query_port),
+	DEFINE_VIRTIO_RDMA_CMD(VHOST_RDMA_CTRL_ROCE_CREATE_CQ, vhost_rdma_create_cq), /* allocate + init a CQ */
+	DEFINE_VIRTIO_RDMA_CMD(VHOST_RDMA_CTRL_ROCE_DESTROY_CQ, vhost_rdma_destroy_cq), /* teardown + drop ref */
};
/**
diff --git a/examples/vhost_user_rdma/vhost_rdma_ib.h b/examples/vhost_user_rdma/vhost_rdma_ib.h
index 664067b024..6420c8c7e2 100644
--- a/examples/vhost_user_rdma/vhost_rdma_ib.h
+++ b/examples/vhost_user_rdma/vhost_rdma_ib.h
@@ -31,6 +31,12 @@
#include "eal_interrupts.h"
+/*
+ * Initialize an object's reference counter to 1 (the creator's ref).
+ * A single atomic store replaces the original init-to-0 + increment
+ * pair, which performed two atomic operations for the same result.
+ */
+#define vhost_rdma_ref_init(obj) \
+	do { \
+		rte_atomic32_set(&(obj)->refcnt, 1); \
+	} while (0)
+
/* Forward declarations */
struct vhost_rdma_device;
struct vhost_queue;
@@ -370,7 +376,7 @@ struct vhost_user_rdma_msg {
* @brief Completion Queue (CQ)
*/
struct vhost_rdma_cq {
- struct vhost_queue *vq; /**< Notification V-ring */
+ struct vhost_user_queue *vq; /**< Notification V-ring */
rte_spinlock_t cq_lock; /**< Protect CQ operations */
uint8_t notify; /**< Notify pending flag */
bool is_dying; /**< Being destroyed */
@@ -676,6 +682,31 @@ struct vhost_rdma_ack_query_port {
uint32_t reserved[32]; /* For future extensions */
}__rte_packed;
+/* Payload of the CREATE_CQ control command.
+ * NOTE(review): the existing ack structs in this header (e.g. the
+ * query_port ack just above) are tagged __rte_packed, but these new
+ * wire structs are not — confirm whether packing is required here. */
+struct vhost_rdma_cmd_create_cq {
+ /* Size of CQ */
+ uint32_t cqe;
+};
+
+/* Ack payload returned for CREATE_CQ. */
+struct vhost_rdma_ack_create_cq {
+ /* The index of CQ */
+ uint32_t cqn;
+};
+
+/* Payload of the DESTROY_CQ control command. */
+struct vhost_rdma_cmd_destroy_cq {
+ /* The index of CQ */
+ uint32_t cqn;
+};
+
+/* NOTE(review): the two PD structs below look unrelated to this
+ * CQ-only commit — presumably they belong to the PD patch later in
+ * the series; verify before merging. */
+struct vhost_rdma_ack_create_pd {
+ /* The handle of PD */
+ uint32_t pdn;
+};
+
+struct vhost_rdma_cmd_destroy_pd {
+ /* The handle of PD */
+ uint32_t pdn;
+};
+
+
/**
* @brief Convert IB MTU enum to byte size
* @param mtu The MTU enum value
--
2.43.0
Powered by blists - more mailing lists