Message-ID: <20260204135813.870538-1-kotaranov@linux.microsoft.com>
Date: Wed, 4 Feb 2026 05:58:13 -0800
From: Konstantin Taranov <kotaranov@...ux.microsoft.com>
To: kotaranov@...rosoft.com,
shirazsaleem@...rosoft.com,
longli@...rosoft.com,
jgg@...pe.ca,
leon@...nel.org
Cc: linux-rdma@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH rdma-next 1/1] RDMA/mana_ib: return PD number to the user
From: Konstantin Taranov <kotaranov@...rosoft.com>

Return the protection domain numbers (PDNs) of created PDs to userspace
applications. Allow users to request short PDNs, which are 16 bits wide.
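
For illustration, a minimal userspace-side sketch of the new udata layout
follows. Only struct mana_ib_alloc_pd, struct mana_ib_alloc_pd_resp and
MANA_IB_PD_SHORT_PDN come from the uapi change below; the helper and the
standalone program are hypothetical and assume rdma uapi headers that
already contain this patch.

/*
 * Hypothetical sketch, not part of this patch: build the driver-private
 * command that mana_ib_alloc_pd() parses with ib_copy_from_udata(), and
 * read back the PDN the kernel returns via ib_copy_to_udata().
 */
#include <stdio.h>
#include <string.h>
#include <rdma/mana-abi.h>

/* Fill the alloc_pd command blob, optionally requesting a 16-bit PDN. */
static void mana_fill_alloc_pd_cmd(struct mana_ib_alloc_pd *ucmd, int short_pdn)
{
	memset(ucmd, 0, sizeof(*ucmd));
	if (short_pdn)
		ucmd->flags |= MANA_IB_PD_SHORT_PDN;
}

int main(void)
{
	struct mana_ib_alloc_pd ucmd;
	/* Stand-in for the response the kernel fills with ib_copy_to_udata(). */
	struct mana_ib_alloc_pd_resp resp = { .pdn = 42 };

	mana_fill_alloc_pd_cmd(&ucmd, 1);
	printf("requested flags 0x%x, returned pdn %u\n", ucmd.flags, resp.pdn);
	return 0;
}

A real provider would hand these structs to the verbs command path (e.g.
ibv_cmd_alloc_pd() in rdma-core) rather than printing them; the wire layout
seen by the kernel is the same.
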
Signed-off-by: Konstantin Taranov <kotaranov@...rosoft.com>
---
 drivers/infiniband/hw/mana/main.c | 19 ++++++++++++++++++-
 include/net/mana/gdma.h           |  4 ++--
 include/uapi/rdma/mana-abi.h      | 14 ++++++++++++++
 3 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index fac159f71..7ee4493cb 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -69,9 +69,11 @@ int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd,
int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
+ struct mana_ib_alloc_pd_resp cmd_resp = {};
struct ib_device *ibdev = ibpd->device;
struct gdma_create_pd_resp resp = {};
struct gdma_create_pd_req req = {};
+ struct mana_ib_alloc_pd ucmd = {};
enum gdma_pd_flags flags = 0;
struct mana_ib_dev *dev;
struct gdma_context *gc;
@@ -83,8 +85,15 @@ int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req),
sizeof(resp));
- if (!udata)
+ if (!udata) {
flags |= GDMA_PD_FLAG_ALLOW_GPA_MR;
+ } else {
+ err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
+ if (err)
+ return err;
+ if (ucmd.flags & MANA_IB_PD_SHORT_PDN)
+ flags |= GDMA_PD_FLAG_SHORT_PDN;
+ }
req.flags = flags;
err = mana_gd_send_request(gc, sizeof(req), &req,
@@ -107,6 +116,14 @@ int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
mutex_init(&pd->vport_mutex);
pd->vport_use_count = 0;
+
+ if (udata) {
+ cmd_resp.pdn = resp.pd_id;
+ err = ib_copy_to_udata(udata, &cmd_resp, min(sizeof(cmd_resp), udata->outlen));
+ if (err)
+ return err;
+ }
+
return 0;
}
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index 8649eb789..cebb9b2bd 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -824,8 +824,8 @@ struct gdma_destroy_dma_region_req {
}; /* HW DATA */
enum gdma_pd_flags {
- GDMA_PD_FLAG_INVALID = 0,
- GDMA_PD_FLAG_ALLOW_GPA_MR = 1,
+ GDMA_PD_FLAG_ALLOW_GPA_MR = BIT(0),
+ GDMA_PD_FLAG_SHORT_PDN = BIT(2),
};
struct gdma_create_pd_req {
diff --git a/include/uapi/rdma/mana-abi.h b/include/uapi/rdma/mana-abi.h
index a75bf32b8..88b24ae50 100644
--- a/include/uapi/rdma/mana-abi.h
+++ b/include/uapi/rdma/mana-abi.h
@@ -87,4 +87,18 @@ struct mana_ib_create_qp_rss_resp {
struct rss_resp_entry entries[64];
};
+enum mana_ib_create_pd_flags {
+ MANA_IB_PD_SHORT_PDN = 1 << 0,
+};
+
+struct mana_ib_alloc_pd {
+ __u32 flags;
+ __u32 reserved;
+};
+
+struct mana_ib_alloc_pd_resp {
+ __u32 pdn;
+ __u32 reserved;
+};
+
#endif
--
2.43.0