Message-ID: <20251116-frmr_pools-v1-7-5eb3c8f5c9c4@nvidia.com>
Date: Sun, 16 Nov 2025 21:10:28 +0200
From: Edward Srouji <edwards@...dia.com>
To: Jason Gunthorpe <jgg@...pe.ca>, Leon Romanovsky <leon@...nel.org>, "Saeed
Mahameed" <saeedm@...dia.com>, Tariq Toukan <tariqt@...dia.com>, Mark Bloch
<mbloch@...dia.com>, Andrew Lunn <andrew+netdev@...n.ch>, "David S. Miller"
<davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>, Jakub Kicinski
<kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>
CC: <linux-kernel@...r.kernel.org>, <linux-rdma@...r.kernel.org>,
<netdev@...r.kernel.org>, Michael Guralnik <michaelgur@...dia.com>, "Edward
Srouji" <edwards@...dia.com>, Patrisious Haddad <phaddad@...dia.com>
Subject: [PATCH rdma-next 7/9] RDMA/nldev: Add command to get FRMR pools
From: Michael Guralnik <michaelgur@...dia.com>
Add support for a new netlink command that dumps the state of a device's
FRMR pools to userspace.
Expose each pool with its key and its usage statistics.
Signed-off-by: Michael Guralnik <michaelgur@...dia.com>
Reviewed-by: Patrisious Haddad <phaddad@...dia.com>
Signed-off-by: Edward Srouji <edwards@...dia.com>
---
drivers/infiniband/core/nldev.c | 254 +++++++++++++++++++++++++++++++++++++++
include/uapi/rdma/rdma_netlink.h | 17 +++
2 files changed, 271 insertions(+)
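
Not part of the patch: a rough libnl-3 userspace sketch showing how the new
dump command could be queried and its nested attributes walked, for anyone
who wants to exercise the interface while reviewing. The RDMA_NLDEV_* names
come from the uapi hunk below; the program structure and error handling are
illustrative only and untested.

/*
 * Hypothetical sketch: request RDMA_NLDEV_CMD_FRMR_POOLS_GET as a dump and
 * print each pool's counters. Build against libnl-3 and installed uapi
 * headers, e.g.:
 *   gcc frmr_query.c $(pkg-config --cflags --libs libnl-3.0)
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <linux/netlink.h>      /* NETLINK_RDMA */
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <rdma/rdma_netlink.h>  /* RDMA_NL_GET_TYPE, RDMA_NLDEV_* */

static int frmr_pools_cb(struct nl_msg *msg, void *arg)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX] = {};
	struct nlmsghdr *nlh = nlmsg_hdr(msg);
	struct nlattr *entry;
	int rem;

	if (nla_parse(tb, RDMA_NLDEV_ATTR_MAX - 1, nlmsg_attrdata(nlh, 0),
		      nlmsg_attrlen(nlh, 0), NULL) < 0)
		return NL_SKIP;

	if (!tb[RDMA_NLDEV_ATTR_FRMR_POOLS])
		return NL_SKIP;

	/* Each child of FRMR_POOLS is an FRMR_POOL_ENTRY nest in the dump */
	nla_for_each_nested(entry, tb[RDMA_NLDEV_ATTR_FRMR_POOLS], rem) {
		struct nlattr *etb[RDMA_NLDEV_ATTR_MAX] = {};

		if (nla_type(entry) != RDMA_NLDEV_ATTR_FRMR_POOL_ENTRY)
			continue;
		if (nla_parse_nested(etb, RDMA_NLDEV_ATTR_MAX - 1, entry,
				     NULL) < 0 ||
		    !etb[RDMA_NLDEV_ATTR_FRMR_POOL_QUEUE_HANDLES] ||
		    !etb[RDMA_NLDEV_ATTR_FRMR_POOL_IN_USE] ||
		    !etb[RDMA_NLDEV_ATTR_FRMR_POOL_MAX_IN_USE])
			continue;

		printf("handles %u in_use %llu max_in_use %llu\n",
		       nla_get_u32(etb[RDMA_NLDEV_ATTR_FRMR_POOL_QUEUE_HANDLES]),
		       (unsigned long long)nla_get_u64(etb[RDMA_NLDEV_ATTR_FRMR_POOL_IN_USE]),
		       (unsigned long long)nla_get_u64(etb[RDMA_NLDEV_ATTR_FRMR_POOL_MAX_IN_USE]));
	}
	return NL_OK;
}

int main(int argc, char **argv)
{
	uint32_t ibdev_index = argc > 1 ? (uint32_t)atoi(argv[1]) : 0;
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;

	if (!sk || nl_connect(sk, NETLINK_RDMA))
		return 1;
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, frmr_pools_cb, NULL);

	msg = nlmsg_alloc();
	/* NLM_F_DUMP selects nldev_frmr_pools_get_dumpit() in the kernel */
	nlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ,
		  RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_FRMR_POOLS_GET),
		  0, NLM_F_REQUEST | NLM_F_DUMP);
	nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, ibdev_index);

	nl_send_auto(sk, msg);
	nl_recvmsgs_default(sk);

	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}

Note that the sketch parses the dumpit layout, where each pool is wrapped in
RDMA_NLDEV_ATTR_FRMR_POOL_ENTRY; the doit path below places the pool
attributes directly under RDMA_NLDEV_ATTR_FRMR_POOLS.
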
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 2220a2dfab240eaef2eb64d8e45cb221dfa25614..6cdf6073fdf9c51ee291a63bb86ac690b094aa9f 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -37,11 +37,13 @@
#include <net/netlink.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_netlink.h>
+#include <rdma/frmr_pools.h>
#include "core_priv.h"
#include "cma_priv.h"
#include "restrack.h"
#include "uverbs.h"
+#include "frmr_pools.h"
/*
* This determines whether a non-privileged user is allowed to specify a
@@ -172,6 +174,16 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_NAME_ASSIGN_TYPE] = { .type = NLA_U8 },
[RDMA_NLDEV_ATTR_EVENT_TYPE] = { .type = NLA_U8 },
[RDMA_NLDEV_ATTR_STAT_OPCOUNTER_ENABLED] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_FRMR_POOLS] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_FRMR_POOL_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_FRMR_POOL_KEY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ATS] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ACCESS_FLAGS] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_FRMR_POOL_KEY_VENDOR_KEY] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_FRMR_POOL_KEY_NUM_DMA_BLOCKS] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_FRMR_POOL_QUEUE_HANDLES] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_FRMR_POOL_MAX_IN_USE] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_FRMR_POOL_IN_USE] = { .type = NLA_U64 },
};
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
@@ -2637,6 +2649,244 @@ static int nldev_deldev(struct sk_buff *skb, struct nlmsghdr *nlh,
return ib_del_sub_device_and_put(device);
}
+static int fill_frmr_pool_key(struct sk_buff *msg, struct ib_frmr_key *key)
+{
+ struct nlattr *key_attr;
+
+ key_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_FRMR_POOL_KEY);
+ if (!key_attr)
+ return -EMSGSIZE;
+
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ATS, key->ats))
+ goto err;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ACCESS_FLAGS,
+ key->access_flags))
+ goto err;
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_FRMR_POOL_KEY_VENDOR_KEY,
+ key->vendor_key, RDMA_NLDEV_ATTR_PAD))
+ goto err;
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_FRMR_POOL_KEY_NUM_DMA_BLOCKS,
+ key->num_dma_blocks, RDMA_NLDEV_ATTR_PAD))
+ goto err;
+
+ nla_nest_end(msg, key_attr);
+ return 0;
+
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_frmr_pool_entry(struct sk_buff *msg, struct ib_frmr_pool *pool)
+{
+ if (fill_frmr_pool_key(msg, &pool->key))
+ return -EMSGSIZE;
+
+ spin_lock(&pool->lock);
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_FRMR_POOL_QUEUE_HANDLES,
+ pool->queue.ci + pool->inactive_queue.ci))
+ goto err_unlock;
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_FRMR_POOL_MAX_IN_USE,
+ pool->max_in_use, RDMA_NLDEV_ATTR_PAD))
+ goto err_unlock;
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_FRMR_POOL_IN_USE,
+ pool->in_use, RDMA_NLDEV_ATTR_PAD))
+ goto err_unlock;
+ spin_unlock(&pool->lock);
+
+ return 0;
+
+err_unlock:
+ spin_unlock(&pool->lock);
+ return -EMSGSIZE;
+}
+
+static int fill_frmr_pools_info(struct sk_buff *msg, struct ib_device *device)
+{
+ struct ib_frmr_pools *pools = device->frmr_pools;
+ struct ib_frmr_pool *pool;
+ struct nlattr *table_attr;
+ struct rb_node *node;
+
+ if (!pools)
+ return 0;
+
+ read_lock(&pools->rb_lock);
+ if (RB_EMPTY_ROOT(&pools->rb_root)) {
+ read_unlock(&pools->rb_lock);
+ return 0;
+ }
+ read_unlock(&pools->rb_lock);
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_FRMR_POOLS);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ read_lock(&pools->rb_lock);
+ for (node = rb_first(&pools->rb_root); node; node = rb_next(node)) {
+ pool = rb_entry(node, struct ib_frmr_pool, node);
+ if (fill_frmr_pool_entry(msg, pool))
+ goto err;
+ }
+ read_unlock(&pools->rb_lock);
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ read_unlock(&pools->rb_lock);
+ nla_nest_cancel(msg, table_attr);
+ return -EMSGSIZE;
+}
+
+static int nldev_frmr_pools_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_device *device;
+ struct sk_buff *msg;
+ u32 index;
+ int ret;
+
+ ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
+ NL_VALIDATE_LIBERAL, extack);
+ if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(sock_net(skb->sk), index);
+ if (!device)
+ return -EINVAL;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NLDEV_CMD_FRMR_POOLS_GET),
+ 0, 0);
+ if (!nlh || fill_nldev_handle(msg, device)) {
+ ret = -EMSGSIZE;
+ goto err_free;
+ }
+
+ ret = fill_frmr_pools_info(msg, device);
+ if (ret)
+ goto err_free;
+
+ nlmsg_end(msg, nlh);
+ ib_device_put(device);
+
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
+
+err_free:
+ nlmsg_free(msg);
+err:
+ ib_device_put(device);
+ return ret;
+}
+
+static int nldev_frmr_pools_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_frmr_pools *pools;
+ int err, ret = 0, idx = 0;
+ struct ib_frmr_pool *pool;
+ struct nlattr *table_attr;
+ struct nlattr *entry_attr;
+ struct ib_device *device;
+ int start = cb->args[0];
+ struct rb_node *node;
+ struct nlmsghdr *nlh;
+ bool filled = false;
+
+ err = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, NULL);
+ if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ device = ib_device_get_by_index(
+ sock_net(skb->sk), nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]));
+ if (!device)
+ return -EINVAL;
+
+ pools = device->frmr_pools;
+ if (!pools) {
+ ib_device_put(device);
+ return 0;
+ }
+
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NLDEV_CMD_FRMR_POOLS_GET),
+ 0, NLM_F_MULTI);
+
+ if (!nlh || fill_nldev_handle(skb, device)) {
+ ret = -EMSGSIZE;
+ goto err;
+ }
+
+ table_attr = nla_nest_start_noflag(skb, RDMA_NLDEV_ATTR_FRMR_POOLS);
+ if (!table_attr) {
+ ret = -EMSGSIZE;
+ goto err;
+ }
+
+ read_lock(&pools->rb_lock);
+ for (node = rb_first(&pools->rb_root); node; node = rb_next(node)) {
+ pool = rb_entry(node, struct ib_frmr_pool, node);
+ if (pool->key.kernel_vendor_key)
+ continue;
+
+ if (idx < start) {
+ idx++;
+ continue;
+ }
+
+ filled = true;
+
+ entry_attr = nla_nest_start_noflag(
+ skb, RDMA_NLDEV_ATTR_FRMR_POOL_ENTRY);
+ if (!entry_attr) {
+ ret = -EMSGSIZE;
+ goto end_msg;
+ }
+
+ if (fill_frmr_pool_entry(skb, pool)) {
+ nla_nest_cancel(skb, entry_attr);
+ ret = -EMSGSIZE;
+ goto end_msg;
+ }
+
+ nla_nest_end(skb, entry_attr);
+ idx++;
+ }
+end_msg:
+ read_unlock(&pools->rb_lock);
+
+ nla_nest_end(skb, table_attr);
+ nlmsg_end(skb, nlh);
+ cb->args[0] = idx;
+
+ /*
+ * No more entries to fill, cancel the message and
+ * return 0 to mark end of dumpit.
+ */
+ if (!filled)
+ goto err;
+
+ ib_device_put(device);
+ return skb->len;
+
+err:
+ nlmsg_cancel(skb, nlh);
+ ib_device_put(device);
+ return ret;
+}
+
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
[RDMA_NLDEV_CMD_GET] = {
.doit = nldev_get_doit,
@@ -2743,6 +2993,10 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
.doit = nldev_deldev,
.flags = RDMA_NL_ADMIN_PERM,
},
+ [RDMA_NLDEV_CMD_FRMR_POOLS_GET] = {
+ .doit = nldev_frmr_pools_get_doit,
+ .dump = nldev_frmr_pools_get_dumpit,
+ },
};
static int fill_mon_netdev_rename(struct sk_buff *msg,
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index f41f0228fcd0e0b74e74b4d87611546b00f799a1..8f17ffe0190cb86131109209c45caec155ab36da 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -308,6 +308,8 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_MONITOR,
+ RDMA_NLDEV_CMD_FRMR_POOLS_GET, /* can dump */
+
RDMA_NLDEV_NUM_OPS
};
@@ -582,6 +584,21 @@ enum rdma_nldev_attr {
RDMA_NLDEV_SYS_ATTR_MONITOR_MODE, /* u8 */
RDMA_NLDEV_ATTR_STAT_OPCOUNTER_ENABLED, /* u8 */
+
+ /*
+ * FRMR Pools attributes
+ */
+ RDMA_NLDEV_ATTR_FRMR_POOLS, /* nested table */
+ RDMA_NLDEV_ATTR_FRMR_POOL_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_FRMR_POOL_KEY, /* nested table */
+ RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ATS, /* u8 */
+ RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ACCESS_FLAGS, /* u32 */
+ RDMA_NLDEV_ATTR_FRMR_POOL_KEY_VENDOR_KEY, /* u64 */
+ RDMA_NLDEV_ATTR_FRMR_POOL_KEY_NUM_DMA_BLOCKS, /* u64 */
+ RDMA_NLDEV_ATTR_FRMR_POOL_QUEUE_HANDLES, /* u32 */
+ RDMA_NLDEV_ATTR_FRMR_POOL_MAX_IN_USE, /* u64 */
+ RDMA_NLDEV_ATTR_FRMR_POOL_IN_USE, /* u64 */
+
/*
* Always the end
*/
--
2.47.1