Message-ID: <20251116-frmr_pools-v1-9-5eb3c8f5c9c4@nvidia.com>
Date: Sun, 16 Nov 2025 21:10:30 +0200
From: Edward Srouji <edwards@...dia.com>
To: Jason Gunthorpe <jgg@...pe.ca>, Leon Romanovsky <leon@...nel.org>, "Saeed
Mahameed" <saeedm@...dia.com>, Tariq Toukan <tariqt@...dia.com>, Mark Bloch
<mbloch@...dia.com>, Andrew Lunn <andrew+netdev@...n.ch>, "David S. Miller"
<davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>, Jakub Kicinski
<kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>
CC: <linux-kernel@...r.kernel.org>, <linux-rdma@...r.kernel.org>,
<netdev@...r.kernel.org>, Michael Guralnik <michaelgur@...dia.com>, "Edward
Srouji" <edwards@...dia.com>, Patrisious Haddad <phaddad@...dia.com>
Subject: [PATCH rdma-next 9/9] RDMA/core: Add command to set pinned FRMR
handles
From: Michael Guralnik <michaelgur@...dia.com>
Allow users to set, through netlink, the number of handles that are not
aged for a specific FRMR pool, and fill the pool up to that number.

This lets users warm up the FRMR pools to an expected number of handles
with specific attributes that fit their expected usage.
Signed-off-by: Michael Guralnik <michaelgur@...dia.com>
Reviewed-by: Patrisious Haddad <phaddad@...dia.com>
Signed-off-by: Edward Srouji <edwards@...dia.com>
---
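Note (illustration only, not part of the commit message or the diff below):
a minimal userspace sketch of how the new attribute could be driven over
NETLINK_RDMA with libnl-3. The command name RDMA_NLDEV_CMD_FRMR_POOLS_SET
and the nested key layout are assumed from earlier patches in this series;
error handling is trimmed.

/*
 * Illustrative sketch: pin "pinned" handles in the FRMR pool selected by
 * dev_index plus key attributes, using libnl-3 over NETLINK_RDMA.
 * RDMA_NLDEV_CMD_FRMR_POOLS_SET is assumed from earlier patches in this
 * series; whether plain NLA_U64 puts are accepted depends on the policy
 * introduced there.
 */
#include <linux/netlink.h>
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <rdma/rdma_netlink.h>

static int frmr_pool_set_pinned(uint32_t dev_index, uint32_t access_flags,
				uint64_t num_dma_blocks, uint32_t pinned)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *key;
	int err;

	nl_connect(sk, NETLINK_RDMA);
	nlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ,
		  RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_FRMR_POOLS_SET),
		  0, NLM_F_REQUEST | NLM_F_ACK);

	nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, dev_index);

	/* Key attributes are nested under RDMA_NLDEV_ATTR_FRMR_POOL_KEY */
	key = nla_nest_start(msg, RDMA_NLDEV_ATTR_FRMR_POOL_KEY);
	nla_put_u32(msg, RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ACCESS_FLAGS, access_flags);
	nla_put_u64(msg, RDMA_NLDEV_ATTR_FRMR_POOL_KEY_NUM_DMA_BLOCKS, num_dma_blocks);
	nla_nest_end(msg, key);

	/* Number of handles to keep pinned (not aged) in this pool */
	nla_put_u32(msg, RDMA_NLDEV_ATTR_FRMR_POOL_PINNED_HANDLES, pinned);

	nl_send_auto(sk, msg);
	err = nl_wait_for_ack(sk);

	nlmsg_free(msg);
	nl_socket_free(sk);
	return err;
}

A subsequent FRMR pools GET dump should then report the new pinned_handles
value alongside in_use, as filled in by fill_frmr_pool_entry() below.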
 drivers/infiniband/core/frmr_pools.c |  1 +
 drivers/infiniband/core/nldev.c      | 66 +++++++++++++++++++++++++++++++++---
 include/uapi/rdma/rdma_netlink.h     |  1 +
 3 files changed, 63 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/core/frmr_pools.c b/drivers/infiniband/core/frmr_pools.c
index b150bb78de3c4fd89990f7aed7874e4db94eac0a..9a27ff2d9aec20b415c187909ba660a94590b2d7 100644
--- a/drivers/infiniband/core/frmr_pools.c
+++ b/drivers/infiniband/core/frmr_pools.c
@@ -452,6 +452,7 @@ int ib_frmr_pools_set_pinned(struct ib_device *device, struct ib_frmr_key *key,
kfree(handles);
schedule_aging:
+ /* Ensure aging is scheduled to adjust to new pinned handles count */
mod_delayed_work(pools->aging_wq, &pool->aging_work, 0);
return ret;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index e22c999d164120ac070b435e92f53c15f976bf5c..5c8a4e19fdf8e82e78237d4e6ced9c519613505e 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -185,6 +185,7 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_FRMR_POOL_MAX_IN_USE] = { .type = NLA_U64 },
[RDMA_NLDEV_ATTR_FRMR_POOL_IN_USE] = { .type = NLA_U64 },
[RDMA_NLDEV_ATTR_FRMR_POOLS_AGING_PERIOD] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_FRMR_POOL_PINNED_HANDLES] = { .type = NLA_U32 },
};
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
@@ -2692,6 +2693,9 @@ static int fill_frmr_pool_entry(struct sk_buff *msg, struct ib_frmr_pool *pool)
if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_FRMR_POOL_IN_USE,
pool->in_use, RDMA_NLDEV_ATTR_PAD))
goto err_unlock;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_FRMR_POOL_PINNED_HANDLES,
+ pool->pinned_handles))
+ goto err_unlock;
spin_unlock(&pool->lock);
return 0;
@@ -2789,6 +2793,54 @@ static int nldev_frmr_pools_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
return ret;
}
+static void nldev_frmr_pools_parse_key(struct nlattr *tb[],
+ struct ib_frmr_key *key,
+ struct netlink_ext_ack *extack)
+{
+ if (tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ATS])
+ key->ats = nla_get_u8(tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ATS]);
+
+ if (tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ACCESS_FLAGS])
+ key->access_flags = nla_get_u32(
+ tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ACCESS_FLAGS]);
+
+ if (tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_VENDOR_KEY])
+ key->vendor_key = nla_get_u64(
+ tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_VENDOR_KEY]);
+
+ if (tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_NUM_DMA_BLOCKS])
+ key->num_dma_blocks = nla_get_u64(
+ tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_NUM_DMA_BLOCKS]);
+}
+
+static int nldev_frmr_pools_set_pinned(struct ib_device *device,
+ struct nlattr *tb[],
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *key_tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_frmr_key key = { 0 };
+ u32 pinned_handles = 0;
+ int err = 0;
+
+ pinned_handles =
+ nla_get_u32(tb[RDMA_NLDEV_ATTR_FRMR_POOL_PINNED_HANDLES]);
+
+ if (!tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY])
+ return -EINVAL;
+
+ err = nla_parse_nested(key_tb, RDMA_NLDEV_ATTR_MAX - 1,
+ tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY], nldev_policy,
+ extack);
+ if (err)
+ return err;
+
+ nldev_frmr_pools_parse_key(key_tb, &key, extack);
+
+ err = ib_frmr_pools_set_pinned(device, &key, pinned_handles);
+
+ return err;
+}
+
static int nldev_frmr_pools_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb)
{
@@ -2904,18 +2956,22 @@ static int nldev_frmr_pools_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
- if (!tb[RDMA_NLDEV_ATTR_FRMR_POOLS_AGING_PERIOD])
- return -EINVAL;
-
device = ib_device_get_by_index(
sock_net(skb->sk), nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]));
if (!device)
return -EINVAL;
- aging_period = nla_get_u32(tb[RDMA_NLDEV_ATTR_FRMR_POOLS_AGING_PERIOD]);
+ if (tb[RDMA_NLDEV_ATTR_FRMR_POOLS_AGING_PERIOD]) {
+ aging_period = nla_get_u32(
+ tb[RDMA_NLDEV_ATTR_FRMR_POOLS_AGING_PERIOD]);
+ err = ib_frmr_pools_set_aging_period(device, aging_period);
+ goto done;
+ }
- err = ib_frmr_pools_set_aging_period(device, aging_period);
+ if (tb[RDMA_NLDEV_ATTR_FRMR_POOL_PINNED_HANDLES])
+ err = nldev_frmr_pools_set_pinned(device, tb, extack);
+done:
ib_device_put(device);
return err;
}
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index f9c295caf2b1625e3636d4279a539d481fdeb4ac..39178df104f01d19a8135554adece66be881fd15 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -601,6 +601,7 @@ enum rdma_nldev_attr {
RDMA_NLDEV_ATTR_FRMR_POOL_MAX_IN_USE, /* u64 */
RDMA_NLDEV_ATTR_FRMR_POOL_IN_USE, /* u64 */
RDMA_NLDEV_ATTR_FRMR_POOLS_AGING_PERIOD, /* u32 */
+ RDMA_NLDEV_ATTR_FRMR_POOL_PINNED_HANDLES, /* u32 */
/*
* Always the end
--
2.47.1