Message-Id: <8db24ef3234a6d67a12bbf3137e763cd817df5c8.1654601897.git.leonro@nvidia.com>
Date: Tue, 7 Jun 2022 14:40:13 +0300
From: Leon Romanovsky <leon@...nel.org>
To: Jason Gunthorpe <jgg@...dia.com>
Cc: Aharon Landau <aharonl@...dia.com>, linux-rdma@...r.kernel.org,
netdev@...r.kernel.org, Saeed Mahameed <saeedm@...dia.com>
Subject: [PATCH rdma-next 3/5] RDMA/mlx5: Store the number of in_use cache mkeys instead of total_mrs

From: Aharon Landau <aharonl@...dia.com>

total_mrs is used only to calculate the number of mkeys currently in
use. To simplify things, replace it with a new member called "in_use"
that directly stores the number of mkeys currently in use.

Signed-off-by: Aharon Landau <aharonl@...dia.com>
Signed-off-by: Leon Romanovsky <leonro@...dia.com>
---
 drivers/infiniband/hw/mlx5/mlx5_ib.h |  4 +---
 drivers/infiniband/hw/mlx5/mr.c      | 30 ++++++++++++++--------------
 2 files changed, 16 insertions(+), 18 deletions(-)
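
For readers less familiar with the mkey cache, the accounting change can
be summarized with a minimal, self-contained sketch; the struct and
helper names below are illustrative only and are not the driver code,
but the arithmetic mirrors what size_write()/size_read() now do with
ent->in_use:

	/*
	 * Illustrative sketch only -- not the mlx5 driver code.  Before
	 * this patch the in-use count had to be derived from total_mrs;
	 * after it, in_use is tracked directly and the total is derived
	 * from stored + in_use.
	 */
	struct cache_ent_sketch {
		unsigned long stored;	/* mkeys currently held in the cache */
		unsigned long in_use;	/* mkeys currently handed out to users */
	};

	/* Old scheme: in-use mkeys = total_mrs - stored */
	static unsigned long sketch_old_in_use(unsigned long total_mrs,
					       unsigned long stored)
	{
		return total_mrs - stored;
	}

	/* New scheme: total mkeys = stored + in_use (what size_read() reports) */
	static unsigned long sketch_new_total(const struct cache_ent_sketch *ent)
	{
		return ent->stored + ent->in_use;
	}

The practical effect in the diff below is that the counter updates move
from the mkey create/destroy paths (total_mrs++/--) to the alloc/dereg
paths (in_use++/--).
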
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 500f1a231106..47515dc27b51 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -749,12 +749,10 @@ struct mlx5_cache_ent {
u8 fill_to_high_water:1;
/*
- * - total_mrs is stored mkeys plus all in use MRs that could be
- * returned to the cache.
* - limit is the low water mark for stored mkeys, 2* limit is the
* upper water mark.
*/
- u32 total_mrs;
+ u32 in_use;
u32 limit;
/* Statistics */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 9cd34d6817b3..80672d275d77 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -259,7 +259,6 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
xa_lock_irqsave(&ent->mkeys, flags);
push_to_reserved(ent, mr);
- ent->total_mrs++;
/* If we are doing fill_to_high_water then keep going. */
queue_adjust_cache_locked(ent);
xa_unlock_irqrestore(&ent->mkeys, flags);
@@ -382,9 +381,6 @@ static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent)
init_waitqueue_head(&mr->mmkey.wait);
mr->mmkey.type = MLX5_MKEY_MR;
WRITE_ONCE(ent->dev->cache.last_add, jiffies);
- xa_lock_irq(&ent->mkeys);
- ent->total_mrs++;
- xa_unlock_irq(&ent->mkeys);
kfree(in);
return mr;
free_mr:
@@ -402,7 +398,6 @@ static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
if (!ent->stored)
return;
mr = pop_stored_mkey(ent);
- ent->total_mrs--;
xa_unlock_irq(&ent->mkeys);
mlx5_core_destroy_mkey(ent->dev->mdev, mr->mmkey.key);
kfree(mr);
@@ -458,11 +453,11 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
* mkeys.
*/
xa_lock_irq(&ent->mkeys);
- if (target < ent->total_mrs - ent->stored) {
+ if (target < ent->in_use) {
err = -EINVAL;
goto err_unlock;
}
- target = target - (ent->total_mrs - ent->stored);
+ target = target - ent->in_use;
if (target < ent->limit || target > ent->limit*2) {
err = -EINVAL;
goto err_unlock;
@@ -486,7 +481,7 @@ static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
char lbuf[20];
int err;
- err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs);
+ err = snprintf(lbuf, sizeof(lbuf), "%ld\n", ent->stored + ent->in_use);
if (err < 0)
return err;
@@ -680,13 +675,19 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
return ERR_PTR(-EOPNOTSUPP);
xa_lock_irq(&ent->mkeys);
+ ent->in_use++;
+
if (!ent->stored) {
queue_adjust_cache_locked(ent);
ent->miss++;
xa_unlock_irq(&ent->mkeys);
mr = create_cache_mr(ent);
- if (IS_ERR(mr))
+ if (IS_ERR(mr)) {
+ xa_lock_irq(&ent->mkeys);
+ ent->in_use--;
+ xa_unlock_irq(&ent->mkeys);
return mr;
+ }
} else {
mr = pop_stored_mkey(ent);
queue_adjust_cache_locked(ent);
@@ -718,7 +719,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
xa_lock_irq(&ent->mkeys);
while (ent->stored) {
mr = pop_stored_mkey(ent);
- ent->total_mrs--;
xa_unlock_irq(&ent->mkeys);
mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
kfree(mr);
@@ -1643,13 +1643,13 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
/* Stop DMA */
if (mr->cache_ent) {
+ xa_lock_irq(&mr->cache_ent->mkeys);
+ mr->cache_ent->in_use--;
+ xa_unlock_irq(&mr->cache_ent->mkeys);
+
if (mlx5r_umr_revoke_mr(mr) ||
- push_reserve_mkey(mr->cache_ent, false)) {
- xa_lock_irq(&mr->cache_ent->mkeys);
- mr->cache_ent->total_mrs--;
- xa_unlock_irq(&mr->cache_ent->mkeys);
+ push_reserve_mkey(mr->cache_ent, false))
mr->cache_ent = NULL;
- }
}
if (!mr->cache_ent) {
rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
--
2.36.1