Message-Id: <20211222101312.1358616-1-maorg@nvidia.com>
Date: Wed, 22 Dec 2021 12:13:12 +0200
From: Maor Gottlieb <maorg@...dia.com>
To: jgg@...dia.com
Cc: alaa@...dia.com, chuck.lever@...cle.com,
linux-kernel@...r.kernel.org, linux-rdma@...r.kernel.org,
tonylu@...ux.alibaba.com, leon@...nel.org
Subject: [PATCH rdma-rc] Revert "RDMA/mlx5: Fix releasing unallocated memory in dereg MR flow"
This patch is not the full fix and still causes call traces
during mlx5_ib_dereg_mr().

This reverts commit f0ae4afe3d35e67db042c58a52909e06262b740f.

Fixes: f0ae4afe3d35 ("RDMA/mlx5: Fix releasing unallocated memory in dereg MR flow")
Signed-off-by: Maor Gottlieb <maorg@...dia.com>
---
 drivers/infiniband/hw/mlx5/mlx5_ib.h |  6 +++---
 drivers/infiniband/hw/mlx5/mr.c      | 26 ++++++++++++++------------
 2 files changed, 17 insertions(+), 15 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 4a7a56ed740b..e636e954f6bf 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -664,6 +664,7 @@ struct mlx5_ib_mr {
 
 	/* User MR data */
 	struct mlx5_cache_ent *cache_ent;
+	struct ib_umem *umem;
 
 	/* This is zero'd when the MR is allocated */
 	union {
@@ -675,7 +676,7 @@ struct mlx5_ib_mr {
 			struct list_head list;
 		};
 
-		/* Used only by kernel MRs */
+		/* Used only by kernel MRs (umem == NULL) */
 		struct {
 			void *descs;
 			void *descs_alloc;
@@ -696,9 +697,8 @@ struct mlx5_ib_mr {
 			int data_length;
 		};
 
-		/* Used only by User MRs */
+		/* Used only by User MRs (umem != NULL) */
 		struct {
-			struct ib_umem *umem;
 			unsigned int page_shift;
 			/* Current access_flags */
 			int access_flags;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 63e2129f1142..157d862fb864 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1904,18 +1904,19 @@ mlx5_alloc_priv_descs(struct ib_device *device,
 	return ret;
 }
 
-static void mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
+static void
+mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
 {
-	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
-	int size = mr->max_descs * mr->desc_size;
-
-	if (!mr->descs)
-		return;
+	if (!mr->umem && mr->descs) {
+		struct ib_device *device = mr->ibmr.device;
+		int size = mr->max_descs * mr->desc_size;
+		struct mlx5_ib_dev *dev = to_mdev(device);
 
-	dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
-			 DMA_TO_DEVICE);
-	kfree(mr->descs_alloc);
-	mr->descs = NULL;
+		dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
+				 DMA_TO_DEVICE);
+		kfree(mr->descs_alloc);
+		mr->descs = NULL;
+	}
 }
 
 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
@@ -1991,8 +1992,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 	if (mr->cache_ent) {
 		mlx5_mr_cache_free(dev, mr);
 	} else {
-		if (!udata)
-			mlx5_free_priv_descs(mr);
+		mlx5_free_priv_descs(mr);
 		kfree(mr);
 	}
 	return 0;
@@ -2079,6 +2079,7 @@ static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
 	if (err)
 		goto err_free_in;
 
+	mr->umem = NULL;
 	kfree(in);
 
 	return mr;
@@ -2205,6 +2206,7 @@ static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
 	}
 
 	mr->ibmr.device = pd->device;
+	mr->umem = NULL;
 
 	switch (mr_type) {
 	case IB_MR_TYPE_MEM_REG:
--
2.25.4