Message-Id: <20201228125023.098950328@linuxfoundation.org>
Date: Mon, 28 Dec 2020 13:40:42 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Leon Romanovsky <leonro@...dia.com>,
Jason Gunthorpe <jgg@...dia.com>,
Sasha Levin <sashal@...nel.org>
Subject: [PATCH 5.10 044/717] RDMA/mlx5: Fix corruption of reg_pages in mlx5_ib_rereg_user_mr()

From: Jason Gunthorpe <jgg@...dia.com>

[ Upstream commit fc3325701a6353594083f08e297d4c1965c601aa ]

reg_pages should always contain mr->npages, since when the mr is finally
de-reg'd it is always subtracted out.

If there were any error exits then mlx5_ib_rereg_user_mr() would leave
reg_pages adjusted, and this would eventually cause it to be double
subtracted.

The manipulation of reg_pages is inherently connected to the umem, so lift
it out of set_mr_fields() and only adjust it around creating/destroying a
umem.

reg_pages is only used for diagnostics in sysfs.
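
The accounting rule the patch enforces can be shown with a minimal,
self-contained C sketch. This is not the driver code: reg_pages_demo,
struct demo_mr, and the demo_* helpers below are hypothetical stand-ins
for priv.reg_pages, the mlx5 MR, and the umem create/release paths. The
point is only that the counter is touched exactly where the umem is
created or destroyed, so an error exit can never unbalance it:

    /* Hypothetical sketch, not drivers/infiniband code: the names below
     * stand in for priv.reg_pages (reg_pages_demo), the MR (struct
     * demo_mr), and the umem create/release paths.
     */
    #include <stdio.h>

    static long reg_pages_demo;            /* stands in for priv.reg_pages */

    struct demo_mr {
            long npages;                   /* pages currently accounted */
            int  has_umem;                 /* stands in for mr->umem != NULL */
    };

    /* Subtract exactly once, where the umem goes away. */
    static void demo_release_umem(struct demo_mr *mr)
    {
            if (!mr->has_umem)
                    return;
            reg_pages_demo -= mr->npages;
            mr->npages = 0;
            mr->has_umem = 0;
    }

    /* Add exactly once, where the umem is created; an error exit
     * returns before the counter is touched.
     */
    static int demo_get_umem(struct demo_mr *mr, long npages, int fail)
    {
            if (fail)
                    return -1;
            mr->has_umem = 1;
            mr->npages = npages;
            reg_pages_demo += mr->npages;
            return 0;
    }

    /* Re-registration releases the old umem and acquires a new one;
     * because the counter moves only inside those two helpers, a
     * failure here leaves it balanced.
     */
    static int demo_rereg(struct demo_mr *mr, long npages, int fail)
    {
            demo_release_umem(mr);
            return demo_get_umem(mr, npages, fail);
    }

    int main(void)
    {
            struct demo_mr mr = { 0, 0 };

            demo_get_umem(&mr, 16, 0);     /* initial registration */
            demo_rereg(&mr, 32, 1);        /* rereg hits an error exit */
            demo_release_umem(&mr);        /* final de-registration */
            printf("reg_pages_demo = %ld\n", reg_pages_demo); /* 0, not -16 */
            return 0;
    }

With the pre-fix layering, where the subtraction happened up front in the
rereg path and the addition inside set_mr_fields(), the same error exit
would leave the counter already decremented and the final de-registration
would subtract the old page count a second time.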
Fixes: 7d0cc6edcc70 ("IB/mlx5: Add MR cache for large UMR regions")
Link: https://lore.kernel.org/r/20201026131936.1335664-3-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@...dia.com>
Signed-off-by: Jason Gunthorpe <jgg@...dia.com>
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
drivers/infiniband/hw/mlx5/mr.c | 20 +++++++++++---------
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index b261797b258fd..3468ae804eaee 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1247,10 +1247,8 @@ err_1:
}
static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
- int npages, u64 length, int access_flags)
+ u64 length, int access_flags)
{
- mr->npages = npages;
- atomic_add(npages, &dev->mdev->priv.reg_pages);
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
mr->ibmr.length = length;
@@ -1290,8 +1288,7 @@ static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
kfree(in);
- mr->umem = NULL;
- set_mr_fields(dev, mr, 0, length, acc);
+ set_mr_fields(dev, mr, length, acc);
return &mr->ibmr;
@@ -1419,7 +1416,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
mr->umem = umem;
- set_mr_fields(dev, mr, npages, length, access_flags);
+ mr->npages = npages;
+ atomic_add(mr->npages, &dev->mdev->priv.reg_pages);
+ set_mr_fields(dev, mr, length, access_flags);
if (xlt_with_umr && !(access_flags & IB_ACCESS_ON_DEMAND)) {
/*
@@ -1531,8 +1530,6 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
start, virt_addr, length, access_flags);
- atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
-
if (!mr->umem)
return -EINVAL;
@@ -1553,12 +1550,17 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
* used.
*/
flags |= IB_MR_REREG_TRANS;
+ atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
+ mr->npages = 0;
ib_umem_release(mr->umem);
mr->umem = NULL;
+
err = mr_umem_get(dev, addr, len, access_flags, &mr->umem,
&npages, &page_shift, &ncont, &order);
if (err)
goto err;
+ mr->npages = ncont;
+ atomic_add(mr->npages, &dev->mdev->priv.reg_pages);
}
if (!mlx5_ib_can_reconfig_with_umr(dev, mr->access_flags,
@@ -1609,7 +1611,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
goto err;
}
- set_mr_fields(dev, mr, npages, len, access_flags);
+ set_mr_fields(dev, mr, len, access_flags);
return 0;
--
2.27.0