Message-ID: <ea12fb42-97e4-0321-da0d-049a0650db9f@fujitsu.com>
Date: Wed, 29 Sep 2021 04:37:22 +0000
From: "lizhijian@...itsu.com" <lizhijian@...itsu.com>
To: Jason Gunthorpe <jgg@...dia.com>,
"lizhijian@...itsu.com" <lizhijian@...itsu.com>
CC: "leon@...nel.org" <leon@...nel.org>,
"dledford@...hat.com" <dledford@...hat.com>,
"linux-rdma@...r.kernel.org" <linux-rdma@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH] IB/mlx5: unify return value to ENOENT
So sorry, I missed this reply when I pinged the thread earlier.
On 29/09/2021 01:08, Jason Gunthorpe wrote:
> On Fri, Sep 03, 2021 at 04:48:15PM +0800, Li Zhijian wrote:
>> Previously, ENOENT or EINVAL would be returned by ibv_advise_mr() even
>> though the errors all originate in get_prefetchable_mr().
> What do you think about this instead?
Thank you, it's much better.
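
For anyone skimming the thread: the improvement below is the standard
ERR_PTR()/IS_ERR()/PTR_ERR() idiom, where the lookup helper tags each
failure with its own errno and every caller simply forwards that code.
A generic sketch of the pattern (struct foo, table_find() and
foo_is_accessible() are made-up names for illustration, not the mlx5
code):

#include <linux/err.h>
#include <linux/types.h>

struct foo { u32 key; };

/* Made-up helpers, declared only to keep the sketch self-contained. */
static struct foo *table_find(u32 key);
static bool foo_is_accessible(struct foo *f);

static struct foo *foo_lookup(u32 key)
{
	struct foo *f = table_find(key);

	if (!f)
		return ERR_PTR(-ENOENT);	/* no such object */
	if (!foo_is_accessible(f))
		return ERR_PTR(-EPERM);		/* exists, but not allowed */
	return f;
}

static int foo_use(u32 key)
{
	struct foo *f = foo_lookup(key);

	if (IS_ERR(f))
		return PTR_ERR(f);	/* the original errno flows back intact */
	/* ... use f ... */
	return 0;
}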
Thanks
Zhijian
>
> From b739920ed4869decb02a0dbc58256e6c72ec7061 Mon Sep 17 00:00:00 2001
> From: Jason Gunthorpe <jgg@...dia.com>
> Date: Fri, 3 Sep 2021 16:48:15 +0800
> Subject: [PATCH] IB/mlx5: Flow through a more detailed return code from
> get_prefetchable_mr()
>
> The error returns for the various cases detected by get_prefetchable_mr()
> get confused as they flow back to userspace. Properly label each error
> path and flow the error code back to the system call.
>
> Suggested-by: Li Zhijian <lizhijian@...fujitsu.com>
> Signed-off-by: Jason Gunthorpe <jgg@...dia.com>
> ---
> drivers/infiniband/hw/mlx5/odp.c | 40 ++++++++++++++++++++------------
> 1 file changed, 25 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
> index d0d98e584ebcc3..77890a85fc2dd3 100644
> --- a/drivers/infiniband/hw/mlx5/odp.c
> +++ b/drivers/infiniband/hw/mlx5/odp.c
> @@ -1708,20 +1708,26 @@ get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
>  
>  	xa_lock(&dev->odp_mkeys);
>  	mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
> -	if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR)
> +	if (!mmkey || mmkey->key != lkey) {
> +		mr = ERR_PTR(-ENOENT);
>  		goto end;
> +	}
> +	if (mmkey->type != MLX5_MKEY_MR) {
> +		mr = ERR_PTR(-EINVAL);
> +		goto end;
> +	}
>  
>  	mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
>  
>  	if (mr->ibmr.pd != pd) {
> -		mr = NULL;
> +		mr = ERR_PTR(-EPERM);
>  		goto end;
>  	}
>  
>  	/* prefetch with write-access must be supported by the MR */
>  	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
>  	    !mr->umem->writable) {
> -		mr = NULL;
> +		mr = ERR_PTR(-EPERM);
>  		goto end;
>  	}
>  
> @@ -1753,7 +1759,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
>  	destroy_prefetch_work(work);
>  }
>  
> -static bool init_prefetch_work(struct ib_pd *pd,
> +static int init_prefetch_work(struct ib_pd *pd,
>  			       enum ib_uverbs_advise_mr_advice advice,
>  			       u32 pf_flags, struct prefetch_mr_work *work,
>  			       struct ib_sge *sg_list, u32 num_sge)
> @@ -1764,17 +1770,19 @@ static bool init_prefetch_work(struct ib_pd *pd,
>  	work->pf_flags = pf_flags;
>  
>  	for (i = 0; i < num_sge; ++i) {
> +		struct mlx5_ib_mr *mr;
> +
> +		mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
> +		if (IS_ERR(mr)) {
> +			work->num_sge = i;
> +			return PTR_ERR(mr);
> +		}
>  		work->frags[i].io_virt = sg_list[i].addr;
>  		work->frags[i].length = sg_list[i].length;
> -		work->frags[i].mr =
> -			get_prefetchable_mr(pd, advice, sg_list[i].lkey);
> -		if (!work->frags[i].mr) {
> -			work->num_sge = i;
> -			return false;
> -		}
> +		work->frags[i].mr = mr;
>  	}
>  	work->num_sge = num_sge;
> -	return true;
> +	return 0;
>  }
>  
>  static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
> @@ -1790,8 +1798,8 @@ static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
>  		struct mlx5_ib_mr *mr;
>  
>  		mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
> -		if (!mr)
> -			return -ENOENT;
> +		if (IS_ERR(mr))
> +			return PTR_ERR(mr);
>  		ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
>  				   &bytes_mapped, pf_flags);
>  		if (ret < 0) {
> @@ -1811,6 +1819,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
>  {
>  	u32 pf_flags = 0;
>  	struct prefetch_mr_work *work;
> +	int rc;
>  
>  	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
>  		pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
> @@ -1826,9 +1835,10 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
>  	if (!work)
>  		return -ENOMEM;
>  
> -	if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
> +	rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge);
> +	if (rc) {
>  		destroy_prefetch_work(work);
> -		return -EINVAL;
> +		return rc;
>  	}
>  	queue_work(system_unbound_wq, &work->work);
>  	return 0;
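
To make the userspace effect concrete: ibv_advise_mr() hands the kernel's
return code straight back to the caller, so with this patch an application
can tell the cases apart: ENOENT when the lkey does not resolve to an
mkey, EINVAL when the mkey is not an MR, and EPERM when the PD does not
match or write permission is missing. A minimal sketch against the
standard libibverbs API (pd, mr and buf come from the usual
ibv_alloc_pd()/ibv_reg_mr() setup, omitted here):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <infiniband/verbs.h>

/* Ask the kernel to prefetch a registered ODP region for write access. */
static void prefetch_for_write(struct ibv_pd *pd, struct ibv_mr *mr,
			       void *buf, uint32_t len)
{
	struct ibv_sge sge = {
		.addr   = (uintptr_t)buf,
		.length = len,
		.lkey   = mr->lkey,
	};
	int ret = ibv_advise_mr(pd, IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE,
				IBV_ADVISE_MR_FLAG_FLUSH, &sge, 1);

	if (ret)	/* 0 on success, otherwise an errno value */
		fprintf(stderr, "ibv_advise_mr: %s\n", strerror(ret));
}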