Message-ID: <20250911110303.GS341237@unreal>
Date: Thu, 11 Sep 2025 14:03:03 +0300
From: Leon Romanovsky <leonro@...dia.com>
To: Stephen Rothwell <sfr@...b.auug.org.au>
CC: Jason Gunthorpe <jgg@...dia.com>, Kalesh AP
<kalesh-anakkur.purayil@...adcom.com>, Linux Kernel Mailing List
<linux-kernel@...r.kernel.org>, Linux Next Mailing List
<linux-next@...r.kernel.org>, Saravanan Vajravel
<saravanan.vajravel@...adcom.com>
Subject: Re: linux-next: manual merge of the rdma tree with Linus' tree

On Thu, Sep 11, 2025 at 12:23:30PM +1000, Stephen Rothwell wrote:
> Hi all,
>
> Today's linux-next merge of the rdma tree got a conflict in:
>
> drivers/infiniband/hw/bnxt_re/main.c
>
> between commit:
>
> ba60a1e8cbbd ("RDMA/bnxt_re: Fix a possible memory leak in the driver")
>
> from Linus' tree and commits:
>
> bebe1a1bb1cf ("RDMA/bnxt_re: Refactor stats context memory allocation")
> b8f4e7f1a275 ("RDMA/bnxt_re: Add support for unique GID")
>
> from the rdma tree.
>
> I fixed it up (see below) and can carry the fix as necessary. This
> is now fixed as far as linux-next is concerned, but any non-trivial
> conflicts should be mentioned to your upstream maintainer when your tree
> is submitted for merging. You may also want to consider cooperating
> with the maintainer of the conflicting tree to minimise any particularly
> complex conflicts.
>
> --
> Cheers,
> Stephen Rothwell
>
> diff --cc drivers/infiniband/hw/bnxt_re/main.c
> index df7cf8d68e27,d8d3999d329e..000000000000
> --- a/drivers/infiniband/hw/bnxt_re/main.c
> +++ b/drivers/infiniband/hw/bnxt_re/main.c
> @@@ -2017,28 -2046,72 +2046,94 @@@ static void bnxt_re_free_nqr_mem(struc
> rdev->nqr = NULL;
> }
>
> +/* When DEL_GID fails, the driver does not free the GID ctx memory.
> + * To avoid the memory leak, free the memory during unload
> + */
> +static void bnxt_re_free_gid_ctx(struct bnxt_re_dev *rdev)
> +{
> + struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
> + struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
> + int i;
> +
> + if (!sgid_tbl->active)
> + return;
> +
> + ctx_tbl = sgid_tbl->ctx;
> + for (i = 0; i < sgid_tbl->max; i++) {
> + if (sgid_tbl->hw_id[i] == 0xFFFF)
> + continue;
> +
> + ctx = ctx_tbl[i];
> + kfree(ctx);
> + }
> +}
> +
> + static int bnxt_re_get_stats_ctx(struct bnxt_re_dev *rdev)
> + {
> + struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
> + struct bnxt_qplib_res *res = &rdev->qplib_res;
> + int rc;
> +
> + rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats);
> + if (rc)
> + return rc;
> +
> + rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats);
> + if (rc)
> + goto free_stat_mem;
> +
> + return 0;
> + free_stat_mem:
> + bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats);
> +
> + return rc;
> + }
> +
> + static int bnxt_re_get_stats3_ctx(struct bnxt_re_dev *rdev)
> + {
> + struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
> + struct bnxt_qplib_res *res = &rdev->qplib_res;
> + int rc;
> +
> + if (!rdev->rcfw.roce_mirror)
> + return 0;
> +
> + rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats3);
> + if (rc)
> + return rc;
> +
> + rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats3);
> + if (rc)
> + goto free_stat_mem;
> +
> + return 0;
> + free_stat_mem:
> + bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats3);
> +
> + return rc;
> + }
> +
> + static void bnxt_re_put_stats3_ctx(struct bnxt_re_dev *rdev)
> + {
> + struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
> + struct bnxt_qplib_res *res = &rdev->qplib_res;
> +
> + if (!rdev->rcfw.roce_mirror)
> + return;
> +
> + bnxt_re_net_stats_ctx_free(rdev, hctx->stats3.fw_id);
> + bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats3);
> + }
> +
> + static void bnxt_re_put_stats_ctx(struct bnxt_re_dev *rdev)
> + {
> + struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
> + struct bnxt_qplib_res *res = &rdev->qplib_res;
> +
> + bnxt_re_net_stats_ctx_free(rdev, hctx->stats.fw_id);
> + bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats);
> + }
> +
> static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
> {
> u8 type;
> @@@ -2049,10 -2122,8 +2144,9 @@@
> bnxt_re_net_unregister_async_event(rdev);
> bnxt_re_uninit_dcb_wq(rdev);
>
> - if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
> - cancel_delayed_work_sync(&rdev->worker);
> + bnxt_re_put_stats3_ctx(rdev);
>
> + bnxt_re_free_gid_ctx(rdev);
> if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
> &rdev->flags))
> bnxt_re_cleanup_res(rdev);

Looks right, thanks.
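
For anyone following along, the resolved teardown in bnxt_re_dev_uninit()
should read roughly as below once both sides are merged (a sketch
reconstructed from the diff above, with unrelated lines elided as "..."):

	static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
	{
		u8 type;
		...
		bnxt_re_net_unregister_async_event(rdev);
		bnxt_re_uninit_dcb_wq(rdev);

		/* From the rdma tree: release the mirror (stats3) context */
		bnxt_re_put_stats3_ctx(rdev);

		/* From Linus' tree: free GID ctx memory left behind by a
		 * failed DEL_GID before tearing down the resources
		 */
		bnxt_re_free_gid_ctx(rdev);

		if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
				       &rdev->flags))
			bnxt_re_cleanup_res(rdev);
		...
	}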