[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <YQg4lehajLpQjyPd@Red>
Date: Mon, 2 Aug 2021 20:25:25 +0200
From: Corentin Labbe <clabbe.montjoie@...il.com>
To: Dongliang Mu <mudongliangabcd@...il.com>
Cc: Herbert Xu <herbert@...dor.apana.org.au>,
"David S. Miller" <davem@...emloft.net>,
Maxime Ripard <mripard@...nel.org>,
Chen-Yu Tsai <wens@...e.org>,
Jernej Skrabec <jernej.skrabec@...il.com>,
Ard Biesheuvel <ardb@...nel.org>,
Jonathan Corbet <corbet@....net>,
Eric Biggers <ebiggers@...gle.com>,
Xiang Chen <chenxiang66@...ilicon.com>,
Mauro Carvalho Chehab <mchehab+huawei@...nel.org>,
Corentin Labbe <clabbe@...libre.com>,
"Jason A. Donenfeld" <Jason@...c4.com>,
linux-crypto@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
linux-sunxi@...ts.linux.dev, linux-kernel@...r.kernel.org
Subject: Re: [PATCH] crypto: sun8i-ce: fix multiple memory leaks in
sun8i_ce_hash_run
Le Mon, Jul 26, 2021 at 11:27:12PM +0800, Dongliang Mu a écrit :
> In sun8i_ce_hash_run, all of the dma_map_sg/dma_map_single calls will leak
> their mappings, because there is no corresponding unmap operation on the error paths.
>
> Fix this by adding the missing unmap operations to the error paths for all the dma_map_sg/dma_map_single calls.
>
I think the commit message could be better worded: the error handling is already there, but it is incomplete (it misses the unmap calls).
> Fixes: 56f6d5aee88d ("crypto: sun8i-ce - support hash algorithms")
> Signed-off-by: Dongliang Mu <mudongliangabcd@...il.com>
> ---
> .../crypto/allwinner/sun8i-ce/sun8i-ce-hash.c | 28 +++++++++----------
> 1 file changed, 13 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
> index 88194718a806..d454ad99deee 100644
> --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
> +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
> @@ -286,16 +286,14 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
>
> /* the padding could be up to two block. */
> buf = kzalloc(bs * 2, GFP_KERNEL | GFP_DMA);
> - if (!buf) {
> - err = -ENOMEM;
> - goto theend;
Please keep all of the error paths as gotos, for consistency.
> - }
> + if (!buf)
> + return -ENOMEM;
> bf = (__le32 *)buf;
>
> result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
> if (!result) {
> - err = -ENOMEM;
> - goto theend;
> + kfree(buf);
> + return -ENOMEM;
> }
>
> flow = rctx->flow;
> @@ -321,7 +319,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
> if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
> dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
> err = -EINVAL;
> - goto theend;
> + goto err_result;
> }
>
> len = areq->nbytes;
> @@ -334,7 +332,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
> if (len > 0) {
> dev_err(ce->dev, "remaining len %d\n", len);
> err = -EINVAL;
> - goto theend;
> + goto err_unmap_sg;
> }
> addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
> cet->t_dst[0].addr = cpu_to_le32(addr_res);
> @@ -342,7 +340,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
> if (dma_mapping_error(ce->dev, addr_res)) {
> dev_err(ce->dev, "DMA map dest\n");
> err = -EINVAL;
> - goto theend;
> + goto err_unmap_sg;
> }
>
> byte_count = areq->nbytes;
> @@ -392,7 +390,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
> if (dma_mapping_error(ce->dev, addr_pad)) {
> dev_err(ce->dev, "DMA error on padding SG\n");
> err = -EINVAL;
> - goto theend;
> + goto err_addr_res;
> }
>
> if (ce->variant->hash_t_dlen_in_bits)
> @@ -405,15 +403,15 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
> err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
>
> dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
> +err_addr_res:
> + dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
> +err_unmap_sg:
> dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
> DMA_TO_DEVICE);
> - dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
> -
> -
> memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
The result should be copied only when everything succeeded. Please guard the memcpy with an "if (!err)" check.
Thanks for your work
Regards
Powered by blists - more mailing lists