Message-ID:
<DB9PR04MB8409F4DE065B0EBFD9B45D2CE76FA@DB9PR04MB8409.eurprd04.prod.outlook.com>
Date: Thu, 5 Jun 2025 08:49:34 +0000
From: Gaurav Jain <gaurav.jain@....com>
To: Meenakshi Aggarwal <meenakshi.aggarwal@....com>,
"herbert@...dor.apana.org.au" <herbert@...dor.apana.org.au>,
"davem@...emloft.net" <davem@...emloft.net>, "linux-crypto@...r.kernel.org"
<linux-crypto@...r.kernel.org>, "linux-kernel@...r.kernel.org"
<linux-kernel@...r.kernel.org>
Subject: RE: [PATCH] crypto: caam - Set DMA alignment explicitly
Reviewed-by: Gaurav Jain <gaurav.jain@....com>
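
For reference, the rule these hunks enforce: once the request context is
reserved with the _dma variant of the reqsize setter, every lookup of that
context has to go through the matching _dma accessor, since (as far as I
understand the helpers) the _dma variants pad the area so the context can
start at a DMA-safe alignment. A minimal sketch of the pairing, using
hypothetical example_* names and a stand-in context struct, not code from
this patch:

	#include <crypto/skcipher.h>

	/* stand-in for the driver's per-request state (caam_skcipher_req_ctx) */
	struct example_req_ctx {
		struct skcipher_request fallback_req;
	};

	static int example_init(struct crypto_skcipher *tfm)
	{
		/* reserve the per-request context padded for DMA alignment */
		crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct example_req_ctx));
		return 0;
	}

	static int example_encrypt(struct skcipher_request *req)
	{
		/* must pair with set_reqsize_dma(); plain skcipher_request_ctx()
		 * would hand back a differently offset, possibly unaligned pointer
		 */
		struct example_req_ctx *rctx = skcipher_request_ctx_dma(req);

		/* ... program the hardware descriptor using rctx ... */
		return 0;
	}
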
> -----Original Message-----
> From: Meenakshi Aggarwal <meenakshi.aggarwal@....com>
> Sent: Wednesday, June 4, 2025 3:45 PM
> To: Gaurav Jain <gaurav.jain@....com>; herbert@...dor.apana.org.au;
> davem@...emloft.net; linux-crypto@...r.kernel.org; linux-
> kernel@...r.kernel.org
> Cc: Meenakshi Aggarwal <meenakshi.aggarwal@....com>
> Subject: [PATCH] crypto: caam - Set DMA alignment explicitly
>
> From: Meenakshi Aggarwal <meenakshi.aggarwal@....com>
>
> A few DMA alignment changes were missed in the original patch.
>
> Fixes: 4cb4f7c11dee ("crypto: caam - Set DMA alignment explicitly")
>
> Signed-off-by: Meenakshi Aggarwal <meenakshi.aggarwal@....com>
> ---
> drivers/crypto/caam/caamalg.c | 22 +++++++++++-----------
> drivers/crypto/caam/caamalg_qi.c | 4 ++--
> 2 files changed, 13 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
> index 2cfb1b8d8c7c..81dfbe436c20 100644
> --- a/drivers/crypto/caam/caamalg.c
> +++ b/drivers/crypto/caam/caamalg.c
> @@ -980,7 +980,7 @@ static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
> void *context)
> {
> struct aead_request *req = context;
> - struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
> + struct caam_aead_req_ctx *rctx = aead_request_ctx_dma(req);
> struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
> struct aead_edesc *edesc;
> int ecode = 0;
> @@ -1020,7 +1020,7 @@ static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
> {
> struct skcipher_request *req = context;
> struct skcipher_edesc *edesc;
> - struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
> + struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx_dma(req);
> struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
> struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
> int ivsize = crypto_skcipher_ivsize(skcipher);
> @@ -1309,7 +1309,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
> struct crypto_aead *aead = crypto_aead_reqtfm(req);
> struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
> struct device *jrdev = ctx->jrdev;
> - struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
> + struct caam_aead_req_ctx *rctx = aead_request_ctx_dma(req);
> gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
> GFP_KERNEL : GFP_ATOMIC;
> int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
> @@ -1445,7 +1445,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
> static int aead_enqueue_req(struct device *jrdev, struct aead_request *req)
> {
> struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
> - struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
> + struct caam_aead_req_ctx *rctx = aead_request_ctx_dma(req);
> struct aead_edesc *edesc = rctx->edesc;
> u32 *desc = edesc->hw_desc;
> int ret;
> @@ -1541,7 +1541,7 @@ static int aead_do_one_req(struct crypto_engine *engine, void *areq)
> {
> struct aead_request *req = aead_request_cast(areq);
> struct caam_ctx *ctx = crypto_aead_ctx_dma(crypto_aead_reqtfm(req));
> - struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
> + struct caam_aead_req_ctx *rctx = aead_request_ctx_dma(req);
> u32 *desc = rctx->edesc->hw_desc;
> int ret;
>
> @@ -1614,7 +1614,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
> {
> struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
> struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
> - struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
> + struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx_dma(req);
> struct device *jrdev = ctx->jrdev;
> gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
> GFP_KERNEL : GFP_ATOMIC;
> @@ -1778,7 +1778,7 @@ static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
> {
> struct skcipher_request *req = skcipher_request_cast(areq);
> struct caam_ctx *ctx =
> crypto_skcipher_ctx_dma(crypto_skcipher_reqtfm(req));
> - struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
> + struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx_dma(req);
> u32 *desc = rctx->edesc->hw_desc;
> int ret;
>
> @@ -1828,7 +1828,7 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
>
> if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
> ctx->xts_key_fallback)) {
> - struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
> + struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx_dma(req);
>
> skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
> skcipher_request_set_callback(&rctx->fallback_req,
> @@ -3639,10 +3639,10 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
> }
>
> ctx->fallback = fallback;
> - crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
> + crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct caam_skcipher_req_ctx) +
> crypto_skcipher_reqsize(fallback));
> } else {
> - crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
> + crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct caam_skcipher_req_ctx));
> }
>
> ret = caam_init_common(ctx, &caam_alg->caam, false);
> @@ -3659,7 +3659,7 @@ static int caam_aead_init(struct crypto_aead *tfm)
> container_of(alg, struct caam_aead_alg, aead.base);
> struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm);
>
> - crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx));
> + crypto_aead_set_reqsize_dma(tfm, sizeof(struct caam_aead_req_ctx));
>
> return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
> }
> diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
> index 65f6adb6c673..9aa2d6d97f22 100644
> --- a/drivers/crypto/caam/caamalg_qi.c
> +++ b/drivers/crypto/caam/caamalg_qi.c
> @@ -1435,7 +1435,7 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
>
> if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
> ctx->xts_key_fallback)) {
> - struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
> + struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx_dma(req);
>
> skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
> skcipher_request_set_callback(&rctx->fallback_req,
> @@ -2524,7 +2524,7 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
> }
>
> ctx->fallback = fallback;
> - crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
> + crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct caam_skcipher_req_ctx) +
> crypto_skcipher_reqsize(fallback));
> }
>
> --
> 2.25.1