[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20230609103842.4159321-3-meenakshi.aggarwal@nxp.com>
Date: Fri, 9 Jun 2023 12:38:39 +0200
From: meenakshi.aggarwal@....com
To: horia.geanta@....com, V.sethi@....com, pankaj.gupta@....com,
gaurav.jain@....com, herbert@...dor.apana.org.au,
davem@...emloft.net, linux-crypto@...r.kernel.org,
linux-kernel@...r.kernel.org, iuliana.prodan@....com
Cc: Meenakshi Aggarwal <meenakshi.aggarwal@....com>
Subject: [PATCH v2 2/5] crypto: caam - avoid allocating memory at crypto request runtime for aead
From: Meenakshi Aggarwal <meenakshi.aggarwal@....com>
Remove the CRYPTO_ALG_ALLOCATES_MEMORY flag and allocate the memory
needed by the driver to fulfil a request within the crypto
request object.
The extra size needed for the base extended descriptor, hw
descriptor commands and link tables is computed at frontend
driver (caamalg) initialization time and saved in the reqsize
field, which indicates how much memory could be needed per request.
In reqsize we allocate memory for a maximum of 4 entries
for src and 4 for dst, aligned.
If the driver needs more than this maximum, the memory
is dynamically allocated at runtime.
Signed-off-by: Iuliana Prodan <iuliana.prodan@....com>
Signed-off-by: Meenakshi Aggarwal <meenakshi.aggarwal@....com>
---
drivers/crypto/caam/caamalg.c | 69 ++++++++++++++++++++++++++---------
1 file changed, 52 insertions(+), 17 deletions(-)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index dbc5d5eaf695..dea2a32f2f25 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -886,6 +886,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
* @mapped_dst_nents: number of segments in output h/w link table
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @bklog: stored to determine if the request needs backlog
+ * @free: stored to determine if aead_edesc needs to be freed
* @sec4_sg_dma: bus physical mapped address of h/w link table
* @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
@@ -897,6 +898,7 @@ struct aead_edesc {
int mapped_dst_nents;
int sec4_sg_bytes;
bool bklog;
+ bool free;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
u32 hw_desc[];
@@ -993,8 +995,8 @@ static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
ecode = caam_jr_strstatus(jrdev, err);
aead_unmap(jrdev, edesc, req);
-
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
/*
* If no backlog flag, the completion of the request is done
@@ -1313,7 +1315,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
int src_len, dst_len = 0;
struct aead_edesc *edesc;
- int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
+ int sec4_sg_index, sec4_sg_len, sec4_sg_bytes, edesc_size = 0;
unsigned int authsize = ctx->authsize;
if (unlikely(req->dst != req->src)) {
@@ -1393,12 +1395,30 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
- /* allocate space for base edesc and hw desc commands, link tables */
- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, flags);
- if (!edesc) {
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
- return ERR_PTR(-ENOMEM);
+ /* Check if there's enough space for edesc saved in req */
+ edesc_size = sizeof(*edesc) + desc_bytes + sec4_sg_bytes;
+ if (edesc_size > (crypto_aead_reqsize(aead) -
+ sizeof(struct caam_aead_req_ctx))) {
+ /*
+ * allocate space for base edesc and
+ * hw desc commands, link tables
+ */
+ edesc = kzalloc(edesc_size, flags);
+ if (!edesc) {
+ caam_unmap(jrdev, req->src, req->dst, src_nents,
+ dst_nents, 0, 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+ edesc->free = true;
+ } else {
+ /*
+ * get address for base edesc and
+ * hw desc commands, link tables
+ */
+ edesc = (struct aead_edesc *)((u8 *)rctx +
+ sizeof(struct caam_aead_req_ctx));
+ /* clear memory */
+ memset(edesc, 0, sizeof(*edesc));
}
edesc->src_nents = src_nents;
@@ -1431,7 +1451,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
aead_unmap(jrdev, edesc, req);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1461,7 +1482,8 @@ static int aead_enqueue_req(struct device *jrdev, struct aead_request *req)
if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
aead_unmap(jrdev, edesc, req);
- kfree(rctx->edesc);
+ if (rctx->edesc->free)
+ kfree(rctx->edesc);
}
return ret;
@@ -1552,7 +1574,8 @@ static int aead_do_one_req(struct crypto_engine *engine, void *areq)
if (ret != -EINPROGRESS) {
aead_unmap(ctx->jrdev, rctx->edesc, req);
- kfree(rctx->edesc);
+ if (rctx->edesc->free)
+ kfree(rctx->edesc);
} else {
ret = 0;
}
@@ -3450,8 +3473,10 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
/* link tables for src and dst:
* 4 entries max + 1 for IV, aligned = 8
*/
- (16 * sizeof(struct sec4_sg_entry)) +
- AES_BLOCK_SIZE; /* ivsize */
+ (16 * sizeof(struct sec4_sg_entry));
+ extra_reqsize = ALIGN(extra_reqsize, dma_get_cache_alignment());
+ extra_reqsize += ~(ARCH_KMALLOC_MINALIGN - 1) & (dma_get_cache_alignment() - 1);
+ extra_reqsize += ALIGN(AES_BLOCK_SIZE, dma_get_cache_alignment()); /* ivsize */
if (alg_aai == OP_ALG_AAI_XTS) {
const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
@@ -3487,8 +3512,19 @@ static int caam_aead_init(struct crypto_aead *tfm)
struct caam_aead_alg *caam_alg =
container_of(alg, struct caam_aead_alg, aead);
struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm);
+ int extra_reqsize = 0;
+
+ /*
+ * Compute extra space needed for base edesc and
+ * hw desc commands, link tables, IV
+ */
+ extra_reqsize = sizeof(struct aead_edesc) +
+ /* max size for hw desc commands */
+ (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6) +
+ /* link tables for src and dst, 4 entries max, aligned */
+ (8 * sizeof(struct sec4_sg_entry));
- crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx));
+ crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx) + extra_reqsize);
ctx->enginectx.op.do_one_request = aead_do_one_req;
@@ -3557,8 +3593,7 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->init = caam_aead_init;
alg->exit = caam_aead_exit;
--
2.25.1
Powered by blists - more mailing lists