Message-Id: <20230609103842.4159321-6-meenakshi.aggarwal@nxp.com>
Date:   Fri,  9 Jun 2023 12:38:42 +0200
From:   meenakshi.aggarwal@....com
To:     horia.geanta@....com, V.sethi@....com, pankaj.gupta@....com,
        gaurav.jain@....com, herbert@...dor.apana.org.au,
        davem@...emloft.net, linux-crypto@...r.kernel.org,
        linux-kernel@...r.kernel.org, iuliana.prodan@....com
Cc:     Meenakshi Aggarwal <meenakshi.aggarwal@....com>
Subject: [PATCH v2 5/5] crypto: caam/qi2 - avoid allocating memory at crypto request runtime

From: Meenakshi Aggarwal <meenakshi.aggarwal@....com>

Remove the CRYPTO_ALG_ALLOCATES_MEMORY flag and allocate the memory
needed by the driver to fulfil a request within the crypto request
object.
The extra size needed for the base extended descriptor, HW descriptor
commands and link tables is computed at frontend driver (caamalg_qi2)
initialization time and saved in the reqsize field, which indicates
how much memory may be needed per request.

In reqsize we reserve memory for a maximum of 4 S/G entries for src
and 4 for dst, aligned.
If a request needs more than these 4 entries, the memory is allocated
dynamically, at runtime.

Signed-off-by: Iuliana Prodan <iuliana.prodan@....com>
Signed-off-by: Meenakshi Aggarwal <meenakshi.aggarwal@....com>
---
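Note: every converted entry point in this patch follows the same pattern:
compute the extended descriptor size first, place the edesc in the space
reserved through *_set_reqsize_dma() when it fits, and fall back to
qi_cache_zalloc() otherwise, with edesc->free recording which path was
taken so the completion and error paths know whether qi_cache_free() is
needed. The user-space sketch below only models that idea with simplified,
hypothetical names (struct edesc, edesc_get(), EDESC_RESERVED); it is
illustrative and not the driver code:

#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Minimal user-space model of the allocation scheme used in this patch.
 * Names and sizes are illustrative only; the real driver uses
 * qi_cache_zalloc()/qi_cache_free() and the crypto API reqsize helpers.
 */

struct edesc {
	bool free;		/* true when allocated from the heap */
	int qm_sg_bytes;
	char sgt[];		/* link table follows the header */
};

/* Space reserved per request, analogous to the extra reqsize. */
#define EDESC_RESERVED	(sizeof(struct edesc) + 16 * 8)

struct request {
	/* ...other per-request state would live here... */
	alignas(max_align_t) char edesc_area[EDESC_RESERVED];
};

static struct edesc *edesc_get(struct request *req, int qm_sg_bytes)
{
	size_t need = sizeof(struct edesc) + qm_sg_bytes;
	struct edesc *edesc;

	if (need > EDESC_RESERVED) {
		/* Too big for the reserved area: fall back to the heap
		 * (qi_cache_zalloc() in the real driver).
		 */
		edesc = calloc(1, need);
		if (!edesc)
			return NULL;
		edesc->free = true;
	} else {
		/* Reuse the space already reserved within the request. */
		edesc = (struct edesc *)req->edesc_area;
		memset(edesc, 0, sizeof(*edesc));
	}
	edesc->qm_sg_bytes = qm_sg_bytes;
	return edesc;
}

static void edesc_put(struct edesc *edesc)
{
	/* Only free what was actually heap-allocated. */
	if (edesc->free)
		free(edesc);
}

int main(void)
{
	struct request req;
	struct edesc *small = edesc_get(&req, 64);	/* fits in reqsize */
	struct edesc *large = edesc_get(&req, 4096);	/* needs fallback */

	if (!small || !large)
		return 1;
	printf("small heap-allocated: %d\n", small->free);
	printf("large heap-allocated: %d\n", large->free);
	edesc_put(large);
	edesc_put(small);
	return 0;
}

A request whose link table fits in the reserved area therefore needs no
allocation at all, which is what allows dropping CRYPTO_ALG_ALLOCATES_MEMORY.
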
 drivers/crypto/caam/caamalg_qi2.c | 421 ++++++++++++++++++++----------
 drivers/crypto/caam/caamalg_qi2.h |   6 +
 2 files changed, 293 insertions(+), 134 deletions(-)

diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 5c8d35edaa1c..5bf6e29ec2f5 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -367,17 +367,10 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	dma_addr_t qm_sg_dma, iv_dma = 0;
 	int ivsize = 0;
 	unsigned int authsize = ctx->authsize;
-	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
+	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes, edesc_size = 0;
 	int in_len, out_len;
 	struct dpaa2_sg_entry *sg_table;
 
-	/* allocate space for base edesc, link tables and IV */
-	edesc = qi_cache_zalloc(flags);
-	if (unlikely(!edesc)) {
-		dev_err(dev, "could not allocate extended descriptor\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
 	if (unlikely(req->dst != req->src)) {
 		src_len = req->assoclen + req->cryptlen;
 		dst_len = src_len + (encrypt ? authsize : (-authsize));
@@ -386,7 +379,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		if (unlikely(src_nents < 0)) {
 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
 				src_len);
-			qi_cache_free(edesc);
 			return ERR_PTR(src_nents);
 		}
 
@@ -394,7 +386,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		if (unlikely(dst_nents < 0)) {
 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
 				dst_len);
-			qi_cache_free(edesc);
 			return ERR_PTR(dst_nents);
 		}
 
@@ -403,7 +394,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 						      DMA_TO_DEVICE);
 			if (unlikely(!mapped_src_nents)) {
 				dev_err(dev, "unable to map source\n");
-				qi_cache_free(edesc);
 				return ERR_PTR(-ENOMEM);
 			}
 		} else {
@@ -417,7 +407,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 				dev_err(dev, "unable to map destination\n");
 				dma_unmap_sg(dev, req->src, src_nents,
 					     DMA_TO_DEVICE);
-				qi_cache_free(edesc);
 				return ERR_PTR(-ENOMEM);
 			}
 		} else {
@@ -431,7 +420,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		if (unlikely(src_nents < 0)) {
 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
 				src_len);
-			qi_cache_free(edesc);
 			return ERR_PTR(src_nents);
 		}
 
@@ -439,7 +427,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 					      DMA_BIDIRECTIONAL);
 		if (unlikely(!mapped_src_nents)) {
 			dev_err(dev, "unable to map source\n");
-			qi_cache_free(edesc);
 			return ERR_PTR(-ENOMEM);
 		}
 	}
@@ -469,18 +456,35 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	else
 		qm_sg_nents = pad_sg_nents(qm_sg_nents);
 
-	sg_table = &edesc->sgt[0];
 	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
-	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
-		     CAAM_QI_MEMCACHE_SIZE)) {
+
+	/* Check if there's enough space for edesc saved in req */
+	edesc_size = offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize;
+	if (unlikely(edesc_size > CAAM_QI_MEMCACHE_SIZE)) {
 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
 			qm_sg_nents, ivsize);
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
 			   0, DMA_NONE, 0, 0);
-		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
+	} else if (edesc_size > (crypto_aead_reqsize(aead) -
+				 sizeof(struct caam_request))) {
+		/* allocate space for base edesc, link tables and IV */
+		edesc = qi_cache_zalloc(flags);
+		if (unlikely(!edesc)) {
+			dev_err(dev, "could not allocate extended descriptor\n");
+			return ERR_PTR(-ENOMEM);
+		}
+		edesc->free = true;
+	} else {
+		/* get address for base edesc, link tables and IV */
+		edesc = (struct aead_edesc *)((u8 *)req_ctx +
+			 sizeof(struct caam_request));
+		/* clear memory */
+		memset(edesc, 0, sizeof(*edesc));
 	}
 
+	sg_table = &edesc->sgt[0];
+
 	if (ivsize) {
 		u8 *iv = (u8 *)(sg_table + qm_sg_nents);
 
@@ -492,7 +496,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 			dev_err(dev, "unable to map IV\n");
 			caam_unmap(dev, req->src, req->dst, src_nents,
 				   dst_nents, 0, 0, DMA_NONE, 0, 0);
-			qi_cache_free(edesc);
+			if (edesc->free)
+				qi_cache_free(edesc);
 			return ERR_PTR(-ENOMEM);
 		}
 	}
@@ -516,7 +521,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		dev_err(dev, "unable to map assoclen\n");
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
 			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
-		qi_cache_free(edesc);
+		if (edesc->free)
+			qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -538,7 +544,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
 			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
-		qi_cache_free(edesc);
+		if (edesc->free)
+			qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1123,7 +1130,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 	dma_addr_t iv_dma;
 	u8 *iv;
 	int ivsize = crypto_skcipher_ivsize(skcipher);
-	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
+	int dst_sg_idx, qm_sg_ents, qm_sg_bytes, edesc_size = 0;
 	struct dpaa2_sg_entry *sg_table;
 
 	src_nents = sg_nents_for_len(req->src, req->cryptlen);
@@ -1181,22 +1188,31 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
 
 	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
-	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
-		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+	/* Check if there's enough space for edesc saved in req */
+	edesc_size = offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + ivsize;
+	if (unlikely(edesc_size > CAAM_QI_MEMCACHE_SIZE)) {
 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
 			qm_sg_ents, ivsize);
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
 			   0, DMA_NONE, 0, 0);
 		return ERR_PTR(-ENOMEM);
-	}
-
-	/* allocate space for base edesc, link tables and IV */
-	edesc = qi_cache_zalloc(flags);
-	if (unlikely(!edesc)) {
-		dev_err(dev, "could not allocate extended descriptor\n");
-		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, DMA_NONE, 0, 0);
-		return ERR_PTR(-ENOMEM);
+	} else if (edesc_size > (crypto_skcipher_reqsize(skcipher) -
+				 sizeof(struct caam_request))) {
+		/* allocate space for base edesc, link tables and IV */
+		edesc = qi_cache_zalloc(flags);
+		if (unlikely(!edesc)) {
+			dev_err(dev, "could not allocate extended descriptor\n");
+			caam_unmap(dev, req->src, req->dst, src_nents,
+				   dst_nents, 0, 0, DMA_NONE, 0, 0);
+			return ERR_PTR(-ENOMEM);
+		}
+		edesc->free = true;
+	} else {
+		/* get address for base edesc, link tables and IV */
+		edesc = (struct skcipher_edesc *)((u8 *)req_ctx +
+			 sizeof(struct caam_request));
+		/* clear memory */
+		memset(edesc, 0, sizeof(*edesc));
 	}
 
 	/* Make sure IV is located in a DMAable area */
@@ -1209,7 +1225,8 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 		dev_err(dev, "unable to map IV\n");
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
 			   0, DMA_NONE, 0, 0);
-		qi_cache_free(edesc);
+		if (edesc->free)
+			qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1233,7 +1250,8 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 		dev_err(dev, "unable to map S/G table\n");
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
 			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
-		qi_cache_free(edesc);
+		if (edesc->free)
+			qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1297,7 +1315,8 @@ static void aead_encrypt_done(void *cbk_ctx, u32 status)
 		ecode = caam_qi2_strstatus(ctx->dev, status);
 
 	aead_unmap(ctx->dev, edesc, req);
-	qi_cache_free(edesc);
+	if (edesc->free)
+		qi_cache_free(edesc);
 	aead_request_complete(req, ecode);
 }
 
@@ -1318,7 +1337,8 @@ static void aead_decrypt_done(void *cbk_ctx, u32 status)
 		ecode = caam_qi2_strstatus(ctx->dev, status);
 
 	aead_unmap(ctx->dev, edesc, req);
-	qi_cache_free(edesc);
+	if (edesc->free)
+		qi_cache_free(edesc);
 	aead_request_complete(req, ecode);
 }
 
@@ -1344,7 +1364,8 @@ static int aead_encrypt(struct aead_request *req)
 	if (ret != -EINPROGRESS &&
 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
 		aead_unmap(ctx->dev, edesc, req);
-		qi_cache_free(edesc);
+		if (edesc->free)
+			qi_cache_free(edesc);
 	}
 
 	return ret;
@@ -1372,7 +1393,8 @@ static int aead_decrypt(struct aead_request *req)
 	if (ret != -EINPROGRESS &&
 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
 		aead_unmap(ctx->dev, edesc, req);
-		qi_cache_free(edesc);
+		if (edesc->free)
+			qi_cache_free(edesc);
 	}
 
 	return ret;
@@ -1422,7 +1444,8 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
 		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
 		       ivsize);
 
-	qi_cache_free(edesc);
+	if (edesc->free)
+		qi_cache_free(edesc);
 	skcipher_request_complete(req, ecode);
 }
 
@@ -1460,7 +1483,8 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
 		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
 		       ivsize);
 
-	qi_cache_free(edesc);
+	if (edesc->free)
+		qi_cache_free(edesc);
 	skcipher_request_complete(req, ecode);
 }
 
@@ -1516,7 +1540,8 @@ static int skcipher_encrypt(struct skcipher_request *req)
 	if (ret != -EINPROGRESS &&
 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
 		skcipher_unmap(ctx->dev, edesc, req);
-		qi_cache_free(edesc);
+		if (edesc->free)
+			qi_cache_free(edesc);
 	}
 
 	return ret;
@@ -1566,7 +1591,8 @@ static int skcipher_decrypt(struct skcipher_request *req)
 	if (ret != -EINPROGRESS &&
 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
 		skcipher_unmap(ctx->dev, edesc, req);
-		qi_cache_free(edesc);
+		if (edesc->free)
+			qi_cache_free(edesc);
 	}
 
 	return ret;
@@ -1607,7 +1633,15 @@ static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
 		container_of(alg, typeof(*caam_alg), skcipher);
 	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
 	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
-	int ret = 0;
+	int ret = 0, extra_reqsize = 0;
+
+	/* Compute extra space needed for base edesc, link tables and IV */
+	extra_reqsize = sizeof(struct skcipher_edesc) +
+			/* link tables for src and dst:
+			 * 4 entries max + 1 for IV, aligned = 8
+			 */
+			(16 * sizeof(struct dpaa2_sg_entry)) +
+			AES_BLOCK_SIZE; /* ivsize */
 
 	if (alg_aai == OP_ALG_AAI_XTS) {
 		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
@@ -1625,10 +1659,12 @@ static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
 		ctx->fallback = fallback;
 		crypto_skcipher_set_reqsize_dma(
 			tfm, sizeof(struct caam_request) +
-			     crypto_skcipher_reqsize(fallback));
+			     crypto_skcipher_reqsize(fallback) +
+			     extra_reqsize);
 	} else {
 		crypto_skcipher_set_reqsize_dma(tfm,
-						sizeof(struct caam_request));
+						sizeof(struct caam_request) +
+						extra_reqsize);
 	}
 
 	ret = caam_cra_init(ctx, &caam_alg->caam, false);
@@ -1644,7 +1680,18 @@ static int caam_cra_init_aead(struct crypto_aead *tfm)
 	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
 						      aead);
 
-	crypto_aead_set_reqsize_dma(tfm, sizeof(struct caam_request));
+	int extra_reqsize = 0;
+
+	/* Compute extra space needed for base edesc, link tables and IV */
+	extra_reqsize = sizeof(struct aead_edesc) +
+			/* link tables for src and dst:
+			 * 4 entries max + 1 for IV, aligned = 8
+			 */
+			(16 * sizeof(struct dpaa2_sg_entry)) +
+			AES_BLOCK_SIZE; /* ivsize */
+
+	crypto_aead_set_reqsize_dma(tfm, sizeof(struct caam_request) +
+				    extra_reqsize);
 	return caam_cra_init(crypto_aead_ctx_dma(tfm), &caam_alg->caam,
 			     !caam_alg->caam.nodkp);
 }
@@ -3013,8 +3060,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
 	alg->base.cra_module = THIS_MODULE;
 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
 	alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
-	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
-			      CRYPTO_ALG_KERN_DRIVER_ONLY);
+	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY);
 
 	alg->init = caam_cra_init_skcipher;
 	alg->exit = caam_cra_exit;
@@ -3027,8 +3073,7 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
 	alg->base.cra_module = THIS_MODULE;
 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
 	alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
-	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
-			      CRYPTO_ALG_KERN_DRIVER_ONLY;
+	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
 
 	alg->init = caam_cra_init_aead;
 	alg->exit = caam_cra_exit_aead;
@@ -3413,7 +3458,8 @@ static void ahash_done(void *cbk_ctx, u32 status)
 
 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
 	memcpy(req->result, state->caam_ctx, digestsize);
-	qi_cache_free(edesc);
+	if (edesc->free)
+		qi_cache_free(edesc);
 
 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
@@ -3438,7 +3484,8 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
 		ecode = caam_qi2_strstatus(ctx->dev, status);
 
 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
-	qi_cache_free(edesc);
+	if (edesc->free)
+		qi_cache_free(edesc);
 
 	scatterwalk_map_and_copy(state->buf, req->src,
 				 req->nbytes - state->next_buflen,
@@ -3478,7 +3525,8 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
 
 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
 	memcpy(req->result, state->caam_ctx, digestsize);
-	qi_cache_free(edesc);
+	if (edesc->free)
+		qi_cache_free(edesc);
 
 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
@@ -3503,7 +3551,8 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
 		ecode = caam_qi2_strstatus(ctx->dev, status);
 
 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
-	qi_cache_free(edesc);
+	if (edesc->free)
+		qi_cache_free(edesc);
 
 	scatterwalk_map_and_copy(state->buf, req->src,
 				 req->nbytes - state->next_buflen,
@@ -3541,7 +3590,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 	int in_len = *buflen + req->nbytes, to_hash;
 	int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
 	struct ahash_edesc *edesc;
-	int ret = 0;
+	int ret = 0, edesc_size = 0;
 
 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
 	to_hash = in_len - *next_buflen;
@@ -3567,18 +3616,31 @@ static int ahash_update_ctx(struct ahash_request *req)
 			mapped_nents = 0;
 		}
 
-		/* allocate space for base edesc and link tables */
-		edesc = qi_cache_zalloc(flags);
-		if (!edesc) {
-			dma_unmap_sg(ctx->dev, req->src, src_nents,
-				     DMA_TO_DEVICE);
-			return -ENOMEM;
-		}
-
-		edesc->src_nents = src_nents;
 		qm_sg_src_index = 1 + (*buflen ? 1 : 0);
 		qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
 			      sizeof(*sg_table);
+
+		/* Check if there's enough space for edesc saved in req */
+		edesc_size = sizeof(*edesc) + qm_sg_bytes;
+		if (edesc_size > (crypto_ahash_reqsize(ahash) -
+				  sizeof(struct caam_hash_state))) {
+			/* allocate space for base edesc and link tables */
+			edesc = qi_cache_zalloc(flags);
+			if (!edesc) {
+				dma_unmap_sg(ctx->dev, req->src, src_nents,
+					     DMA_TO_DEVICE);
+				return -ENOMEM;
+			}
+			edesc->free = true;
+		} else {
+			/* get address for base edesc and link tables */
+			edesc = (struct ahash_edesc *)((u8 *)state +
+				 sizeof(struct caam_hash_state));
+			/* clear memory */
+			memset(edesc, 0, sizeof(*edesc));
+		}
+
+		edesc->src_nents = src_nents;
 		sg_table = &edesc->sgt[0];
 
 		ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
@@ -3640,7 +3702,8 @@ static int ahash_update_ctx(struct ahash_request *req)
 	return ret;
 unmap_ctx:
 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
-	qi_cache_free(edesc);
+	if (edesc->free)
+		qi_cache_free(edesc);
 	return ret;
 }
 
@@ -3655,18 +3718,31 @@ static int ahash_final_ctx(struct ahash_request *req)
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		      GFP_KERNEL : GFP_ATOMIC;
 	int buflen = state->buflen;
-	int qm_sg_bytes;
+	int qm_sg_bytes, edesc_size = 0;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
 	struct dpaa2_sg_entry *sg_table;
 	int ret;
 
-	/* allocate space for base edesc and link tables */
-	edesc = qi_cache_zalloc(flags);
-	if (!edesc)
-		return -ENOMEM;
-
 	qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
+
+	/* Check if there's enough space for edesc saved in req */
+	edesc_size = sizeof(*edesc) + qm_sg_bytes;
+	if (edesc_size > (crypto_ahash_reqsize(ahash) -
+			  sizeof(struct caam_hash_state))) {
+		/* allocate space for base edesc and link tables */
+		edesc = qi_cache_zalloc(flags);
+		if (!edesc)
+			return -ENOMEM;
+		edesc->free = true;
+	} else {
+		/* get address for base edesc and link tables */
+		edesc = (struct ahash_edesc *)((u8 *)state +
+			 sizeof(struct caam_hash_state));
+		/* clear memory */
+		memset(edesc, 0, sizeof(*edesc));
+	}
+
 	sg_table = &edesc->sgt[0];
 
 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
@@ -3711,7 +3787,8 @@ static int ahash_final_ctx(struct ahash_request *req)
 
 unmap_ctx:
 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
-	qi_cache_free(edesc);
+	if (edesc->free)
+		qi_cache_free(edesc);
 	return ret;
 }
 
@@ -3726,7 +3803,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		      GFP_KERNEL : GFP_ATOMIC;
 	int buflen = state->buflen;
-	int qm_sg_bytes, qm_sg_src_index;
+	int qm_sg_bytes, qm_sg_src_index, edesc_size = 0;
 	int src_nents, mapped_nents;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
@@ -3750,17 +3827,31 @@ static int ahash_finup_ctx(struct ahash_request *req)
 		mapped_nents = 0;
 	}
 
-	/* allocate space for base edesc and link tables */
-	edesc = qi_cache_zalloc(flags);
-	if (!edesc) {
-		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
-		return -ENOMEM;
-	}
-
-	edesc->src_nents = src_nents;
 	qm_sg_src_index = 1 + (buflen ? 1 : 0);
 	qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
 		      sizeof(*sg_table);
+
+	/* Check if there's enough space for edesc saved in req */
+	edesc_size = sizeof(*edesc) + qm_sg_bytes;
+	if (edesc_size > (crypto_ahash_reqsize(ahash) -
+			  sizeof(struct caam_hash_state))) {
+		/* allocate space for base edesc and link tables */
+		edesc = qi_cache_zalloc(flags);
+		if (!edesc) {
+			dma_unmap_sg(ctx->dev, req->src, src_nents,
+				     DMA_TO_DEVICE);
+			return -ENOMEM;
+		}
+		edesc->free = true;
+	} else {
+		/* get address for base edesc and link tables */
+		edesc = (struct ahash_edesc *)((u8 *)state +
+			 sizeof(struct caam_hash_state));
+		/* clear memory */
+		memset(edesc, 0, sizeof(*edesc));
+	}
+
+	edesc->src_nents = src_nents;
 	sg_table = &edesc->sgt[0];
 
 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
@@ -3805,7 +3896,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
 
 unmap_ctx:
 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
-	qi_cache_free(edesc);
+	if (edesc->free)
+		qi_cache_free(edesc);
 	return ret;
 }
 
@@ -3820,8 +3912,9 @@ static int ahash_digest(struct ahash_request *req)
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		      GFP_KERNEL : GFP_ATOMIC;
 	int digestsize = crypto_ahash_digestsize(ahash);
-	int src_nents, mapped_nents;
+	int src_nents, mapped_nents, qm_sg_bytes, edesc_size = 0;
 	struct ahash_edesc *edesc;
+	struct dpaa2_sg_entry *sg_table;
 	int ret = -ENOMEM;
 
 	state->buf_dma = 0;
@@ -3843,21 +3936,33 @@ static int ahash_digest(struct ahash_request *req)
 		mapped_nents = 0;
 	}
 
-	/* allocate space for base edesc and link tables */
-	edesc = qi_cache_zalloc(flags);
-	if (!edesc) {
-		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
-		return ret;
+	qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
+
+	/* Check if there's enough space for edesc saved in req */
+	edesc_size = sizeof(*edesc) + qm_sg_bytes;
+	if (edesc_size > (crypto_ahash_reqsize(ahash) -
+			  sizeof(struct caam_hash_state))) {
+		/* allocate space for base edesc and link tables */
+		edesc = qi_cache_zalloc(flags);
+		if (!edesc) {
+			dma_unmap_sg(ctx->dev, req->src, src_nents,
+				     DMA_TO_DEVICE);
+			return ret;
+		}
+		edesc->free = true;
+	} else {
+		/* get address for base edesc and link tables */
+		edesc = (struct ahash_edesc *)((u8 *)state +
+			 sizeof(struct caam_hash_state));
+		/* clear memory */
+		memset(edesc, 0, sizeof(*edesc));
 	}
 
 	edesc->src_nents = src_nents;
 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
 
 	if (mapped_nents > 1) {
-		int qm_sg_bytes;
-		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
-
-		qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
+		sg_table = &edesc->sgt[0];
 		sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
 						  qm_sg_bytes, DMA_TO_DEVICE);
@@ -3900,7 +4005,8 @@ static int ahash_digest(struct ahash_request *req)
 
 unmap:
 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
-	qi_cache_free(edesc);
+	if (edesc->free)
+		qi_cache_free(edesc);
 	return ret;
 }
 
@@ -3912,18 +4018,17 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 	struct caam_request *req_ctx = &state->caam_req;
 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
-	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-		      GFP_KERNEL : GFP_ATOMIC;
 	u8 *buf = state->buf;
 	int buflen = state->buflen;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
 	int ret = -ENOMEM;
 
-	/* allocate space for base edesc and link tables */
-	edesc = qi_cache_zalloc(flags);
-	if (!edesc)
-		return ret;
+	/* get address for base edesc and link tables */
+	edesc = (struct ahash_edesc *)((u8 *)state +
+		 sizeof(struct caam_hash_state));
+	/* clear memory */
+	memset(edesc, 0, sizeof(*edesc));
 
 	if (buflen) {
 		state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
@@ -3973,7 +4078,6 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 
 unmap:
 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
-	qi_cache_free(edesc);
 	return ret;
 }
 
@@ -3991,7 +4095,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 	int *buflen = &state->buflen;
 	int *next_buflen = &state->next_buflen;
 	int in_len = *buflen + req->nbytes, to_hash;
-	int qm_sg_bytes, src_nents, mapped_nents;
+	int qm_sg_bytes, src_nents, mapped_nents, edesc_size = 0;
 	struct ahash_edesc *edesc;
 	int ret = 0;
 
@@ -4019,17 +4123,30 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 			mapped_nents = 0;
 		}
 
-		/* allocate space for base edesc and link tables */
-		edesc = qi_cache_zalloc(flags);
-		if (!edesc) {
-			dma_unmap_sg(ctx->dev, req->src, src_nents,
-				     DMA_TO_DEVICE);
-			return -ENOMEM;
+		qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
+			      sizeof(*sg_table);
+
+		/* Check if there's enough space for edesc saved in req */
+		edesc_size = sizeof(*edesc) + qm_sg_bytes;
+		if (edesc_size > (crypto_ahash_reqsize(ahash) -
+				  sizeof(struct caam_hash_state))) {
+			/* allocate space for base edesc and link tables */
+			edesc = qi_cache_zalloc(flags);
+			if (!edesc) {
+				dma_unmap_sg(ctx->dev, req->src, src_nents,
+					     DMA_TO_DEVICE);
+				return -ENOMEM;
+			}
+			edesc->free = true;
+		} else {
+			/* get address for base edesc and link tables */
+			edesc = (struct ahash_edesc *)((u8 *)state +
+				 sizeof(struct caam_hash_state));
+			/* clear memory */
+			memset(edesc, 0, sizeof(*edesc));
 		}
 
 		edesc->src_nents = src_nents;
-		qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
-			      sizeof(*sg_table);
 		sg_table = &edesc->sgt[0];
 
 		ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
@@ -4094,7 +4211,8 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 	return ret;
 unmap_ctx:
 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
-	qi_cache_free(edesc);
+	if (edesc->free)
+		qi_cache_free(edesc);
 	return ret;
 }
 
@@ -4109,7 +4227,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		      GFP_KERNEL : GFP_ATOMIC;
 	int buflen = state->buflen;
-	int qm_sg_bytes, src_nents, mapped_nents;
+	int qm_sg_bytes, src_nents, mapped_nents, edesc_size = 0;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
 	struct dpaa2_sg_entry *sg_table;
@@ -4132,15 +4250,29 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 		mapped_nents = 0;
 	}
 
-	/* allocate space for base edesc and link tables */
-	edesc = qi_cache_zalloc(flags);
-	if (!edesc) {
-		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
-		return ret;
+	qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
+
+	/* Check if there's enough space for edesc saved in req */
+	edesc_size = sizeof(*edesc) + qm_sg_bytes;
+	if (edesc_size > (crypto_ahash_reqsize(ahash) -
+			  sizeof(struct caam_hash_state))) {
+		/* allocate space for base edesc and link tables */
+		edesc = qi_cache_zalloc(flags);
+		if (!edesc) {
+			dma_unmap_sg(ctx->dev, req->src, src_nents,
+				     DMA_TO_DEVICE);
+			return ret;
+		}
+		edesc->free = true;
+	} else {
+		/* get address for base edesc and link tables */
+		edesc = (struct ahash_edesc *)((u8 *)state +
+			 sizeof(struct caam_hash_state));
+		/* clear memory */
+		memset(edesc, 0, sizeof(*edesc));
 	}
 
 	edesc->src_nents = src_nents;
-	qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
 	sg_table = &edesc->sgt[0];
 
 	ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
@@ -4190,7 +4322,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	return ret;
 unmap:
 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
-	qi_cache_free(edesc);
+	if (edesc->free)
+		qi_cache_free(edesc);
 	return ret;
 }
 
@@ -4208,7 +4341,7 @@ static int ahash_update_first(struct ahash_request *req)
 	int *buflen = &state->buflen;
 	int *next_buflen = &state->next_buflen;
 	int to_hash;
-	int src_nents, mapped_nents;
+	int src_nents, mapped_nents, qm_sg_bytes, edesc_size = 0;
 	struct ahash_edesc *edesc;
 	int ret = 0;
 
@@ -4237,12 +4370,26 @@ static int ahash_update_first(struct ahash_request *req)
 			mapped_nents = 0;
 		}
 
-		/* allocate space for base edesc and link tables */
-		edesc = qi_cache_zalloc(flags);
-		if (!edesc) {
-			dma_unmap_sg(ctx->dev, req->src, src_nents,
-				     DMA_TO_DEVICE);
-			return -ENOMEM;
+		qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
+
+		/* Check if there's enough space for edesc saved in req */
+		edesc_size = sizeof(*edesc) + qm_sg_bytes;
+		if (edesc_size > (crypto_ahash_reqsize(ahash) -
+				  sizeof(struct caam_hash_state))) {
+			/* allocate space for base edesc and link tables */
+			edesc = qi_cache_zalloc(flags);
+			if (!edesc) {
+				dma_unmap_sg(ctx->dev, req->src, src_nents,
+					     DMA_TO_DEVICE);
+				return -ENOMEM;
+			}
+			edesc->free = true;
+		} else {
+			/* get address for base edesc and link tables */
+			edesc = (struct ahash_edesc *)((u8 *)state +
+				 sizeof(struct caam_hash_state));
+			/* clear memory */
+			memset(edesc, 0, sizeof(*edesc));
 		}
 
 		edesc->src_nents = src_nents;
@@ -4253,11 +4400,7 @@ static int ahash_update_first(struct ahash_request *req)
 		dpaa2_fl_set_len(in_fle, to_hash);
 
 		if (mapped_nents > 1) {
-			int qm_sg_bytes;
-
 			sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
-			qm_sg_bytes = pad_sg_nents(mapped_nents) *
-				      sizeof(*sg_table);
 			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
 							  qm_sg_bytes,
 							  DMA_TO_DEVICE);
@@ -4319,7 +4462,8 @@ static int ahash_update_first(struct ahash_request *req)
 	return ret;
 unmap_ctx:
 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
-	qi_cache_free(edesc);
+	if (edesc->free)
+		qi_cache_free(edesc);
 	return ret;
 }
 
@@ -4566,7 +4710,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
 					 HASH_MSG_LEN + 64,
 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
 	dma_addr_t dma_addr;
-	int i;
+	int i, extra_reqsize = 0;
 
 	ctx->dev = caam_hash->dev;
 
@@ -4604,7 +4748,16 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
 				   OP_ALG_ALGSEL_SUBMASK) >>
 				  OP_ALG_ALGSEL_SHIFT];
 
-	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));
+	/* Compute extra space needed for base edesc and link tables */
+	extra_reqsize = sizeof(struct ahash_edesc) +
+			/* link tables for src:
+			 * 4 entries max + max 2 for remaining buf, aligned = 8
+			 */
+			(8 * sizeof(struct dpaa2_sg_entry));
+
+	crypto_ahash_set_reqsize_dma(ahash,
+				     sizeof(struct caam_hash_state) +
+				     extra_reqsize);
 
 	/*
 	 * For keyed hash algorithms shared descriptors
@@ -4659,7 +4812,7 @@ static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
 	alg->cra_priority = CAAM_CRA_PRIORITY;
 	alg->cra_blocksize = template->blocksize;
 	alg->cra_alignmask = 0;
-	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
+	alg->cra_flags = CRYPTO_ALG_ASYNC;
 
 	t_alg->alg_type = template->alg_type;
 	t_alg->dev = dev;
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index abb502bb675c..c700438de9e6 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -100,6 +100,7 @@ struct dpaa2_caam_priv_per_cpu {
  * @dst_nents: number of segments in output scatterlist
  * @iv_dma: dma address of iv for checking continuity and link table
  * @qm_sg_bytes: length of dma mapped h/w link table
+ * @free: true if the edesc was dynamically allocated and needs to be freed
  * @qm_sg_dma: bus physical mapped address of h/w link table
  * @assoclen: associated data length, in CAAM endianness
  * @assoclen_dma: bus physical mapped address of req->assoclen
@@ -110,6 +111,7 @@ struct aead_edesc {
 	int dst_nents;
 	dma_addr_t iv_dma;
 	int qm_sg_bytes;
+	bool free;
 	dma_addr_t qm_sg_dma;
 	unsigned int assoclen;
 	dma_addr_t assoclen_dma;
@@ -122,6 +124,7 @@ struct aead_edesc {
  * @dst_nents: number of segments in output scatterlist
  * @iv_dma: dma address of iv for checking continuity and link table
  * @qm_sg_bytes: length of dma mapped qm_sg space
+ * @free: true if the edesc was dynamically allocated and needs to be freed
  * @qm_sg_dma: I/O virtual address of h/w link table
  * @sgt: the h/w link table, followed by IV
  */
@@ -130,6 +133,7 @@ struct skcipher_edesc {
 	int dst_nents;
 	dma_addr_t iv_dma;
 	int qm_sg_bytes;
+	bool free;
 	dma_addr_t qm_sg_dma;
 	struct dpaa2_sg_entry sgt[];
 };
@@ -139,12 +143,14 @@ struct skcipher_edesc {
  * @qm_sg_dma: I/O virtual address of h/w link table
  * @src_nents: number of segments in input scatterlist
  * @qm_sg_bytes: length of dma mapped qm_sg space
+ * @free: true if the edesc was dynamically allocated and needs to be freed
  * @sgt: pointer to h/w link table
  */
 struct ahash_edesc {
 	dma_addr_t qm_sg_dma;
 	int src_nents;
 	int qm_sg_bytes;
+	bool free;
 	struct dpaa2_sg_entry sgt[];
 };
 
-- 
2.25.1
