Date:   Tue, 30 Aug 2016 18:53:52 +0300
From:   Iaroslav Gridin <voker57@...il.com>
To:     herbert@...dor.apana.org.au
Cc:     davem@...emloft.net, linux-crypto@...r.kernel.org,
        linux-kernel@...r.kernel.org, andy.gross@...aro.org,
        david.brown@...aro.org, linux-arm-msm@...r.kernel.org,
        linux-soc@...r.kernel.org, Voker57 <voker57@...il.com>
Subject: [PATCH 3/4] crypto: qce: Ensure QCE receives no zero-sized updates

From: Voker57 <voker57@...il.com>

A zero-sized update locks up the QCE, so make sure some data (up to one
blocksize) is always held back for the final update.
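
For illustration only (not part of the patch, and the names below are
hypothetical), here is a minimal user-space sketch of the hold-back
arithmetic the second hunk implements: when the running total is
block-aligned, one full block is kept in the buffer instead of zero
bytes, so the final update always has data to feed the hardware.

#include <stdio.h>

#define BLOCKSIZE 64	/* e.g. the SHA-1/SHA-256 block size */

/*
 * Number of bytes held back for the final update; mirrors what the
 * patch stores in rctx->buf via hash_later.
 */
static unsigned int bytes_held_back(unsigned int total)
{
	unsigned int hash_later = total % BLOCKSIZE;

	/* block-aligned total: keep one full block instead of nothing */
	if (hash_later == 0)
		hash_later = BLOCKSIZE;

	return hash_later;
}

int main(void)
{
	printf("%u\n", bytes_held_back(130));	/* 2: unchanged behaviour */
	printf("%u\n", bytes_held_back(128));	/* 64: a whole block is held back */
	return 0;
}
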
Signed-off-by: Iaroslav Gridin <voker57@...il.com>
---
 drivers/crypto/qce/sha.c | 30 ++++++++++++++++++++----------
 1 file changed, 20 insertions(+), 10 deletions(-)

diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index a068d39..f199f28 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -240,9 +240,11 @@ static int qce_ahash_update(struct ahash_request *req)
 	struct qce_device *qce = tmpl->qce;
 	struct scatterlist *sg_last, *sg;
 	unsigned int total, len;
+	unsigned int tmpbuflen = 0;
 	unsigned int hash_later;
 	unsigned int nbytes;
 	unsigned int blocksize;
+	unsigned int src_offset;
 
 	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
 	rctx->count += req->nbytes;
@@ -265,21 +267,30 @@ static int qce_ahash_update(struct ahash_request *req)
 	 * if we have data from previous update copy them on buffer. The old
 	 * data will be combined with current request bytes.
 	 */
-	if (rctx->buflen)
+	if (rctx->buflen) {
 		memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
+		tmpbuflen = rctx->buflen;
+	}
 
 	/* calculate how many bytes will be hashed later */
 	hash_later = total % blocksize;
-	if (hash_later) {
-		unsigned int src_offset = req->nbytes - hash_later;
-		scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
-					 hash_later, 0);
-	}
+	/* ensure we always have something on buffer */
+	if (hash_later == 0)
+		hash_later = blocksize;
+	src_offset = req->nbytes - hash_later;
+	scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
+				 hash_later, 0);
+	rctx->buflen = hash_later;
 
 	/* here nbytes is multiple of blocksize */
 	nbytes = total - hash_later;
 
-	len = rctx->buflen;
+	len = tmpbuflen;
+
+	/* Zero-length update is a no-op */
+	if (nbytes == 0)
+		return 0;
+
 	sg = sg_last = req->src;
 
 	while (len < nbytes && sg) {
@@ -293,15 +304,14 @@ static int qce_ahash_update(struct ahash_request *req)
 
 	sg_mark_end(sg_last);
 
-	if (rctx->buflen) {
+	if (tmpbuflen) {
 		sg_init_table(rctx->sg, 2);
-		sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
+		sg_set_buf(rctx->sg, rctx->tmpbuf, tmpbuflen);
 		sg_chain(rctx->sg, 2, req->src);
 		req->src = rctx->sg;
 	}
 
 	req->nbytes = nbytes;
-	rctx->buflen = hash_later;
 
 	return qce->async_req_enqueue(tmpl->qce, &req->base);
 }
-- 
2.9.3
