Message-Id: <20200422095053.829913671@linuxfoundation.org>
Date: Wed, 22 Apr 2020 11:56:11 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Eneas U de Queiroz <cotequeiroz@...il.com>,
Herbert Xu <herbert@...dor.apana.org.au>,
Sasha Levin <sashal@...nel.org>
Subject: [PATCH 5.6 044/166] crypto: qce - use cryptlen when adding extra sgl
From: Eneas U de Queiroz <cotequeiroz@...il.com>
[ Upstream commit d6364b8128439a8c0e381f80c38667de9f15eef8 ]
The qce crypto driver appends an extra entry to the dst sgl, to maintain
private state information.

When the gcm driver sends requests to the ctr skcipher, it passes the
authentication tag after the actual crypto payload, but the tag must not
be touched.

Commit 1336c2221bee ("crypto: qce - save a sg table slot for result
buf") limited the destination sgl to avoid overwriting the
authentication tag, but it assumed the tag would be in a separate sgl
entry.

This is not always the case, so it is better to limit the length of the
destination buffer to req->cryptlen before appending the result buf.
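
For illustration only (not part of the applied patch): a minimal
user-space sketch of the length-capping idea, under the assumption of
stand-in types. "struct entry" and add_capped() below are hypothetical
and are not the kernel's struct scatterlist or qce_sgtable_add(); they
only mirror the loop that caps each copied entry at the remaining byte
budget so the trailing tag area is never described.

    #include <stdio.h>

    /* Hypothetical stand-in for a scatterlist entry. */
    struct entry {
            unsigned int length;    /* bytes described by this entry */
    };

    /*
     * Copy entries from src into dst, but never describe more than
     * max_len bytes in total; the last entry is truncated to fit.
     */
    static unsigned int add_capped(struct entry *dst,
                                   const struct entry *src,
                                   unsigned int nents,
                                   unsigned int max_len)
    {
            unsigned int i, used = 0;

            for (i = 0; i < nents && max_len; i++) {
                    unsigned int len = src[i].length > max_len ?
                                       max_len : src[i].length;

                    dst[i].length = len;   /* cap at remaining budget */
                    max_len -= len;
                    used++;
            }
            return used;
    }

    int main(void)
    {
            /* Last entry holds 48 bytes of payload plus a 16-byte tag. */
            const struct entry src[2] = { { 32 }, { 48 + 16 } };
            struct entry dst[2] = { { 0 }, { 0 } };

            /* Limit to cryptlen (80 bytes) so the tag is left alone. */
            unsigned int n = add_capped(dst, src, 2, 80);

            printf("entries used: %u, last length: %u\n",
                   n, dst[n - 1].length);
            return 0;
    }

With a byte budget, the tag stays untouched even when it shares an sgl
entry with the payload, which is exactly the case the old per-entry
limit missed.
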
Signed-off-by: Eneas U de Queiroz <cotequeiroz@...il.com>
Signed-off-by: Herbert Xu <herbert@...dor.apana.org.au>
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
drivers/crypto/qce/dma.c | 11 ++++++-----
drivers/crypto/qce/dma.h | 2 +-
drivers/crypto/qce/skcipher.c | 5 +++--
3 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 7da893dc00e73..46db5bf366b44 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -48,9 +48,10 @@ void qce_dma_release(struct qce_dma_data *dma)
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
- int max_ents)
+ unsigned int max_len)
{
struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
+ unsigned int new_len;
while (sg) {
if (!sg_page(sg))
@@ -61,13 +62,13 @@ qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
if (!sg)
return ERR_PTR(-EINVAL);
- while (new_sgl && sg && max_ents) {
- sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
- new_sgl->offset);
+ while (new_sgl && sg && max_len) {
+ new_len = new_sgl->length > max_len ? max_len : new_sgl->length;
+ sg_set_page(sg, sg_page(new_sgl), new_len, new_sgl->offset);
sg_last = sg;
sg = sg_next(sg);
new_sgl = sg_next(new_sgl);
- max_ents--;
+ max_len -= new_len;
}
return sg_last;
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index ed25a0d9829e5..7864021693608 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -43,6 +43,6 @@ void qce_dma_issue_pending(struct qce_dma_data *dma);
int qce_dma_terminate_all(struct qce_dma_data *dma);
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add,
- int max_ents);
+ unsigned int max_len);
#endif /* _DMA_H_ */
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 4217b745f1242..63ae75809cb70 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -97,13 +97,14 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
- sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, rctx->dst_nents - 1);
+ sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, req->cryptlen);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto error_free;
}
- sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, 1);
+ sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg,
+ QCE_RESULT_BUF_SZ);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto error_free;
--
2.20.1