Message-Id: <20190520115250.343985241@linuxfoundation.org>
Date: Mon, 20 May 2019 14:13:54 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Gilad Ben-Yossef <gilad@...yossef.com>,
Herbert Xu <herbert@...dor.apana.org.au>
Subject: [PATCH 4.19 048/105] crypto: ccree - don't map AEAD key and IV on stack
From: Gilad Ben-Yossef <gilad@...yossef.com>
commit e8662a6a5f8f7f2cadc0edb934aef622d96ac3ee upstream.
The AEAD authenc key and IVs might be passed to us on the stack. Copy them
to slab buffers before mapping to guarantee proper DMA mapping.
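For reference, a minimal sketch of the pattern this patch applies
(illustrative only: map_key_for_dma() and the surrounding device handling
are hypothetical, not the driver's actual code):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static int map_key_for_dma(struct device *dev, const u8 *key,
			   unsigned int keylen)
{
	dma_addr_t key_dma;
	u8 *buf;

	/* key may live on the caller's stack; stack memory must not be
	 * DMA-mapped (with CONFIG_VMAP_STACK it is not even guaranteed
	 * to be physically contiguous), so copy it to slab first.
	 */
	buf = kmemdup(key, keylen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	key_dma = dma_map_single(dev, buf, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_dma)) {
		kzfree(buf);
		return -ENOMEM;
	}

	/* ... program key_dma into the hardware descriptor here ... */

	dma_unmap_single(dev, key_dma, keylen, DMA_TO_DEVICE);
	kzfree(buf);	/* zeroize: the buffer held key material */
	return 0;
}

kzfree() (renamed kfree_sensitive() in v5.9) zeroes the buffer before
freeing it, so no copy of the key lingers in freed slab memory.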
Signed-off-by: Gilad Ben-Yossef <gilad@...yossef.com>
Cc: stable@...r.kernel.org # v4.19+
Signed-off-by: Herbert Xu <herbert@...dor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
drivers/crypto/ccree/cc_aead.c | 11 ++++++++++-
drivers/crypto/ccree/cc_buffer_mgr.c | 15 ++++++++++++---
drivers/crypto/ccree/cc_driver.h | 1 +
3 files changed, 23 insertions(+), 4 deletions(-)
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -415,7 +415,7 @@ static int validate_keys_sizes(struct cc
/* This function prepares the user key so it can be passed to the hmac processing
 * (copy to internal buffer or hash in case of key longer than block size).
 */
-static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
+static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
unsigned int keylen)
{
dma_addr_t key_dma_addr = 0;
@@ -428,6 +428,7 @@ static int cc_get_plain_hmac_key(struct
unsigned int hashmode;
unsigned int idx = 0;
int rc = 0;
+ u8 *key = NULL;
struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
dma_addr_t padded_authkey_dma_addr =
ctx->auth_state.hmac.padded_authkey_dma_addr;
@@ -446,11 +447,17 @@ static int cc_get_plain_hmac_key(struct
}
if (keylen != 0) {
+
+ key = kmemdup(authkey, keylen, GFP_KERNEL);
+ if (!key)
+ return -ENOMEM;
+
key_dma_addr = dma_map_single(dev, (void *)key, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
+ kzfree(key);
return -ENOMEM;
}
if (keylen > blocksize) {
@@ -533,6 +540,8 @@ static int cc_get_plain_hmac_key(struct
if (key_dma_addr)
dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
+ kzfree(key);
+
return rc;
}
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -560,6 +560,7 @@ void cc_unmap_aead_request(struct device
if (areq_ctx->gen_ctx.iv_dma_addr) {
dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
hw_iv_size, DMA_BIDIRECTIONAL);
+ kzfree(areq_ctx->gen_ctx.iv);
}
/* Release pool */
@@ -664,19 +665,27 @@ static int cc_aead_chain_iv(struct cc_dr
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
struct device *dev = drvdata_to_dev(drvdata);
+ gfp_t flags = cc_gfp_flags(&req->base);
int rc = 0;
if (!req->iv) {
areq_ctx->gen_ctx.iv_dma_addr = 0;
+ areq_ctx->gen_ctx.iv = NULL;
goto chain_iv_exit;
}
- areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
- hw_iv_size,
- DMA_BIDIRECTIONAL);
+ areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
+ if (!areq_ctx->gen_ctx.iv)
+ return -ENOMEM;
+
+ areq_ctx->gen_ctx.iv_dma_addr =
+ dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
+ DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
hw_iv_size, req->iv);
+ kzfree(areq_ctx->gen_ctx.iv);
+ areq_ctx->gen_ctx.iv = NULL;
rc = -ENOMEM;
goto chain_iv_exit;
}
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -162,6 +162,7 @@ struct cc_alg_template {
struct async_gen_req_ctx {
dma_addr_t iv_dma_addr;
+ u8 *iv;
enum drv_crypto_direction op_type;
};