Message-Id: <20200817143835.200745355@linuxfoundation.org>
Date: Mon, 17 Aug 2020 17:09:43 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org,
Giovanni Cabiddu <giovanni.cabiddu@...el.com>,
Herbert Xu <herbert@...dor.apana.org.au>,
Sasha Levin <sashal@...nel.org>
Subject: [PATCH 5.8 030/464] crypto: qat - allow xts requests not multiple of block

From: Giovanni Cabiddu <giovanni.cabiddu@...el.com>

[ Upstream commit 528f776df67c440361b2847b4da400d8754bf030 ]

Allow AES-XTS requests that are not a multiple of the block size.
If a request is smaller than the block size, return -EINVAL.

This fixes the following issue reported by the crypto testmgr self-test:

alg: skcipher: qat_aes_xts encryption failed on test vector "random: len=116 klen=64"; expected_error=0, actual_error=-22, cfg="random: inplace may_sleep use_finup src_divs=[<reimport>45.85%@...77, <flush>54.15%@...gnmask+18]"

Fixes: 96ee111a659e ("crypto: qat - return error for block...")
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@...el.com>
Signed-off-by: Herbert Xu <herbert@...dor.apana.org.au>
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
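For context, here is a minimal, untested sketch (not part of this patch) of an in-kernel caller that exercises the new behaviour through the generic skcipher API. The "xts(aes)" algorithm name, the random 2 x AES-256 (64-byte) key, the demo function name and the 116-byte length (borrowed from the testmgr vector quoted above) are assumptions made purely for illustration:

/*
 * Illustrative only -- not part of this patch.  Issues an AES-XTS request
 * whose length is not a multiple of the 16-byte block size.
 */
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int qat_xts_partial_block_demo(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	u8 key[64], iv[16] = { 0 };
	u8 *buf = NULL;
	int ret;

	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	get_random_bytes(key, sizeof(key));
	ret = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	buf = kzalloc(116, GFP_KERNEL);	/* 116 bytes: not a block multiple */
	if (!req || !buf) {
		ret = -ENOMEM;
		goto out;
	}

	sg_init_one(&sg, buf, 116);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 116, iv);

	/*
	 * With this patch the 116-byte request is accepted; a cryptlen
	 * below XTS_BLOCK_SIZE (16) still returns -EINVAL.
	 */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
	kfree(buf);
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return ret;
}

Before this change the driver's generic block-size wrappers rejected any XTS cryptlen that was not a multiple of AES_BLOCK_SIZE; with the dedicated XTS wrappers below, only requests shorter than XTS_BLOCK_SIZE are refused.
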
 drivers/crypto/qat/qat_common/qat_algs.c | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index e14d3dd291f09..1b050391c0c90 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -55,6 +55,7 @@
 #include <crypto/hmac.h>
 #include <crypto/algapi.h>
 #include <crypto/authenc.h>
+#include <crypto/xts.h>
 #include <linux/dma-mapping.h>
 #include "adf_accel_devices.h"
 #include "adf_transport.h"
@@ -1102,6 +1103,14 @@ static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
 	return qat_alg_skcipher_encrypt(req);
 }
 
+static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
+{
+	if (req->cryptlen < XTS_BLOCK_SIZE)
+		return -EINVAL;
+
+	return qat_alg_skcipher_encrypt(req);
+}
+
 static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
 {
 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
@@ -1161,6 +1170,15 @@ static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
 
 	return qat_alg_skcipher_decrypt(req);
 }
+
+static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
+{
+	if (req->cryptlen < XTS_BLOCK_SIZE)
+		return -EINVAL;
+
+	return qat_alg_skcipher_decrypt(req);
+}
+
 static int qat_alg_aead_init(struct crypto_aead *tfm,
 			     enum icp_qat_hw_auth_algo hash,
 			     const char *hash_name)
@@ -1354,8 +1372,8 @@ static struct skcipher_alg qat_skciphers[] = { {
 	.init = qat_alg_skcipher_init_tfm,
 	.exit = qat_alg_skcipher_exit_tfm,
 	.setkey = qat_alg_skcipher_xts_setkey,
-	.decrypt = qat_alg_skcipher_blk_decrypt,
-	.encrypt = qat_alg_skcipher_blk_encrypt,
+	.decrypt = qat_alg_skcipher_xts_decrypt,
+	.encrypt = qat_alg_skcipher_xts_encrypt,
 	.min_keysize = 2 * AES_MIN_KEY_SIZE,
 	.max_keysize = 2 * AES_MAX_KEY_SIZE,
 	.ivsize = AES_BLOCK_SIZE,
--
2.25.1