Message-Id: <20201214200232.17357-4-clabbe@baylibre.com>
Date: Mon, 14 Dec 2020 20:02:27 +0000
From: Corentin Labbe <clabbe@...libre.com>
To: arnd@...db.de, davem@...emloft.net, herbert@...dor.apana.org.au,
jernej.skrabec@...l.net, mripard@...nel.org, wens@...e.org
Cc: linux-arm-kernel@...ts.infradead.org, linux-crypto@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-sunxi@...glegroups.com,
Corentin Labbe <clabbe@...libre.com>, stable@...r.kernel.org
Subject: [PATCH v4 3/8] crypto: sun4i-ss: IV register does not work on A10 and A13
The Allwinner A10 and A13 SoCs have a version of the SS which produces an
invalid IV in the IVx registers.
Instead of adding a variant for those, let's convert the driver to retrieve
the IV directly from the processed data.
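
For context (this sketch is not part of the patch), the idea in plain C: for
CBC, the chaining IV after an operation is simply the last ciphertext block,
so it can be taken from the data instead of the broken IVx registers. On
decryption the last ciphertext block must be saved before the engine runs,
because an in-place operation overwrites the source. The helper names below
are made up for illustration and are not the driver's API.

/* Illustrative sketch only: obtaining the next CBC IV from the data
 * rather than from the hardware IVx registers. */
#include <string.h>

/* On decryption, save the last ciphertext block of the source *before*
 * processing, since an in-place operation overwrites it. */
static void sketch_save_iv_before_decrypt(void *backup_iv, const void *src,
					  size_t cryptlen, size_t ivsize)
{
	memcpy(backup_iv, (const char *)src + cryptlen - ivsize, ivsize);
}

/* On encryption, the next IV is simply the last block of the ciphertext
 * just written to the destination. */
static void sketch_read_iv_after_encrypt(void *iv, const void *dst,
					 size_t cryptlen, size_t ivsize)
{
	memcpy(iv, (const char *)dst + cryptlen - ivsize, ivsize);
}

This mirrors what the patch below does with scatterwalk_map_and_copy() on the
request's scatterlists.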
Fixes: 6298e948215f2 ("crypto: sunxi-ss - Add Allwinner Security System crypto accelerator")
Cc: <stable@...r.kernel.org>
Signed-off-by: Corentin Labbe <clabbe@...libre.com>
---
.../allwinner/sun4i-ss/sun4i-ss-cipher.c | 34 +++++++++++++++----
1 file changed, 28 insertions(+), 6 deletions(-)
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
index f49797588329..c7bf731dad7b 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
@@ -20,6 +20,7 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
unsigned int ivsize = crypto_skcipher_ivsize(tfm);
struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
u32 mode = ctx->mode;
+ void *backup_iv = NULL;
/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
u32 rx_cnt = SS_RX_DEFAULT;
u32 tx_cnt = 0;
@@ -42,6 +43,13 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
return -EINVAL;
}
+ if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
+ backup_iv = kzalloc(ivsize, GFP_KERNEL);
+ if (!backup_iv)
+ return -ENOMEM;
+ scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
+ }
+
spin_lock_irqsave(&ss->slock, flags);
for (i = 0; i < op->keylen; i += 4)
@@ -102,9 +110,12 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
} while (oleft);
if (areq->iv) {
- for (i = 0; i < 4 && i < ivsize / 4; i++) {
- v = readl(ss->base + SS_IV0 + i * 4);
- *(u32 *)(areq->iv + i * 4) = v;
+ if (mode & SS_DECRYPTION) {
+ memcpy(areq->iv, backup_iv, ivsize);
+ kfree_sensitive(backup_iv);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
+ ivsize, 0);
}
}
@@ -161,6 +172,7 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
unsigned int ileft = areq->cryptlen;
unsigned int oleft = areq->cryptlen;
unsigned int todo;
+ void *backup_iv = NULL;
struct sg_mapping_iter mi, mo;
unsigned int oi, oo; /* offset for in and out */
unsigned int ob = 0; /* offset in buf */
@@ -202,6 +214,13 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
if (need_fallback)
return sun4i_ss_cipher_poll_fallback(areq);
+ if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
+ backup_iv = kzalloc(ivsize, GFP_KERNEL);
+ if (!backup_iv)
+ return -ENOMEM;
+ scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
+ }
+
spin_lock_irqsave(&ss->slock, flags);
for (i = 0; i < op->keylen; i += 4)
@@ -322,9 +341,12 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
}
}
if (areq->iv) {
- for (i = 0; i < 4 && i < ivsize / 4; i++) {
- v = readl(ss->base + SS_IV0 + i * 4);
- *(u32 *)(areq->iv + i * 4) = v;
+ if (mode & SS_DECRYPTION) {
+ memcpy(areq->iv, backup_iv, ivsize);
+ kfree_sensitive(backup_iv);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
+ ivsize, 0);
}
}
--
2.26.2