Message-ID: <CAKv+Gu-KBgiyNY2Dypx6vqtmpTXNfOxxWxJf50BTiF2rCOFqnw@mail.gmail.com>
Date: Thu, 30 May 2019 16:31:09 +0200
From: Ard Biesheuvel <ard.biesheuvel@...aro.org>
To: Herbert Xu <herbert@...dor.apana.org.au>
Cc: Iuliana Prodan <iuliana.prodan@....com>,
Eric Biggers <ebiggers@...nel.org>,
"David S. Miller" <davem@...emloft.net>,
Horia Geanta <horia.geanta@....com>,
Sascha Hauer <s.hauer@...gutronix.de>,
"open list:HARDWARE RANDOM NUMBER GENERATOR CORE"
<linux-crypto@...r.kernel.org>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
dl-linux-imx <linux-imx@....com>
Subject: Re: [PATCH] crypto: gcm - fix cacheline sharing

On Thu, 30 May 2019 at 16:28, Ard Biesheuvel <ard.biesheuvel@...aro.org> wrote:
>
> On Thu, 30 May 2019 at 16:27, Herbert Xu <herbert@...dor.apana.org.au> wrote:
> >
> > On Thu, May 30, 2019 at 03:55:07PM +0200, Ard Biesheuvel wrote:
> > >
> > > > Would this work?
> >
> > I see. You need to preserve the original IV.
> >
> > > > diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
> > > > index c0ece44f303b..2ef2f76a3cb8 100644
> > > > --- a/drivers/crypto/caam/caamalg.c
> > > > +++ b/drivers/crypto/caam/caamalg.c
> > > > @@ -1832,22 +1832,25 @@ static int skcipher_decrypt(struct skcipher_request *req)
> > > >  	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
> > > >  	int ivsize = crypto_skcipher_ivsize(skcipher);
> > > >  	struct device *jrdev = ctx->jrdev;
> > > > +	u8 out_iv[AES_BLOCK_SIZE];
> > > >  	u32 *desc;
> > > >  	int ret = 0;
> > > >
> > > > -	/* allocate extended descriptor */
> > > > -	edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
> > > > -	if (IS_ERR(edesc))
> > > > -		return PTR_ERR(edesc);
> > > > -
> > > >  	/*
> > > >  	 * The crypto API expects us to set the IV (req->iv) to the last
> > > >  	 * ciphertext block.
> > > >  	 */
> > > >  	if (ivsize)
> > > > -		scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
> > > > +		scatterwalk_map_and_copy(out_iv, req->src, req->cryptlen -
> > > >  					 ivsize, ivsize, 0);
> > > >
> > > > +	/* allocate extended descriptor */
> > > > +	edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
> > > > +	if (IS_ERR(edesc))
> > > > +		return PTR_ERR(edesc);
> > > > +
> > > > +	memcpy(req->iv, out_iv, ivsize);
> > > > +
> > > >  	/* Create and submit job descriptor*/
> > > >  	init_skcipher_job(req, edesc, false);
> > > >  	desc = edesc->hw_desc;
> > >
> > > Umm never mind
> > >
> > > /me hides in shame
> >
> > So why doesn't this work?
> >
>
> Because the memcpy() occurs while the buffer is still mapped for DMA,
> it suffers from the exact same problem.
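
To spell out the ordering rule being violated (a minimal, untested
sketch, *not* CAAM code -- the device/buffer/length and the function
name are made up for illustration): with a streaming DMA mapping, the
CPU must not touch the buffer between dma_map_single() and
dma_unmap_single().

#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/types.h>

static int iv_dma_ordering_sketch(struct device *dev, u8 *buf, size_t len)
{
	dma_addr_t handle;

	/* Any CPU write must happen BEFORE the buffer is mapped ... */
	memset(buf, 0, len);		/* stands in for copying the IV */

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/*
	 * ... because from here until the unmap the device owns the
	 * buffer: a CPU store in this window lands in a dirty cacheline
	 * that the unmap may invalidate, or that may be written back on
	 * top of data the device placed in the same cacheline -- which
	 * is exactly the cacheline sharing problem in $subject.
	 */

	/* ... submit the job and wait for completion here ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);

	/* Only now may the CPU touch buf again. */
	return 0;
}
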
This might work:
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index c0ece44f303b..3d313d2a279a 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1661,7 +1661,8 @@ static int aead_decrypt(struct aead_request *req)
  * allocate and map the skcipher extended descriptor for skcipher
  */
 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
-						   int desc_bytes)
+						   int desc_bytes,
+						   u8 const *input_iv)
 {
 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
@@ -1745,7 +1746,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	/* Make sure IV is located in a DMAable area */
 	if (ivsize) {
 		iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
-		memcpy(iv, req->iv, ivsize);
+		memcpy(iv, input_iv, ivsize);
 
 		iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
 		if (dma_mapping_error(jrdev, iv_dma)) {
@@ -1801,7 +1802,8 @@ static int skcipher_encrypt(struct skcipher_request *req)
 	int ret = 0;
 
 	/* allocate extended descriptor */
-	edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+	edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ,
+				     req->iv);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1832,13 +1834,11 @@ static int skcipher_decrypt(struct skcipher_request *req)
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	int ivsize = crypto_skcipher_ivsize(skcipher);
 	struct device *jrdev = ctx->jrdev;
+	u8 in_iv[AES_BLOCK_SIZE];
 	u32 *desc;
 	int ret = 0;
 
-	/* allocate extended descriptor */
-	edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
-	if (IS_ERR(edesc))
-		return PTR_ERR(edesc);
+	memcpy(in_iv, req->iv, ivsize);
 
 	/*
 	 * The crypto API expects us to set the IV (req->iv) to the last
@@ -1848,6 +1848,11 @@ static int skcipher_decrypt(struct skcipher_request *req)
 		scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
 					 ivsize, ivsize, 0);
 
+	/* allocate extended descriptor */
+	edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ, in_iv);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
 	/* Create and submit job descriptor*/
 	init_skcipher_job(req, edesc, false);
 	desc = edesc->hw_desc;
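
(The point being: req->iv is snapshotted into in_iv on the stack before
anything is mapped for DMA, the scatterwalk copy of the last ciphertext
block into req->iv also happens before the mapping, and only then is
the extended descriptor allocated and mapped, with the preserved IV
passed in. After that, no CPU write ever touches a DMA-mapped buffer.)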