Message-ID: <PH0PR02MB7271E7D12E4971513733A694DE3C9@PH0PR02MB7271.namprd02.prod.outlook.com>
Date: Wed, 23 Feb 2022 09:45:24 +0000
From: Harsha Harsha <harshah@...inx.com>
To: Corentin Labbe <clabbe.montjoie@...il.com>
CC: "herbert@...dor.apana.org.au" <herbert@...dor.apana.org.au>,
"davem@...emloft.net" <davem@...emloft.net>,
"linux-crypto@...r.kernel.org" <linux-crypto@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"linux-arm-kernel@...ts.infradead.org"
<linux-arm-kernel@...ts.infradead.org>,
Michal Simek <michals@...inx.com>,
Sarat Chand Savitala <saratcha@...inx.com>,
Harsh Jain <harshj@...inx.com>, git <git@...inx.com>
Subject: RE: [PATCH V2 3/4] crypto: xilinx: Add Xilinx SHA3 driver
> -----Original Message-----
> From: Corentin Labbe <clabbe.montjoie@...il.com>
> Sent: Tuesday, February 22, 2022 3:32 PM
> To: Harsha Harsha <harshah@...inx.com>
> Cc: herbert@...dor.apana.org.au; davem@...emloft.net; linux-crypto@...r.kernel.org; linux-kernel@...r.kernel.org; linux-arm-
> kernel@...ts.infradead.org; Michal Simek <michals@...inx.com>; Sarat Chand Savitala <saratcha@...inx.com>; Harsh Jain
> <harshj@...inx.com>; git <git@...inx.com>
> Subject: Re: [PATCH V2 3/4] crypto: xilinx: Add Xilinx SHA3 driver
>
> On Fri, Feb 18, 2022 at 12:44:23AM +0530, Harsha wrote:
> > This patch adds SHA3 driver support for the Xilinx ZynqMP SoC.
> > The Xilinx ZynqMP SoC has a SHA3 engine used for secure hash calculation.
> > The flow is:
> > SHA3 request from userspace -> SHA3 driver -> ZynqMP driver -> Firmware ->
> > SHA3 HW engine
> >
> > The SHA3 HW engine in the Xilinx ZynqMP SoC does not support parallel
> > processing of two hash requests.
> > Therefore, a software fallback is used for init, update, final,
> > export and import in the ZynqMP SHA driver.
> > For digest, the SHA3 hash calculation is done by the hardened
> > SHA3 accelerator in the Xilinx ZynqMP SoC.
> >
> > Signed-off-by: Harsha <harsha.harsha@...inx.com>
> > ---
> > drivers/crypto/Kconfig | 10 ++
> > drivers/crypto/xilinx/Makefile | 1 +
> > drivers/crypto/xilinx/zynqmp-sha.c | 285 +++++++++++++++++++++++++++++++++++++
> > 3 files changed, 296 insertions(+)
> > create mode 100644 drivers/crypto/xilinx/zynqmp-sha.c
> >
> > diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
> > index 4f70567..bf4e55e 100644
> > --- a/drivers/crypto/Kconfig
> > +++ b/drivers/crypto/Kconfig
> > @@ -808,6 +808,16 @@ config CRYPTO_DEV_ZYNQMP_AES
> > accelerator. Select this if you want to use the ZynqMP module
> > for AES algorithms.
> >
> > +config CRYPTO_DEV_ZYNQMP_SHA3
> > + bool "Support for Xilinx ZynqMP SHA3 hardware accelerator"
> > + depends on ARCH_ZYNQMP
> > + select CRYPTO_SHA3
> > + help
> > + Xilinx ZynqMP has SHA3 engine used for secure hash calculation.
> > + This driver interfaces with SHA3 hardware engine.
> > + Select this if you want to use the ZynqMP module
> > + for SHA3 hash computation.
> > +
> > source "drivers/crypto/chelsio/Kconfig"
> >
> > source "drivers/crypto/virtio/Kconfig"
> > diff --git a/drivers/crypto/xilinx/Makefile b/drivers/crypto/xilinx/Makefile
> > index 534e32d..730feff 100644
> > --- a/drivers/crypto/xilinx/Makefile
> > +++ b/drivers/crypto/xilinx/Makefile
> > @@ -1,2 +1,3 @@
> > # SPDX-License-Identifier: GPL-2.0-only
> > obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += zynqmp-aes-gcm.o
> > +obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_SHA3) += zynqmp-sha.o
> > diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
> > new file mode 100644
> > index 0000000..1eaca97
> > --- /dev/null
> > +++ b/drivers/crypto/xilinx/zynqmp-sha.c
> > @@ -0,0 +1,285 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +/*
> > + * Xilinx ZynqMP SHA Driver.
> > + * Copyright (c) 2022 Xilinx Inc.
> > + */
> > +#include <linux/cacheflush.h>
> > +#include <crypto/hash.h>
> > +#include <crypto/internal/hash.h>
> > +#include <crypto/sha3.h>
> > +#include <linux/crypto.h>
> > +#include <linux/device.h>
> > +#include <linux/dma-mapping.h>
> > +#include <linux/firmware/xlnx-zynqmp.h>
> > +#include <linux/init.h>
> > +#include <linux/io.h>
> > +#include <linux/kernel.h>
> > +#include <linux/module.h>
> > +#include <linux/of_device.h>
> > +#include <linux/platform_device.h>
> > +
> > +#define ZYNQMP_DMA_BIT_MASK 32U
> > +#define ZYNQMP_DMA_ALLOC_FIXED_SIZE 0x1000U
> > +
> > +enum zynqmp_sha_op {
> > + ZYNQMP_SHA3_INIT = 1,
> > + ZYNQMP_SHA3_UPDATE = 2,
> > + ZYNQMP_SHA3_FINAL = 4,
> > +};
> > +
> > +struct zynqmp_sha_drv_ctx {
> > + struct shash_alg sha3_384;
> > + struct device *dev;
> > +};
> > +
> > +struct zynqmp_sha_tfm_ctx {
> > + struct device *dev;
> > + struct crypto_shash *fbk_tfm;
> > +};
> > +
> > +struct zynqmp_sha_desc_ctx {
> > + struct shash_desc fbk_req;
> > +};
> > +
> > +static dma_addr_t update_dma_addr, final_dma_addr;
> > +static char *ubuf, *fbuf;
> > +
> > +static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
> > +{
> > + const char *fallback_driver_name = crypto_shash_alg_name(hash);
> > + struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
> > + struct shash_alg *alg = crypto_shash_alg(hash);
> > + struct crypto_shash *fallback_tfm;
> > + struct zynqmp_sha_drv_ctx *drv_ctx;
> > +
> > + drv_ctx = container_of(alg, struct zynqmp_sha_drv_ctx, sha3_384);
> > + tfm_ctx->dev = drv_ctx->dev;
> > +
> > + /* Allocate a fallback and abort if it failed. */
> > + fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
> > + CRYPTO_ALG_NEED_FALLBACK);
> > + if (IS_ERR(fallback_tfm))
> > + return PTR_ERR(fallback_tfm);
> > +
> > + tfm_ctx->fbk_tfm = fallback_tfm;
> > + hash->descsize += crypto_shash_descsize(tfm_ctx->fbk_tfm);
> > +
> > + return 0;
> > +}
> > +
> > +static void zynqmp_sha_exit_tfm(struct crypto_shash *hash)
> > +{
> > + struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
> > +
> > + if (tfm_ctx->fbk_tfm) {
> > + crypto_free_shash(tfm_ctx->fbk_tfm);
> > + tfm_ctx->fbk_tfm = NULL;
> > + }
> > +
> > + memzero_explicit(tfm_ctx, sizeof(struct zynqmp_sha_tfm_ctx));
> > +}
> > +
> > +static int zynqmp_sha_init(struct shash_desc *desc)
> > +{
> > + struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
> > + struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
> > +
> > + dctx->fbk_req.tfm = tctx->fbk_tfm;
> > + return crypto_shash_init(&dctx->fbk_req);
> > +}
> > +
> > +static int zynqmp_sha_update(struct shash_desc *desc, const u8 *data, unsigned int length)
> > +{
> > + struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
> > +
> > + return crypto_shash_update(&dctx->fbk_req, data, length);
> > +}
> > +
> > +static int zynqmp_sha_final(struct shash_desc *desc, u8 *out)
> > +{
> > + struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
> > +
> > + return crypto_shash_final(&dctx->fbk_req, out);
> > +}
> > +
> > +static int zynqmp_sha_finup(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
> > +{
> > + struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
> > +
> > + return crypto_shash_finup(&dctx->fbk_req, data, length, out);
> > +}
> > +
> > +static int zynqmp_sha_import(struct shash_desc *desc, const void *in)
> > +{
> > + struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
> > + struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
> > +
> > + dctx->fbk_req.tfm = tctx->fbk_tfm;
> > + return crypto_shash_import(&dctx->fbk_req, in);
> > +}
> > +
> > +static int zynqmp_sha_export(struct shash_desc *desc, void *out)
> > +{
> > + struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
> > +
> > + return crypto_shash_export(&dctx->fbk_req, out);
> > +}
> > +
> > +static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
> > +{
> > + unsigned int remaining_len = len;
> > + int update_size;
> > + int ret;
> > +
> > + ret = zynqmp_pm_sha_hash(0, 0, ZYNQMP_SHA3_INIT);
> > + if (ret)
> > + return ret;
> > +
> > + while (remaining_len != 0) {
> > + memset(ubuf, 0, ZYNQMP_DMA_ALLOC_FIXED_SIZE);
> > + if (remaining_len >= ZYNQMP_DMA_ALLOC_FIXED_SIZE) {
> > + update_size = ZYNQMP_DMA_ALLOC_FIXED_SIZE;
> > + remaining_len -= ZYNQMP_DMA_ALLOC_FIXED_SIZE;
> > + } else {
> > + update_size = remaining_len;
> > + remaining_len = 0;
> > + }
> > + memcpy(ubuf, data, update_size);
> > + flush_icache_range((unsigned long)ubuf, (unsigned long)ubuf + update_size);
>
> Hello
>
> Why do you copy all data before processing and not use them in-place ?
The firmware expects DMA-capable memory in the lower 32-bit address space, so the data cannot be used in-place.
ubuf points to the allocated region (in the processor's virtual
address space) returned by dma_alloc_coherent(). The driver copies the data into ubuf and
passes the associated DMA handle to the firmware.
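For reference, the two buffers are set up at probe time roughly as below. This is a trimmed sketch rather than the exact hunk from the patch; error unwinding and algorithm registration are omitted, and only the names already visible in the quoted code (ubuf/fbuf, update_dma_addr/final_dma_addr, ZYNQMP_DMA_BIT_MASK, ZYNQMP_DMA_ALLOC_FIXED_SIZE) are reused:

static int zynqmp_sha_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int err;

	/* Firmware expects buffers below 4 GiB, so restrict the DMA mask */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(ZYNQMP_DMA_BIT_MASK));
	if (err < 0)
		return err;

	/* Bounce buffer for ZYNQMP_SHA3_UPDATE chunks */
	ubuf = dma_alloc_coherent(dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE,
				  &update_dma_addr, GFP_KERNEL);
	if (!ubuf)
		return -ENOMEM;

	/* Buffer the firmware writes the final SHA3-384 digest into */
	fbuf = dma_alloc_coherent(dev, SHA3_384_DIGEST_SIZE,
				  &final_dma_addr, GFP_KERNEL);
	if (!fbuf) {
		dma_free_coherent(dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE,
				  ubuf, update_dma_addr);
		return -ENOMEM;
	}

	return 0;
}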
>
>
> > + ret = zynqmp_pm_sha_hash(update_dma_addr, update_size, ZYNQMP_SHA3_UPDATE);
> > + if (ret)
> > + return ret;
> > +
> > + data += update_size;
> > + }
> > +
> > + ret = zynqmp_pm_sha_hash(final_dma_addr, SHA3_384_DIGEST_SIZE, ZYNQMP_SHA3_FINAL);
> > + memcpy(out, fbuf, SHA3_384_DIGEST_SIZE);
> > + memset(fbuf, 0, SHA3_384_DIGEST_SIZE);
>
> You should use memzero_explicit()
I will fix this in the next version of the patch.
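i.e. the clear of the final buffer will become something like:

-	memset(fbuf, 0, SHA3_384_DIGEST_SIZE);
+	memzero_explicit(fbuf, SHA3_384_DIGEST_SIZE);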
>
> Regards
Regards,
Harsha