Message-ID: <a85c4f87-c87f-784f-7db3-0b83570f82da@wanadoo.fr>
Date:   Tue, 7 Jun 2022 07:00:31 +0200
From:   Christophe JAILLET <christophe.jaillet@...adoo.fr>
To:     Neal Liu <neal_liu@...eedtech.com>
Cc:     linux-aspeed@...ts.ozlabs.org, linux-crypto@...r.kernel.org,
        devicetree@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
        linux-kernel@...r.kernel.org, BMC-SW@...eedtech.com,
        Herbert Xu <herbert@...dor.apana.org.au>,
        "David S . Miller" <davem@...emloft.net>,
        Rob Herring <robh+dt@...nel.org>,
        Krzysztof Kozlowski <krzysztof.kozlowski+dt@...aro.org>,
        Joel Stanley <joel@....id.au>,
        Andrew Jeffery <andrew@...id.au>,
        Johnny Huang <johnny_huang@...eedtech.com>
Subject: Re: [PATCH v2 5/5] crypto: aspeed: add HACE crypto driver

On 07/06/2022 05:53, Neal Liu wrote:
>> On 06/06/2022 08:49, Neal Liu wrote:
>>> Add HACE crypto driver to support symmetric-key encryption and
>>> decryption with multiple modes of operation.
>>>
>>> Signed-off-by: Neal Liu <neal_liu@...eedtech.com>
>>> Signed-off-by: Johnny Huang <johnny_huang@...eedtech.com>
>>> ---
>>
>> [...]
>>
>>> +static int aspeed_sk_transfer_sg(struct aspeed_hace_dev *hace_dev)
>>> +{
>>> +	struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
>>> +	struct device *dev = hace_dev->dev;
>>> +	struct aspeed_cipher_reqctx *rctx;
>>> +	struct skcipher_request *req;
>>> +
>>> +	CIPHER_DBG(hace_dev, "\n");
>>> +
>>> +	req = skcipher_request_cast(crypto_engine->areq);
>>> +	rctx = skcipher_request_ctx(req);
>>> +
>>> +	if (req->src == req->dst) {
>>> +		dma_unmap_sg(dev, req->src, rctx->src_nents,
>>> +			     DMA_BIDIRECTIONAL);
>>> +
>>
>> Unneeded empty line.
> 
> Okay!
> 
>>
>>> +	} else {
>>> +		dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
>>> +		dma_unmap_sg(dev, req->dst, rctx->dst_nents,
>>> +			     DMA_FROM_DEVICE);
>>> +	}
>>> +
>>> +	return aspeed_sk_complete(hace_dev, 0);
>>> +}
>>> +
>>
>> [...]
>>
>>> +static int aspeed_sk_start_sg(struct aspeed_hace_dev *hace_dev)
>>> +{
>>> +	struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
>>> +	struct aspeed_sg_list *src_list, *dst_list;
>>> +	dma_addr_t src_dma_addr, dst_dma_addr;
>>> +	struct aspeed_cipher_reqctx *rctx;
>>> +	struct skcipher_request *req;
>>> +	struct scatterlist *s;
>>> +	int src_sg_len;
>>> +	int dst_sg_len;
>>> +	int total, i;
>>> +	int rc;
>>> +
>>> +	CIPHER_DBG(hace_dev, "\n");
>>> +
>>> +	req = skcipher_request_cast(crypto_engine->areq);
>>> +	rctx = skcipher_request_ctx(req);
>>> +
>>> +	rctx->enc_cmd |= HACE_CMD_DES_SG_CTRL | HACE_CMD_SRC_SG_CTRL |
>>> +			 HACE_CMD_AES_KEY_HW_EXP | HACE_CMD_MBUS_REQ_SYNC_EN;
>>> +
>>> +	/* BIDIRECTIONAL */
>>> +	if (req->dst == req->src) {
>>> +		src_sg_len = dma_map_sg(hace_dev->dev, req->src,
>>> +					rctx->src_nents, DMA_BIDIRECTIONAL);
>>> +		dst_sg_len = src_sg_len;
>>> +		if (!src_sg_len) {
>>> +			dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
>>> +			return -EINVAL;
>>> +		}
>>> +
>>> +	} else {
>>> +		src_sg_len = dma_map_sg(hace_dev->dev, req->src,
>>> +					rctx->src_nents, DMA_TO_DEVICE);
>>> +		if (!src_sg_len) {
>>> +			dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
>>> +			return -EINVAL;
>>> +		}
>>> +
>>> +		dst_sg_len = dma_map_sg(hace_dev->dev, req->dst,
>>> +					rctx->dst_nents, DMA_FROM_DEVICE);
>>> +		if (!dst_sg_len) {
>>> +			dev_warn(hace_dev->dev, "dma_map_sg() dst error\n");
>>> +			rc = -EINVAL;
>>> +			goto free_req_src;
>>
>> Should we really call dma_unmap_sg() if dma_map_sg() fails?
> 
> This error handling unmaps the buffer above (req->src), not this buffer (req->dst).
> I think it should.

You are right, I misread it. Sorry for the noise.
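
For the record, the pattern at stake, boiled down to a minimal sketch
(the function name is made up for illustration; assumes
<linux/dma-mapping.h> and the struct definitions from the patch):

	static int sketch_map_src_then_dst(struct device *dev,
					   struct skcipher_request *req,
					   struct aspeed_cipher_reqctx *rctx)
	{
		int rc;

		if (!dma_map_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE))
			return -EINVAL;	/* nothing mapped yet: plain return */

		if (!dma_map_sg(dev, req->dst, rctx->dst_nents,
				DMA_FROM_DEVICE)) {
			rc = -EINVAL;
			goto unmap_src;	/* undo only the successful src map */
		}

		return 0;

	unmap_src:
		dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
		return rc;
	}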

> 
>>
>>> +		}
>>> +	}
>>> +
>>> +	src_list = (struct aspeed_sg_list *)crypto_engine->cipher_addr;
>>> +	src_dma_addr = crypto_engine->cipher_dma_addr;
>>> +	total = req->cryptlen;
>>> +
>>> +	for_each_sg(req->src, s, src_sg_len, i) {
>>> +		src_list[i].phy_addr = sg_dma_address(s);
>>> +
>>> +		/* last sg list */
>>> +		if (sg_dma_len(s) >= total) {
>>> +			src_list[i].len = total;
>>> +			src_list[i].len |= BIT(31);
>>> +			total = 0;
>>> +			break;
>>> +		}
>>> +
>>> +		src_list[i].len = sg_dma_len(s);
>>> +		total -= src_list[i].len;
>>> +	}
>>> +
>>> +	if (total != 0)
>>> +		return -EINVAL;
>>
>> goto free_req_src; ?
> 
> Yes, I missed this part. I'll revise it in the next patch, thanks.

There is another one below...

> 
>>
>>> +
>>> +	if (req->dst == req->src) {
>>> +		dst_list = src_list;
>>> +		dst_dma_addr = src_dma_addr;
>>> +
>>> +	} else {
>>> +		dst_list = (struct aspeed_sg_list *)crypto_engine->dst_sg_addr;
>>> +		dst_dma_addr = crypto_engine->dst_sg_dma_addr;
>>> +		total = req->cryptlen;
>>> +
>>> +		for_each_sg(req->dst, s, dst_sg_len, i) {
>>> +			dst_list[i].phy_addr = sg_dma_address(s);
>>> +
>>> +			/* last sg list */
>>> +			if (sg_dma_len(s) >= total) {
>>> +				dst_list[i].len = total;
>>> +				dst_list[i].len |= BIT(31);
>>> +				total = 0;
>>> +				break;
>>> +			}
>>> +
>>> +			dst_list[i].len = sg_dma_len(s);
>>> +			total -= dst_list[i].len;
>>> +		}
>>> +
>>> +		dst_list[dst_sg_len].phy_addr = 0;
>>> +		dst_list[dst_sg_len].len = 0;
>>> +	}
>>> +
>>> +	if (total != 0)
>>> +		return -EINVAL;

... here.
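
For reference, a sketch of how both "total != 0" exits could unwind (the
'free_req' label is an illustrative name; both mappings done earlier in
the function have to be undone, covering the bidirectional and the split
src/dst cases):

	if (total != 0) {
		rc = -EINVAL;
		goto free_req;
	}

	...

free_req:
	if (req->src == req->dst) {
		dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
			     DMA_TO_DEVICE);
	}

	return rc;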

>>> +
>>> +	crypto_engine->resume = aspeed_sk_transfer_sg;
>>> +
>>> +	/* Dummy read for barriers */
>>> +	readl(src_list);
>>> +	readl(dst_list);
>>> +
>>> +	/* Trigger engines */
>>> +	ast_hace_write(hace_dev, src_dma_addr, ASPEED_HACE_SRC);
>>> +	ast_hace_write(hace_dev, dst_dma_addr, ASPEED_HACE_DEST);
>>> +	ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN);
>>> +	ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD);
>>> +
>>> +	return -EINPROGRESS;
>>> +
>>> +free_req_src:
>>> +	dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
>>> +		     DMA_TO_DEVICE);
>>> +
>>> +	return rc;
>>> +}
>>> +
>>
>> [...]
>>
>>> +static int aspeed_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
>>> +			     unsigned int keylen)
>>> +{
>>> +	struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
>>> +	struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
>>> +	struct crypto_aes_ctx gen_aes_key;
>>> +
>>> +	CIPHER_DBG(hace_dev, "keylen: %d bits\n", (keylen * 8));
>>> +
>>> +	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
>>> +	    keylen != AES_KEYSIZE_256)
>>> +		return -EINVAL;
>>> +
>>> +	if (ctx->hace_dev->version == AST2500_VERSION) {
>>> +		aes_expandkey(&gen_aes_key, key, keylen);
>>> +		memcpy(ctx->key, gen_aes_key.key_enc, AES_MAX_KEYLENGTH);
>>> +
>>
>> Unneeded empty line
> 
> Okay!
> 
>>
>>> +	} else {
>>> +		memcpy(ctx->key, key, keylen);
>>> +	}
>>> +
>>> +	ctx->key_len = keylen;
>>> +
>>> +	return 0;
>>> +}
>>> +
>>
>> [...]
>>
>>> +	crypto_engine->cipher_ctx =
>>> +		dma_alloc_coherent(&pdev->dev,
>>> +				   PAGE_SIZE,
>>> +				   &crypto_engine->cipher_ctx_dma,
>>> +				   GFP_KERNEL);
>>> +	if (!crypto_engine->cipher_ctx) {
>>> +		dev_err(&pdev->dev, "Failed to allocate cipher ctx dma\n");
>>> +		rc = -ENOMEM;
>>> +		goto free_hash_src;
>>> +	}
>>> +
>>> +	crypto_engine->cipher_addr =
>>> +		dma_alloc_coherent(&pdev->dev,
>>> +				   ASPEED_CRYPTO_SRC_DMA_BUF_LEN,
>>> +				   &crypto_engine->cipher_dma_addr,
>>> +				   GFP_KERNEL);
>>> +	if (!crypto_engine->cipher_addr) {
>>> +		dev_err(&pdev->dev, "Failed to allocate cipher addr dma\n");
>>> +		rc = -ENOMEM;
>>> +		goto free_cipher_ctx;
>>> +	}
>>> +
>>> +	if (hace_dev->version == AST2600_VERSION) {
>>> +		crypto_engine->dst_sg_addr =
>>> +			dma_alloc_coherent(&pdev->dev,
>>> +					   ASPEED_CRYPTO_DST_DMA_BUF_LEN,
>>> +					   &crypto_engine->dst_sg_dma_addr,
>>> +					   GFP_KERNEL);
>>> +		if (!crypto_engine->dst_sg_addr) {
>>> +			dev_err(&pdev->dev, "Failed to allocate dst_sg dma\n");
>>> +			rc = -ENOMEM;
>>> +			goto free_cipher_addr;
>>> +		}
>>> +	}
>>> +
>>>    	rc = aspeed_hace_register(hace_dev);
>>>    	if (rc) {
>>>    		dev_err(&pdev->dev, "Failed to register algs, rc:0x%x\n", rc);
>>
>> I guess that the new dma_alloc_coherent() just a few lines above should also
>> be undone in the error-handling path if aspeed_hace_register() fails?
> 
> I'll remove the return value (rc) since it's useless here, so no error
> handling is needed for this part. I'll revise it in the next patch, thanks.
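
Should the unwinding be kept instead, a sketch of what the extra label
could look like ('free_dst_sg' is a made-up name; guarded because only
the AST2600 allocates that buffer, falling through to the existing
labels):

	rc = aspeed_hace_register(hace_dev);
	if (rc) {
		dev_err(&pdev->dev, "Failed to register algs, rc:0x%x\n", rc);
		goto free_dst_sg;
	}

	...

free_dst_sg:
	if (hace_dev->version == AST2600_VERSION)
		dma_free_coherent(&pdev->dev, ASPEED_CRYPTO_DST_DMA_BUF_LEN,
				  crypto_engine->dst_sg_addr,
				  crypto_engine->dst_sg_dma_addr);
free_cipher_addr:
	...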
> 
>>
>>> @@ -179,6 +282,18 @@ static int aspeed_hace_probe(struct platform_device *pdev)
>>>
>>>    	return 0;
>>>
>>> +free_cipher_addr:
>>> +	dma_free_coherent(&pdev->dev, ASPEED_CRYPTO_SRC_DMA_BUF_LEN,
>>> +			  crypto_engine->cipher_addr,
>>> +			  crypto_engine->cipher_dma_addr);
>>> +free_cipher_ctx:
>>> +	dma_free_coherent(&pdev->dev, PAGE_SIZE,
>>> +			  crypto_engine->cipher_ctx,
>>> +			  crypto_engine->cipher_ctx_dma);
>>> +free_hash_src:
>>> +	dma_free_coherent(&pdev->dev, ASPEED_HASH_SRC_DMA_BUF_LEN,
>>> +			  hash_engine->ahash_src_addr,
>>> +			  hash_engine->ahash_src_dma_addr);
>>>    end:
>>>    	clk_disable_unprepare(hace_dev->clk);
>>>    	return rc;
