[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-ID: <20220719021042.250882-1-shaozhengchao@huawei.com>
Date: Tue, 19 Jul 2022 10:10:42 +0800
From: Zhengchao Shao <shaozhengchao@...wei.com>
To: <linux-crypto@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<liulongfang@...wei.com>, <herbert@...dor.apana.org.au>,
<davem@...emloft.net>
CC: <xuzaibo@...wei.com>, <weiyongjun1@...wei.com>,
<yuehaibing@...wei.com>, <shaozhengchao@...wei.com>
Subject: [PATCH] crypto: hisilicon/hpre - don't use GFP_KERNEL to alloc mem during softirq
The hpre encryption driver may be used to encrypt and decrypt packets
during the rx softirq, so it is not allowed to use GFP_KERNEL.
Fixes: c8b4b477079d ("crypto: hisilicon - add HiSilicon HPRE accelerator")
Signed-off-by: Zhengchao Shao <shaozhengchao@...wei.com>
This patch has not been runtime-tested; it is compile-tested only.
---
drivers/crypto/hisilicon/hpre/hpre_crypto.c | 28 +++++++++++++--------
1 file changed, 18 insertions(+), 10 deletions(-)
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 97d54c1465c2..cf098fa673f4 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -241,7 +241,7 @@ static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
struct scatterlist *data, unsigned int len,
- int is_src, dma_addr_t *tmp)
+ int is_src, dma_addr_t *tmp, gfp_t flags)
{
struct hpre_ctx *ctx = hpre_req->ctx;
struct device *dev = ctx->dev;
@@ -252,7 +252,7 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
if (unlikely(shift < 0))
return -EINVAL;
- ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
+ ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, flags);
if (unlikely(!ptr))
return -ENOMEM;
@@ -268,7 +268,7 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
struct scatterlist *data, unsigned int len,
- int is_src, int is_dh)
+ int is_src, int is_dh, u32 flags)
{
struct hpre_sqe *msg = &hpre_req->req;
struct hpre_ctx *ctx = hpre_req->ctx;
@@ -280,7 +280,9 @@ static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
((is_dh && !is_src) || !is_dh))
ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
else
- ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);
+ ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp,
+ (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
if (unlikely(ret))
return ret;
@@ -585,14 +587,16 @@ static int hpre_dh_compute_value(struct kpp_request *req)
return ret;
if (req->src) {
- ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
+ ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1,
+ req->base.flags);
if (unlikely(ret))
goto clear_all;
} else {
msg->in = cpu_to_le64(ctx->dh.dma_g);
}
- ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
+ ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1,
+ req->base.flags);
if (unlikely(ret))
goto clear_all;
@@ -800,11 +804,13 @@ static int hpre_rsa_enc(struct akcipher_request *req)
msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);
- ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
+ ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0,
+ req->base.flags);
if (unlikely(ret))
goto clear_all;
- ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
+ ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0,
+ req->base.flags);
if (unlikely(ret))
goto clear_all;
@@ -855,11 +861,13 @@ static int hpre_rsa_dec(struct akcipher_request *req)
HPRE_ALG_NC_NCRT);
}
- ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
+ ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0,
+ req->base.flags);
if (unlikely(ret))
goto clear_all;
- ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
+ ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0,
+ req->base.flags);
if (unlikely(ret))
goto clear_all;
--
2.17.1
Powered by blists - more mailing lists