Message-ID: <20250711182932.918257-1-ovidiu.panait.oss@gmail.com>
Date: Fri, 11 Jul 2025 21:29:31 +0300
From: Ovidiu Panait <ovidiu.panait.oss@...il.com>
To: herbert@...dor.apana.org.au,
davem@...emloft.net,
linux-crypto@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: Ovidiu Panait <ovidiu.panait.oss@...il.com>,
freude@...ux.ibm.com,
dengler@...ux.ibm.com,
linux-s390@...r.kernel.org,
horia.geanta@....com,
pankaj.gupta@....com,
gaurav.jain@....com,
arei.gonglei@...wei.com,
virtualization@...ts.linux.dev
Subject: [PATCH 1/2] crypto: engine: remove request batching support
Remove request batching support from crypto_engine, as no drivers use
this feature and it does not work particularly well.

Instead of batching based on backlog, a better approach is to let the
user handle the batching (similar to how IPsec can hook into GSO to get
64K of data at a time, or how block encryption can use unit sizes much
larger than 4K).
Suggested-by: Herbert Xu <herbert@...dor.apana.org.au>
Signed-off-by: Ovidiu Panait <ovidiu.panait.oss@...il.com>
---
Cc: freude@...ux.ibm.com
Cc: dengler@...ux.ibm.com
Cc: linux-s390@...r.kernel.org
Cc: horia.geanta@....com
Cc: pankaj.gupta@....com
Cc: gaurav.jain@....com
Cc: arei.gonglei@...wei.com
Cc: virtualization@...ts.linux.dev
arch/s390/crypto/paes_s390.c | 2 +-
arch/s390/crypto/phmac_s390.c | 2 +-
crypto/crypto_engine.c | 25 +---------------------
drivers/crypto/caam/jr.c | 3 +--
drivers/crypto/virtio/virtio_crypto_core.c | 2 +-
include/crypto/engine.h | 1 -
include/crypto/internal/engine.h | 4 ----
7 files changed, 5 insertions(+), 34 deletions(-)
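
Note for reviewers (not part of the commit): any engine user not covered
by this diff converts the same way as the hunks below, by dropping the
NULL batch-callback argument. A minimal sketch against the post-patch
prototype, using hypothetical my_drv_init_engine()/MY_ENGINE_QLEN names:

 #include <linux/device.h>
 #include <linux/errno.h>
 #include <crypto/engine.h>

 #define MY_ENGINE_QLEN 10        /* hypothetical queue length */

 static struct crypto_engine *my_engine;

 static int my_drv_init_engine(struct device *dev)
 {
         /*
          * Previously:
          *   crypto_engine_alloc_init_and_set(dev, true, NULL, false,
          *                                    MY_ENGINE_QLEN);
          * The NULL cbk_do_batch argument is simply dropped.
          */
         my_engine = crypto_engine_alloc_init_and_set(dev, true, false,
                                                      MY_ENGINE_QLEN);
         if (!my_engine)
                 return -ENOMEM;

         return crypto_engine_start(my_engine);
 }

Callers of crypto_engine_alloc_init() need no change, as its two-argument
signature is untouched (see the crypto_engine.c hunk).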
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index 8a340c16acb4..a624a43a2b54 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -1633,7 +1633,7 @@ static int __init paes_s390_init(void)
/* with this pseudo devie alloc and start a crypto engine */
paes_crypto_engine =
crypto_engine_alloc_init_and_set(paes_dev.this_device,
- true, NULL, false, MAX_QLEN);
+ true, false, MAX_QLEN);
if (!paes_crypto_engine) {
rc = -ENOMEM;
goto out_err;
diff --git a/arch/s390/crypto/phmac_s390.c b/arch/s390/crypto/phmac_s390.c
index 90602f72108f..7ecfdc4fba2d 100644
--- a/arch/s390/crypto/phmac_s390.c
+++ b/arch/s390/crypto/phmac_s390.c
@@ -1006,7 +1006,7 @@ static int __init s390_phmac_init(void)
/* with this pseudo device alloc and start a crypto engine */
phmac_crypto_engine =
crypto_engine_alloc_init_and_set(phmac_dev.this_device,
- true, NULL, false, MAX_QLEN);
+ true, false, MAX_QLEN);
if (!phmac_crypto_engine) {
rc = -ENOMEM;
goto out_err;
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index 445d3c113ee1..8a2400f240d4 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -195,17 +195,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
out:
spin_unlock_irqrestore(&engine->queue_lock, flags);
- /*
- * Batch requests is possible only if
- * hardware can enqueue multiple requests
- */
- if (engine->do_batch_requests) {
- ret = engine->do_batch_requests(engine);
- if (ret)
- dev_err(engine->dev, "failed to do batch requests: %d\n",
- ret);
- }
-
return;
}
@@ -462,12 +451,6 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop);
* crypto-engine queue.
* @dev: the device attached with one hardware engine
* @retry_support: whether hardware has support for retry mechanism
- * @cbk_do_batch: pointer to a callback function to be invoked when executing
- * a batch of requests.
- * This has the form:
- * callback(struct crypto_engine *engine)
- * where:
- * engine: the crypto engine structure.
* @rt: whether this queue is set to run as a realtime task
* @qlen: maximum size of the crypto-engine queue
*
@@ -476,7 +459,6 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop);
*/
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
bool retry_support,
- int (*cbk_do_batch)(struct crypto_engine *engine),
bool rt, int qlen)
{
struct crypto_engine *engine;
@@ -495,11 +477,6 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
engine->idling = false;
engine->retry_support = retry_support;
engine->priv_data = dev;
- /*
- * Batch requests is possible only if
- * hardware has support for retry mechanism.
- */
- engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;
snprintf(engine->name, sizeof(engine->name),
"%s-engine", dev_name(dev));
@@ -534,7 +511,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
*/
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
- return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
+ return crypto_engine_alloc_init_and_set(dev, false, rt,
CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 9fcdb64084ac..0ef00df9730e 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -629,8 +629,7 @@ static int caam_jr_probe(struct platform_device *pdev)
}
/* Initialize crypto engine */
- jrpriv->engine = crypto_engine_alloc_init_and_set(jrdev, true, NULL,
- false,
+ jrpriv->engine = crypto_engine_alloc_init_and_set(jrdev, true, false,
CRYPTO_ENGINE_MAX_QLEN);
if (!jrpriv->engine) {
dev_err(jrdev, "Could not init crypto-engine\n");
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 0d522049f595..3d241446099c 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -139,7 +139,7 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
spin_lock_init(&vi->data_vq[i].lock);
vi->data_vq[i].vq = vqs[i];
/* Initialize crypto engine */
- vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, NULL, true,
+ vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, true,
virtqueue_get_vring_size(vqs[i]));
if (!vi->data_vq[i].engine) {
ret = -ENOMEM;
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index 545dbefe3e13..2e60344437da 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -76,7 +76,6 @@ int crypto_engine_stop(struct crypto_engine *engine);
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
bool retry_support,
- int (*cbk_do_batch)(struct crypto_engine *engine),
bool rt, int qlen);
void crypto_engine_exit(struct crypto_engine *engine);
diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h
index b6a4ea2240fc..8da1a13619c9 100644
--- a/include/crypto/internal/engine.h
+++ b/include/crypto/internal/engine.h
@@ -37,8 +37,6 @@ struct device;
* @unprepare_crypt_hardware: there are currently no more requests on the
* queue so the subsystem notifies the driver that it may relax the
* hardware by issuing this call
- * @do_batch_requests: execute a batch of requests. Depends on multiple
- * requests support.
* @kworker: kthread worker struct for request pump
* @pump_requests: work struct for scheduling work to the request pump
* @priv_data: the engine private data
@@ -60,8 +58,6 @@ struct crypto_engine {
int (*prepare_crypt_hardware)(struct crypto_engine *engine);
int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
- int (*do_batch_requests)(struct crypto_engine *engine);
-
struct kthread_worker *kworker;
struct kthread_work pump_requests;
--
2.50.0