Message-ID: <20250826110917.3383061-1-quic_utiwari@quicinc.com>
Date: Tue, 26 Aug 2025 16:39:17 +0530
From: <quic_utiwari@...cinc.com>
To: <herbert@...dor.apana.org.au>, <thara.gopinath@...il.com>,
<davem@...emloft.net>
CC: <linux-crypto@...r.kernel.org>, <linux-arm-msm@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <quic_neersoni@...cinc.com>,
<quic_kuldsing@...cinc.com>, <quic_gaurkash@...cinc.com>
Subject: [PATCH v2] crypto: qce - Add runtime PM and interconnect bandwidth scaling support
From: Udit Tiwari <quic_utiwari@...cinc.com>

The Qualcomm Crypto Engine (QCE) driver currently lacks support for
runtime power management (PM) and interconnect bandwidth control.
As a result, the hardware remains fully powered and clocks stay
enabled even when the device is idle. Additionally, static
interconnect bandwidth votes are held indefinitely, preventing the
system from reclaiming unused bandwidth.

Address this by enabling runtime PM and dynamic interconnect
bandwidth scaling, so the system can suspend the device when idle
and scale interconnect usage with actual demand. This improves
overall system efficiency by reducing power usage and optimizing
interconnect resource allocation.

Make the following changes as part of this integration (a minimal
sketch of the resulting wiring follows the list):
- Add support for pm_runtime APIs to manage device power state
transitions.
- Implement runtime_suspend() and runtime_resume() callbacks to gate
clocks and vote for interconnect bandwidth only when needed.
- Replace devm_clk_get_optional_enabled() with
devm_clk_get_optional() and move clock enabling to the resume path.
- Register dev_pm_ops with the platform driver to hook into the PM
framework.
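
For reference, a minimal sketch of the resulting runtime PM wiring,
trimmed to the core clock only; the actual change below also handles
the iface and bus clocks, restores the bandwidth vote via icc_set_bw(),
and does full error unwinding. struct qce_device and its core/mem_path
members are the driver's existing definitions in drivers/crypto/qce:

    #include <linux/clk.h>
    #include <linux/interconnect.h>
    #include <linux/pm_runtime.h>

    /* struct qce_device, ->core and ->mem_path come from the existing
     * driver (drivers/crypto/qce/core.h); only drvdata is assumed here. */
    static int qce_runtime_suspend(struct device *dev)
    {
            struct qce_device *qce = dev_get_drvdata(dev);

            /* Idle: gate the clock and drop the interconnect vote. */
            clk_disable_unprepare(qce->core);
            icc_disable(qce->mem_path);
            return 0;
    }

    static int qce_runtime_resume(struct device *dev)
    {
            struct qce_device *qce = dev_get_drvdata(dev);
            int ret;

            /* Active: restore the interconnect vote, then ungate the clock. */
            ret = icc_enable(qce->mem_path);
            if (ret)
                    return ret;

            ret = clk_prepare_enable(qce->core);
            if (ret)
                    icc_disable(qce->mem_path);
            return ret;
    }

    static const struct dev_pm_ops qce_crypto_pm_ops = {
            .runtime_suspend = qce_runtime_suspend,
            .runtime_resume = qce_runtime_resume,
    };

Request handling then brackets hardware access with
pm_runtime_get_sync()/pm_runtime_put_autosuspend(), as done in
qce_handle_queue() in the diff below.
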
Tested:
- Verified that ICC votes drop to zero after probe and upon request
  completion.
- Confirmed that the runtime PM usage count increments during active
  requests and decrements afterward.
- Observed that the device correctly enters the suspended state when
  idle.

Signed-off-by: Udit Tiwari <quic_utiwari@...cinc.com>
---
Changes in v2:
- Extend suspend/resume support to include runtime PM and ICC scaling.
- Register dev_pm_ops and implement runtime_suspend/resume callbacks.
- Link to v1: https://lore.kernel.org/lkml/20250606105808.2119280-1-quic_utiwari@quicinc.com/
---
drivers/crypto/qce/core.c | 120 ++++++++++++++++++++++++++++++++++----
1 file changed, 108 insertions(+), 12 deletions(-)
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index e95e84486d9a..70b9d9e739be 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
@@ -91,22 +92,28 @@ static int qce_handle_queue(struct qce_device *qce,
struct crypto_async_request *async_req, *backlog;
int ret = 0, err;
+ ret = pm_runtime_get_sync(qce->dev);
+ if (ret < 0) {
+ dev_err(qce->dev, "failed to resume device: %d\n", ret);
+ pm_runtime_put_noidle(qce->dev);
+ return ret;
+ }
+
scoped_guard(mutex, &qce->lock) {
if (req)
ret = crypto_enqueue_request(&qce->queue, req);
/* busy, do not dequeue request */
if (qce->req)
- return ret;
+ goto qce_suspend;
backlog = crypto_get_backlog(&qce->queue);
async_req = crypto_dequeue_request(&qce->queue);
if (async_req)
qce->req = async_req;
}
-
if (!async_req)
- return ret;
+ goto qce_suspend;
if (backlog) {
scoped_guard(mutex, &qce->lock)
@@ -119,6 +126,8 @@ static int qce_handle_queue(struct qce_device *qce,
schedule_work(&qce->done_work);
}
+qce_suspend:
+ pm_runtime_put_autosuspend(qce->dev);
return ret;
}
@@ -208,37 +217,43 @@ static int qce_crypto_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- qce->core = devm_clk_get_optional_enabled(qce->dev, "core");
+ qce->core = devm_clk_get_optional(dev, "core");
if (IS_ERR(qce->core))
return PTR_ERR(qce->core);
- qce->iface = devm_clk_get_optional_enabled(qce->dev, "iface");
+ qce->iface = devm_clk_get_optional(dev, "iface");
if (IS_ERR(qce->iface))
return PTR_ERR(qce->iface);
- qce->bus = devm_clk_get_optional_enabled(qce->dev, "bus");
+ qce->bus = devm_clk_get_optional(dev, "bus");
if (IS_ERR(qce->bus))
return PTR_ERR(qce->bus);
- qce->mem_path = devm_of_icc_get(qce->dev, "memory");
+ qce->mem_path = devm_of_icc_get(dev, "memory");
if (IS_ERR(qce->mem_path))
return PTR_ERR(qce->mem_path);
- ret = icc_set_bw(qce->mem_path, QCE_DEFAULT_MEM_BANDWIDTH, QCE_DEFAULT_MEM_BANDWIDTH);
+ /* Enable runtime PM after clocks and ICC are acquired */
+
+ ret = devm_pm_runtime_enable(dev);
if (ret)
return ret;
- ret = devm_qce_dma_request(qce->dev, &qce->dma);
+ ret = pm_runtime_resume_and_get(dev);
if (ret)
return ret;
+ ret = devm_qce_dma_request(qce->dev, &qce->dma);
+ if (ret)
+ goto err_pm;
+
ret = qce_check_version(qce);
if (ret)
- return ret;
+ goto err_pm;
ret = devm_mutex_init(qce->dev, &qce->lock);
if (ret)
- return ret;
+ goto err_pm;
INIT_WORK(&qce->done_work, qce_req_done_work);
crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);
@@ -246,9 +261,89 @@ static int qce_crypto_probe(struct platform_device *pdev)
qce->async_req_enqueue = qce_async_request_enqueue;
qce->async_req_done = qce_async_request_done;
- return devm_qce_register_algs(qce);
+ ret = devm_qce_register_algs(qce);
+ if (ret)
+ goto err_pm;
+
+ /* Configure autosuspend after successful init */
+ pm_runtime_set_autosuspend_delay(dev, 100);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+
+err_pm:
+ pm_runtime_put_sync(dev);
+
+ return ret;
+}
+
+static int qce_runtime_suspend(struct device *dev)
+{
+ struct qce_device *qce = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(qce->bus);
+ clk_disable_unprepare(qce->iface);
+ clk_disable_unprepare(qce->core);
+ icc_disable(qce->mem_path);
+
+ return 0;
+}
+
+static int qce_runtime_resume(struct device *dev)
+{
+ struct qce_device *qce = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = icc_enable(qce->mem_path);
+ if (ret)
+ return ret;
+
+ ret = icc_set_bw(qce->mem_path, QCE_DEFAULT_MEM_BANDWIDTH, QCE_DEFAULT_MEM_BANDWIDTH);
+ if (ret)
+ goto err_icc;
+
+ ret = clk_prepare_enable(qce->core);
+ if (ret)
+ goto err_icc;
+
+ ret = clk_prepare_enable(qce->iface);
+ if (ret)
+ goto err_core;
+
+ ret = clk_prepare_enable(qce->bus);
+ if (ret)
+ goto err_iface;
+
+ return 0;
+
+err_iface:
+ clk_disable_unprepare(qce->iface);
+err_core:
+ clk_disable_unprepare(qce->core);
+err_icc:
+ icc_disable(qce->mem_path);
+ return ret;
}
+static int qce_suspend(struct device *dev)
+{
+ return pm_runtime_force_suspend(dev);
+}
+
+static int qce_resume(struct device *dev)
+{
+ return pm_runtime_force_resume(dev);
+}
+
+static const struct dev_pm_ops qce_crypto_pm_ops = {
+ .runtime_suspend = qce_runtime_suspend,
+ .runtime_resume = qce_runtime_resume,
+ .suspend = qce_suspend,
+ .resume = qce_resume,
+};
+
static const struct of_device_id qce_crypto_of_match[] = {
{ .compatible = "qcom,crypto-v5.1", },
{ .compatible = "qcom,crypto-v5.4", },
@@ -262,6 +357,7 @@ static struct platform_driver qce_crypto_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = qce_crypto_of_match,
+ .pm = &qce_crypto_pm_ops,
},
};
module_platform_driver(qce_crypto_driver);
--
2.34.1