Message-Id: <20230526-topic-smd_icc-v6-20-263283111e66@linaro.org>
Date: Wed, 14 Jun 2023 20:04:39 +0200
From: Konrad Dybcio <konrad.dybcio@...aro.org>
To: Andy Gross <agross@...nel.org>,
Bjorn Andersson <andersson@...nel.org>,
Michael Turquette <mturquette@...libre.com>,
Stephen Boyd <sboyd@...nel.org>,
Georgi Djakov <djakov@...nel.org>,
Leo Yan <leo.yan@...aro.org>,
Evan Green <evgreen@...omium.org>,
Rob Herring <robh+dt@...nel.org>,
Krzysztof Kozlowski <krzysztof.kozlowski+dt@...aro.org>,
Conor Dooley <conor+dt@...nel.org>
Cc: Marijn Suijten <marijn.suijten@...ainline.org>,
linux-arm-msm@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-clk@...r.kernel.org, linux-pm@...r.kernel.org,
devicetree@...r.kernel.org,
Konrad Dybcio <konrad.dybcio@...aro.org>,
Stephan Gerhold <stephan@...hold.net>
Subject: [PATCH v6 20/22] interconnect: qcom: icc-rpm: Set bandwidth on
both contexts
Up until now, for some reason, we have only been setting bandwidth values
on the active-only RPM context. That effectively meant that the RPM could
lift all votes when entering sleep mode, or that the system might never
enter sleep at all.

That in turn could potentially break things like USB wakeup, as the
connection between APSS and SNoC/PNoC would simply be dead.

Vote bandwidth on both the active and sleep contexts instead.
Fixes: 30c8fa3ec61a ("interconnect: qcom: Add MSM8916 interconnect provider driver")
Reviewed-by: Stephan Gerhold <stephan@...hold.net>
Signed-off-by: Konrad Dybcio <konrad.dybcio@...aro.org>
---
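[Editor's note: the snippet below is a minimal, self-contained sketch of the
voting scheme this patch introduces: one bandwidth vote per RPM context,
mirroring the loop added to qcom_icc_rpm_set() in the diff. It is purely
illustrative; rpm_smd_send(), node_vote() and the example values are
stand-ins, not the real SMD RPM or interconnect API.]

/*
 * Illustrative model of per-context bandwidth voting, loosely based on
 * the qcom_icc_rpm_set() change below. Not the actual driver code.
 */
#include <stdio.h>
#include <stdint.h>

enum rpm_context { RPM_ACTIVE_STATE, RPM_SLEEP_STATE, RPM_STATE_NUM };

/* Stand-in for qcom_icc_rpm_smd_send(); just logs the request. */
static int rpm_smd_send(int ctx, const char *type, int id, uint64_t bw_bps)
{
	printf("ctx=%d %s id=%d bw=%llu bps\n",
	       ctx, type, id, (unsigned long long)bw_bps);
	return 0;
}

/* Vote the per-context aggregated bandwidth for one node. */
static int node_vote(int mas_id, int slv_id,
		     const uint64_t bw_kBps[RPM_STATE_NUM])
{
	for (int ctx = 0; ctx < RPM_STATE_NUM; ctx++) {
		/* kB/s to B/s, in the spirit of icc_units_to_bps() */
		uint64_t bw_bps = bw_kBps[ctx] * 1000ULL;
		int ret;

		if (mas_id != -1) {
			ret = rpm_smd_send(ctx, "master", mas_id, bw_bps);
			if (ret)
				return ret;
		}
		if (slv_id != -1) {
			ret = rpm_smd_send(ctx, "slave", slv_id, bw_bps);
			if (ret)
				return ret;
		}
	}
	return 0;
}

int main(void)
{
	/* Hypothetical per-context aggregates, indexed like agg_avg[] below. */
	uint64_t agg_avg[RPM_STATE_NUM] = { 800000, 100000 };

	return node_vote(/* mas_rpm_id */ 1, /* slv_rpm_id */ -1, agg_avg);
}

The point of the loop is that the sleep set now gets its own (possibly
lower) vote instead of no vote at all, so paths needed for wakeup are not
dropped when APSS goes to sleep.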
drivers/interconnect/qcom/icc-rpm.c | 54 +++++++++++++++++++------------------
1 file changed, 28 insertions(+), 26 deletions(-)
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index 5f3bbe7e6a0d..928dc27c1d93 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -204,34 +204,39 @@ static int qcom_icc_qos_set(struct icc_node *node)
}
}
-static int qcom_icc_rpm_set(struct qcom_icc_node *qn, u64 sum_bw)
+static int qcom_icc_rpm_set(struct qcom_icc_node *qn, u64 *bw)
{
- int ret = 0;
+ int ret, rpm_ctx = 0;
+ u64 bw_bps;
if (qn->qos.ap_owned)
return 0;
- if (qn->mas_rpm_id != -1) {
- ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
- RPM_BUS_MASTER_REQ,
- qn->mas_rpm_id,
- sum_bw);
- if (ret) {
- pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
- qn->mas_rpm_id, ret);
- return ret;
+ for (rpm_ctx = 0; rpm_ctx < QCOM_SMD_RPM_STATE_NUM; rpm_ctx++) {
+ bw_bps = icc_units_to_bps(bw[rpm_ctx]);
+
+ if (qn->mas_rpm_id != -1) {
+ ret = qcom_icc_rpm_smd_send(rpm_ctx,
+ RPM_BUS_MASTER_REQ,
+ qn->mas_rpm_id,
+ bw_bps);
+ if (ret) {
+ pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
+ qn->mas_rpm_id, ret);
+ return ret;
+ }
}
- }
- if (qn->slv_rpm_id != -1) {
- ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
- RPM_BUS_SLAVE_REQ,
- qn->slv_rpm_id,
- sum_bw);
- if (ret) {
- pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
- qn->slv_rpm_id, ret);
- return ret;
+ if (qn->slv_rpm_id != -1) {
+ ret = qcom_icc_rpm_smd_send(rpm_ctx,
+ RPM_BUS_SLAVE_REQ,
+ qn->slv_rpm_id,
+ bw_bps);
+ if (ret) {
+ pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
+ qn->slv_rpm_id, ret);
+ return ret;
+ }
}
}
@@ -336,7 +341,6 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
struct qcom_icc_provider *qp;
struct qcom_icc_node *src_qn = NULL, *dst_qn = NULL;
struct icc_provider *provider;
- u64 sum_bw;
u64 active_rate, sleep_rate;
u64 agg_avg[QCOM_SMD_RPM_STATE_NUM], agg_peak[QCOM_SMD_RPM_STATE_NUM];
u64 max_agg_avg;
@@ -350,14 +354,12 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
qcom_icc_bus_aggregate(provider, agg_avg, agg_peak, &max_agg_avg);
- sum_bw = icc_units_to_bps(max_agg_avg);
-
- ret = qcom_icc_rpm_set(src_qn, sum_bw);
+ ret = qcom_icc_rpm_set(src_qn, agg_avg);
if (ret)
return ret;
if (dst_qn) {
- ret = qcom_icc_rpm_set(dst_qn, sum_bw);
+ ret = qcom_icc_rpm_set(dst_qn, agg_avg);
if (ret)
return ret;
}
--
2.41.0