Message-ID: <20230703085555.30285-4-quic_mkshah@quicinc.com>
Date: Mon, 3 Jul 2023 14:25:55 +0530
From: Maulik Shah <quic_mkshah@...cinc.com>
To: <andersson@...nel.org>, <ulf.hansson@...aro.org>,
<dianders@...omium.org>, <swboyd@...omium.org>,
<wingers@...gle.com>, <daniel.lezcano@...aro.org>,
<rafael@...nel.org>
CC: <linux-arm-msm@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<linux-pm@...r.kernel.org>, <sudeep.holla@....com>,
<jwerner@...omium.org>, <quic_lsrao@...cinc.com>,
<quic_rjendra@...cinc.com>, Maulik Shah <quic_mkshah@...cinc.com>,
<devicetree@...r.kernel.org>
Subject: [RESEND v4 3/3] arm64: dts: qcom: sc7280: Add power-domains for cpuidle states

Add power-domains for cpuidle states so that PSCI OS-initiated idle states
can be used.

Cc: devicetree@...r.kernel.org
Reviewed-by: Ulf Hansson <ulf.hansson@...aro.org>
Signed-off-by: Maulik Shah <quic_mkshah@...cinc.com>
---
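For reviewers, a condensed sketch of the hierarchical PSCI topology this
patch creates, assembled from the diff below (one little CPU shown; labels
and values are exactly those in the diff, and CPU1-CPU7 follow the same
pattern with the big cores referencing BIG_CPU_SLEEP_0/1):

	psci {
		compatible = "arm,psci-1.0";
		method = "smc";

		CPU_PD0: cpu0 {
			#power-domain-cells = <0>;
			power-domains = <&CLUSTER_PD>;
			domain-idle-states = <&LITTLE_CPU_SLEEP_0 &LITTLE_CPU_SLEEP_1>;
		};

		CLUSTER_PD: cpu-cluster0 {
			#power-domain-cells = <0>;
			domain-idle-states = <&CLUSTER_SLEEP_0>;
		};
	};

Each cpu node then consumes its per-CPU domain instead of listing
cpu-idle-states directly:

	power-domains = <&CPU_PD0>;
	power-domain-names = "psci";

With this, the per-CPU idle states stay attached to the CPU domains, while
CLUSTER_SLEEP_0 (now "domain-idle-state" compatible) is owned by CLUSTER_PD
and, under OS-initiated mode, is expected to be entered only when the last
CPU in the cluster goes idle.
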
arch/arm64/boot/dts/qcom/sc7280.dtsi | 98 +++++++++++++++++++++-------
1 file changed, 73 insertions(+), 25 deletions(-)

diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
index a0e8db8270e7..84208a6c673d 100644
--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
@@ -170,9 +170,8 @@
reg = <0x0 0x0>;
clocks = <&cpufreq_hw 0>;
enable-method = "psci";
- cpu-idle-states = <&LITTLE_CPU_SLEEP_0
- &LITTLE_CPU_SLEEP_1
- &CLUSTER_SLEEP_0>;
+ power-domains = <&CPU_PD0>;
+ power-domain-names = "psci";
next-level-cache = <&L2_0>;
operating-points-v2 = <&cpu0_opp_table>;
interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
@@ -198,9 +197,8 @@
reg = <0x0 0x100>;
clocks = <&cpufreq_hw 0>;
enable-method = "psci";
- cpu-idle-states = <&LITTLE_CPU_SLEEP_0
- &LITTLE_CPU_SLEEP_1
- &CLUSTER_SLEEP_0>;
+ power-domains = <&CPU_PD1>;
+ power-domain-names = "psci";
next-level-cache = <&L2_100>;
operating-points-v2 = <&cpu0_opp_table>;
interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
@@ -221,9 +219,8 @@
reg = <0x0 0x200>;
clocks = <&cpufreq_hw 0>;
enable-method = "psci";
- cpu-idle-states = <&LITTLE_CPU_SLEEP_0
- &LITTLE_CPU_SLEEP_1
- &CLUSTER_SLEEP_0>;
+ power-domains = <&CPU_PD2>;
+ power-domain-names = "psci";
next-level-cache = <&L2_200>;
operating-points-v2 = <&cpu0_opp_table>;
interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
@@ -244,9 +241,8 @@
reg = <0x0 0x300>;
clocks = <&cpufreq_hw 0>;
enable-method = "psci";
- cpu-idle-states = <&LITTLE_CPU_SLEEP_0
- &LITTLE_CPU_SLEEP_1
- &CLUSTER_SLEEP_0>;
+ power-domains = <&CPU_PD3>;
+ power-domain-names = "psci";
next-level-cache = <&L2_300>;
operating-points-v2 = <&cpu0_opp_table>;
interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
@@ -267,9 +263,8 @@
reg = <0x0 0x400>;
clocks = <&cpufreq_hw 1>;
enable-method = "psci";
- cpu-idle-states = <&BIG_CPU_SLEEP_0
- &BIG_CPU_SLEEP_1
- &CLUSTER_SLEEP_0>;
+ power-domains = <&CPU_PD4>;
+ power-domain-names = "psci";
next-level-cache = <&L2_400>;
operating-points-v2 = <&cpu4_opp_table>;
interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
@@ -290,9 +285,8 @@
reg = <0x0 0x500>;
clocks = <&cpufreq_hw 1>;
enable-method = "psci";
- cpu-idle-states = <&BIG_CPU_SLEEP_0
- &BIG_CPU_SLEEP_1
- &CLUSTER_SLEEP_0>;
+ power-domains = <&CPU_PD5>;
+ power-domain-names = "psci";
next-level-cache = <&L2_500>;
operating-points-v2 = <&cpu4_opp_table>;
interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
@@ -313,9 +307,8 @@
reg = <0x0 0x600>;
clocks = <&cpufreq_hw 1>;
enable-method = "psci";
- cpu-idle-states = <&BIG_CPU_SLEEP_0
- &BIG_CPU_SLEEP_1
- &CLUSTER_SLEEP_0>;
+ power-domains = <&CPU_PD6>;
+ power-domain-names = "psci";
next-level-cache = <&L2_600>;
operating-points-v2 = <&cpu4_opp_table>;
interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
@@ -336,9 +329,8 @@
reg = <0x0 0x700>;
clocks = <&cpufreq_hw 2>;
enable-method = "psci";
- cpu-idle-states = <&BIG_CPU_SLEEP_0
- &BIG_CPU_SLEEP_1
- &CLUSTER_SLEEP_0>;
+ power-domains = <&CPU_PD7>;
+ power-domain-names = "psci";
next-level-cache = <&L2_700>;
operating-points-v2 = <&cpu7_opp_table>;
interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
@@ -431,9 +423,11 @@
min-residency-us = <5555>;
local-timer-stop;
};
+ };

+ domain-idle-states {
CLUSTER_SLEEP_0: cluster-sleep-0 {
- compatible = "arm,idle-state";
+ compatible = "domain-idle-state";
idle-state-name = "cluster-power-down";
arm,psci-suspend-param = <0x40003444>;
entry-latency-us = <3263>;
@@ -811,6 +805,59 @@
psci {
compatible = "arm,psci-1.0";
method = "smc";
+
+ CPU_PD0: cpu0 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&LITTLE_CPU_SLEEP_0 &LITTLE_CPU_SLEEP_1>;
+ };
+
+ CPU_PD1: cpu1 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&LITTLE_CPU_SLEEP_0 &LITTLE_CPU_SLEEP_1>;
+ };
+
+ CPU_PD2: cpu2 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&LITTLE_CPU_SLEEP_0 &LITTLE_CPU_SLEEP_1>;
+ };
+
+ CPU_PD3: cpu3 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&LITTLE_CPU_SLEEP_0 &LITTLE_CPU_SLEEP_1>;
+ };
+
+ CPU_PD4: cpu4 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&BIG_CPU_SLEEP_0 &BIG_CPU_SLEEP_1>;
+ };
+
+ CPU_PD5: cpu5 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&BIG_CPU_SLEEP_0 &BIG_CPU_SLEEP_1>;
+ };
+
+ CPU_PD6: cpu6 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&BIG_CPU_SLEEP_0 &BIG_CPU_SLEEP_1>;
+ };
+
+ CPU_PD7: cpu7 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&BIG_CPU_SLEEP_0 &BIG_CPU_SLEEP_1>;
+ };
+
+ CLUSTER_PD: cpu-cluster0 {
+ #power-domain-cells = <0>;
+ domain-idle-states = <&CLUSTER_SLEEP_0>;
+ };
};

qspi_opp_table: opp-table-qspi {
@@ -5291,6 +5338,7 @@
<SLEEP_TCS 3>,
<WAKE_TCS 3>,
<CONTROL_TCS 1>;
+ power-domains = <&CLUSTER_PD>;

apps_bcm_voter: bcm-voter {
compatible = "qcom,bcm-voter";
--
2.17.1