Message-Id: <20190513192300.653-19-ulf.hansson@linaro.org>
Date: Mon, 13 May 2019 21:23:00 +0200
From: Ulf Hansson <ulf.hansson@...aro.org>
To: Sudeep Holla <sudeep.holla@....com>,
Lorenzo Pieralisi <Lorenzo.Pieralisi@....com>,
Mark Rutland <mark.rutland@....com>,
linux-arm-kernel@...ts.infradead.org
Cc: "Rafael J . Wysocki" <rjw@...ysocki.net>,
Daniel Lezcano <daniel.lezcano@...aro.org>,
"Raju P . L . S . S . S . N" <rplsssn@...eaurora.org>,
Amit Kucheria <amit.kucheria@...aro.org>,
Bjorn Andersson <bjorn.andersson@...aro.org>,
Stephen Boyd <sboyd@...nel.org>,
Niklas Cassel <niklas.cassel@...aro.org>,
Tony Lindgren <tony@...mide.com>,
Kevin Hilman <khilman@...nel.org>,
Lina Iyer <ilina@...eaurora.org>,
Viresh Kumar <viresh.kumar@...aro.org>,
Vincent Guittot <vincent.guittot@...aro.org>,
Geert Uytterhoeven <geert+renesas@...der.be>,
Souvik Chakravarty <souvik.chakravarty@....com>,
linux-pm@...r.kernel.org, linux-arm-msm@...r.kernel.org,
linux-kernel@...r.kernel.org, Ulf Hansson <ulf.hansson@...aro.org>,
Wei Xu <xuwei5@...ilicon.com>
Subject: [PATCH 18/18] arm64: dts: hikey: Convert to the hierarchical CPU topology layout

To enable the OS to manage last-man-standing activities for a CPU while
an idle state for a group of CPUs is selected, let's convert the HiKey
platform to use the hierarchical CPU topology layout.

Cc: Wei Xu <xuwei5@...ilicon.com>
Signed-off-by: Ulf Hansson <ulf.hansson@...aro.org>
---
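
To make the change easier to review, here is a minimal sketch of what
the conversion means for one CPU. In the flattened layout, the CPU
lists every idle state it may enter directly via cpu-idle-states. In
the hierarchical layout, the CPU instead references a per-CPU PM
domain (a subnode of the psci node), which in turn is a child of the
cluster PM domain that carries the cluster idle state. Names and
values below are taken from this patch; the bindings themselves are
introduced earlier in this series.

	/* Flattened layout (before): */
	cpu0: cpu@0 {
		/* ... */
		cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
	};

	/* Hierarchical layout (after): */
	cpu0: cpu@0 {
		/* ... */
		power-domains = <&CPU_PD0>;
		power-domain-names = "psci";
	};

	psci {
		compatible = "arm,psci-0.2";
		method = "smc";

		CPU_PD0: cpu-pd0 {
			#power-domain-cells = <0>;
			power-domains = <&CLUSTER_PD0>;
			domain-idle-states = <&CPU_SLEEP>;
		};

		CLUSTER_PD0: cluster-pd0 {
			#power-domain-cells = <0>;
			domain-idle-states = <&CLUSTER_SLEEP>;
		};
	};
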
Changes:
- None.
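
A note on the CLUSTER_SLEEP hunk below, in case the new suspend
parameter looks surprising: the idea is that the PSCI PM-domain code
composes the final power_state parameter by OR-ing the CPU's state
with the selected domain state, so the cluster node now only needs to
carry the cluster-level bits:

	  0x0010000  (CPU_SLEEP: power level 0, powerdown)
	| 0x1000000  (CLUSTER_SLEEP: power level 1)
	  ---------
	  0x1010000  (the value the flattened layout used)

Likewise, local-timer-stop is a property of arm,idle-state nodes; with
the hierarchical layout the CPU-level CPU_SLEEP state, which keeps the
property, already tells cpuidle to switch to the broadcast timer, so
the cluster node no longer needs it.
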
---
arch/arm64/boot/dts/hisilicon/hi6220.dtsi | 87 ++++++++++++++++++++---
1 file changed, 76 insertions(+), 11 deletions(-)
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index 108e2a4227f6..36ff460f428f 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -20,6 +20,64 @@
psci {
compatible = "arm,psci-0.2";
method = "smc";
+
+ CPU_PD0: cpu-pd0 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD0>;
+ domain-idle-states = <&CPU_SLEEP>;
+ };
+
+ CPU_PD1: cpu-pd1 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD0>;
+ domain-idle-states = <&CPU_SLEEP>;
+ };
+
+ CPU_PD2: cpu-pd2 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD0>;
+ domain-idle-states = <&CPU_SLEEP>;
+ };
+
+ CPU_PD3: cpu-pd3 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD0>;
+ domain-idle-states = <&CPU_SLEEP>;
+ };
+
+ CPU_PD4: cpu-pd4 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD1>;
+ domain-idle-states = <&CPU_SLEEP>;
+ };
+
+ CPU_PD5: cpu-pd5 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD1>;
+ domain-idle-states = <&CPU_SLEEP>;
+ };
+
+ CPU_PD6: cpu-pd6 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD1>;
+ domain-idle-states = <&CPU_SLEEP>;
+ };
+
+ CPU_PD7: cpu-pd7 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD1>;
+ domain-idle-states = <&CPU_SLEEP>;
+ };
+
+ CLUSTER_PD0: cluster-pd0 {
+ #power-domain-cells = <0>;
+ domain-idle-states = <&CLUSTER_SLEEP>;
+ };
+
+ CLUSTER_PD1: cluster-pd1 {
+ #power-domain-cells = <0>;
+ domain-idle-states = <&CLUSTER_SLEEP>;
+ };
};
cpus {
@@ -70,9 +128,8 @@
};
CLUSTER_SLEEP: cluster-sleep {
- compatible = "arm,idle-state";
- local-timer-stop;
- arm,psci-suspend-param = <0x1010000>;
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x1000000>;
entry-latency-us = <1000>;
exit-latency-us = <700>;
min-residency-us = <2700>;
@@ -88,9 +145,10 @@
next-level-cache = <&CLUSTER0_L2>;
clocks = <&stub_clock 0>;
operating-points-v2 = <&cpu_opp_table>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
#cooling-cells = <2>; /* min followed by max */
dynamic-power-coefficient = <311>;
+ power-domains = <&CPU_PD0>;
+ power-domain-names = "psci";
};
cpu1: cpu@1 {
@@ -101,9 +159,10 @@
next-level-cache = <&CLUSTER0_L2>;
clocks = <&stub_clock 0>;
operating-points-v2 = <&cpu_opp_table>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
#cooling-cells = <2>; /* min followed by max */
dynamic-power-coefficient = <311>;
+ power-domains = <&CPU_PD1>;
+ power-domain-names = "psci";
};
cpu2: cpu@2 {
@@ -114,9 +173,10 @@
next-level-cache = <&CLUSTER0_L2>;
clocks = <&stub_clock 0>;
operating-points-v2 = <&cpu_opp_table>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
#cooling-cells = <2>; /* min followed by max */
dynamic-power-coefficient = <311>;
+ power-domains = <&CPU_PD2>;
+ power-domain-names = "psci";
};
cpu3: cpu@3 {
@@ -127,9 +187,10 @@
next-level-cache = <&CLUSTER0_L2>;
clocks = <&stub_clock 0>;
operating-points-v2 = <&cpu_opp_table>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
#cooling-cells = <2>; /* min followed by max */
dynamic-power-coefficient = <311>;
+ power-domains = <&CPU_PD3>;
+ power-domain-names = "psci";
};
cpu4: cpu@100 {
@@ -140,9 +201,10 @@
next-level-cache = <&CLUSTER1_L2>;
clocks = <&stub_clock 0>;
operating-points-v2 = <&cpu_opp_table>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
#cooling-cells = <2>; /* min followed by max */
dynamic-power-coefficient = <311>;
+ power-domains = <&CPU_PD4>;
+ power-domain-names = "psci";
};
cpu5: cpu@101 {
@@ -153,9 +215,10 @@
next-level-cache = <&CLUSTER1_L2>;
clocks = <&stub_clock 0>;
operating-points-v2 = <&cpu_opp_table>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
#cooling-cells = <2>; /* min followed by max */
dynamic-power-coefficient = <311>;
+ power-domains = <&CPU_PD5>;
+ power-domain-names = "psci";
};
cpu6: cpu@102 {
@@ -166,9 +229,10 @@
next-level-cache = <&CLUSTER1_L2>;
clocks = <&stub_clock 0>;
operating-points-v2 = <&cpu_opp_table>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
#cooling-cells = <2>; /* min followed by max */
dynamic-power-coefficient = <311>;
+ power-domains = <&CPU_PD6>;
+ power-domain-names = "psci";
};
cpu7: cpu@103 {
@@ -179,9 +243,10 @@
next-level-cache = <&CLUSTER1_L2>;
clocks = <&stub_clock 0>;
operating-points-v2 = <&cpu_opp_table>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
#cooling-cells = <2>; /* min followed by max */
dynamic-power-coefficient = <311>;
+ power-domains = <&CPU_PD7>;
+ power-domain-names = "psci";
};
CLUSTER0_L2: l2-cache0 {
--
2.17.1