Message-ID: <20240612124056.39230-6-quic_sibis@quicinc.com>
Date: Wed, 12 Jun 2024 18:10:56 +0530
From: Sibi Sankar <quic_sibis@...cinc.com>
To: <sudeep.holla@....com>, <cristian.marussi@....com>, <andersson@...nel.org>,
<konrad.dybcio@...aro.org>, <jassisinghbrar@...il.com>,
<robh+dt@...nel.org>, <krzysztof.kozlowski+dt@...aro.org>,
<dmitry.baryshkov@...aro.org>
CC: <linux-kernel@...r.kernel.org>, <linux-arm-msm@...r.kernel.org>,
<devicetree@...r.kernel.org>, <quic_rgottimu@...cinc.com>,
<quic_kshivnan@...cinc.com>, <quic_sibis@...cinc.com>,
<conor+dt@...nel.org>, <quic_nkela@...cinc.com>,
<quic_psodagud@...cinc.com>, <abel.vesa@...aro.org>
Subject: [PATCH V6 5/5] arm64: dts: qcom: x1e80100: Enable cpufreq

Enable cpufreq on the X1E80100 SoC by adding the SCMI firmware node with
the performance domain management protocol (protocol@13) and hooking
each CPU up to its per-cluster SCMI performance domain ("perf")
alongside the existing PSCI power domain.
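
The scmi node references the CPUCP mailbox controller (&cpucp_mbox) and
the SCMI shared-memory regions (&cpu_scp_lpri0/&cpu_scp_lpri1), which
are not part of this patch and are expected to be provided elsewhere
(elsewhere in the dtsi or by other patches). Purely for illustration, a
minimal sketch of what such provider nodes could look like; every unit
address, reg value and the mailbox compatible string below is a
placeholder or assumption, not something taken from this patch:

	/* Illustrative sketch only: addresses, sizes and the mailbox
	 * compatible are placeholders/assumptions, not values from this
	 * patch or the real x1e80100.dtsi.
	 */
	sram@1b000000 {
		compatible = "mmio-sram";
		reg = <0x0 0x1b000000 0x0 0x400>;	/* placeholder base/size */
		#address-cells = <1>;
		#size-cells = <1>;
		ranges = <0x0 0x0 0x1b000000 0x400>;

		cpu_scp_lpri0: scmi-sram-section@0 {
			compatible = "arm,scmi-shmem";	/* generic SCMI shmem binding */
			reg = <0x0 0x200>;		/* placeholder offset/size */
		};

		cpu_scp_lpri1: scmi-sram-section@200 {
			compatible = "arm,scmi-shmem";
			reg = <0x200 0x200>;		/* placeholder offset/size */
		};
	};

	cpucp_mbox: mailbox@17430000 {			/* placeholder unit address */
		compatible = "qcom,x1e80100-cpucp-mbox";	/* assumed compatible */
		reg = <0x0 0x17430000 0x0 0x10000>;	/* placeholder */
		#mbox-cells = <1>;	/* matches the <&cpucp_mbox N> users above */
	};

With the perf protocol exposed, the scmi-cpufreq driver should end up
with one frequency domain per cluster (scmi_dvfs indices 0, 1 and 2 for
the three four-CPU clusters).
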
Signed-off-by: Sibi Sankar <quic_sibis@...cinc.com>
---
arch/arm64/boot/dts/qcom/x1e80100.dtsi | 63 ++++++++++++++++----------
 1 file changed, 39 insertions(+), 24 deletions(-)

diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
index 7b619db07694..d134dc4c7425 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
@@ -69,8 +69,8 @@ CPU0: cpu@0 {
reg = <0x0 0x0>;
enable-method = "psci";
next-level-cache = <&L2_0>;
- power-domains = <&CPU_PD0>;
- power-domain-names = "psci";
+ power-domains = <&CPU_PD0>, <&scmi_dvfs 0>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&CLUSTER_C4>;
L2_0: l2-cache {
@@ -86,8 +86,8 @@ CPU1: cpu@100 {
reg = <0x0 0x100>;
enable-method = "psci";
next-level-cache = <&L2_0>;
- power-domains = <&CPU_PD1>;
- power-domain-names = "psci";
+ power-domains = <&CPU_PD1>, <&scmi_dvfs 0>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&CLUSTER_C4>;
};
@@ -97,8 +97,8 @@ CPU2: cpu@200 {
reg = <0x0 0x200>;
enable-method = "psci";
next-level-cache = <&L2_0>;
- power-domains = <&CPU_PD2>;
- power-domain-names = "psci";
+ power-domains = <&CPU_PD2>, <&scmi_dvfs 0>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&CLUSTER_C4>;
};
@@ -108,8 +108,8 @@ CPU3: cpu@300 {
reg = <0x0 0x300>;
enable-method = "psci";
next-level-cache = <&L2_0>;
- power-domains = <&CPU_PD3>;
- power-domain-names = "psci";
+ power-domains = <&CPU_PD3>, <&scmi_dvfs 0>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&CLUSTER_C4>;
};
@@ -119,8 +119,8 @@ CPU4: cpu@10000 {
reg = <0x0 0x10000>;
enable-method = "psci";
next-level-cache = <&L2_1>;
- power-domains = <&CPU_PD4>;
- power-domain-names = "psci";
+ power-domains = <&CPU_PD4>, <&scmi_dvfs 1>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&CLUSTER_C4>;
L2_1: l2-cache {
@@ -136,8 +136,8 @@ CPU5: cpu@10100 {
reg = <0x0 0x10100>;
enable-method = "psci";
next-level-cache = <&L2_1>;
- power-domains = <&CPU_PD5>;
- power-domain-names = "psci";
+ power-domains = <&CPU_PD5>, <&scmi_dvfs 1>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&CLUSTER_C4>;
};
@@ -147,8 +147,8 @@ CPU6: cpu@10200 {
reg = <0x0 0x10200>;
enable-method = "psci";
next-level-cache = <&L2_1>;
- power-domains = <&CPU_PD6>;
- power-domain-names = "psci";
+ power-domains = <&CPU_PD6>, <&scmi_dvfs 1>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&CLUSTER_C4>;
};
@@ -158,8 +158,8 @@ CPU7: cpu@10300 {
reg = <0x0 0x10300>;
enable-method = "psci";
next-level-cache = <&L2_1>;
- power-domains = <&CPU_PD7>;
- power-domain-names = "psci";
+ power-domains = <&CPU_PD7>, <&scmi_dvfs 1>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&CLUSTER_C4>;
};
@@ -169,8 +169,8 @@ CPU8: cpu@20000 {
reg = <0x0 0x20000>;
enable-method = "psci";
next-level-cache = <&L2_2>;
- power-domains = <&CPU_PD8>;
- power-domain-names = "psci";
+ power-domains = <&CPU_PD8>, <&scmi_dvfs 2>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&CLUSTER_C4>;
L2_2: l2-cache {
@@ -186,8 +186,8 @@ CPU9: cpu@20100 {
reg = <0x0 0x20100>;
enable-method = "psci";
next-level-cache = <&L2_2>;
- power-domains = <&CPU_PD9>;
- power-domain-names = "psci";
+ power-domains = <&CPU_PD9>, <&scmi_dvfs 2>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&CLUSTER_C4>;
};
@@ -197,8 +197,8 @@ CPU10: cpu@20200 {
reg = <0x0 0x20200>;
enable-method = "psci";
next-level-cache = <&L2_2>;
- power-domains = <&CPU_PD10>;
- power-domain-names = "psci";
+ power-domains = <&CPU_PD10>, <&scmi_dvfs 2>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&CLUSTER_C4>;
};
@@ -208,8 +208,8 @@ CPU11: cpu@20300 {
reg = <0x0 0x20300>;
enable-method = "psci";
next-level-cache = <&L2_2>;
- power-domains = <&CPU_PD11>;
- power-domain-names = "psci";
+ power-domains = <&CPU_PD11>, <&scmi_dvfs 2>;
+ power-domain-names = "psci", "perf";
cpu-idle-states = <&CLUSTER_C4>;
};
@@ -309,6 +309,21 @@ scm: scm {
interconnects = <&aggre2_noc MASTER_CRYPTO QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
};
+
+ scmi {
+ compatible = "arm,scmi";
+ mboxes = <&cpucp_mbox 0>, <&cpucp_mbox 2>;
+ mbox-names = "tx", "rx";
+ shmem = <&cpu_scp_lpri0>, <&cpu_scp_lpri1>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ scmi_dvfs: protocol@13 {
+ reg = <0x13>;
+ #power-domain-cells = <1>;
+ };
+ };
};
clk_virt: interconnect-0 {
--
2.34.1