Message-ID: <df0e6ff1e7ebba710228cfe568e64bbb@codeaurora.org>
Date: Tue, 04 May 2021 12:35:58 +0530
From: Sibi Sankar <sibis@...eaurora.org>
To: Matthias Kaehlcke <mka@...omium.org>
Cc: bjorn.andersson@...aro.org, viresh.kumar@...aro.org,
swboyd@...omium.org, agross@...nel.org, robh+dt@...nel.org,
rjw@...ysocki.net, linux-arm-msm@...r.kernel.org,
devicetree@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-pm@...r.kernel.org, dianders@...omium.org
Subject: Re: [PATCH 2/2] arm64: dts: qcom: sc7280: Add cpu OPP tables
On 2021-05-03 22:06, Matthias Kaehlcke wrote:
> On Fri, Apr 30, 2021 at 07:58:21PM +0530, Sibi Sankar wrote:
>> Add OPP tables required to scale DDR/L3 per freq-domain on SC7280
>> SoCs.
>>
>> Signed-off-by: Sibi Sankar <sibis@...eaurora.org>
>> ---
>> arch/arm64/boot/dts/qcom/sc7280.dtsi | 135 +++++++++++++++++++++++++++++++++++
>> 1 file changed, 135 insertions(+)
>>
>> diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
>> index 0bb835aeae33..90220cecb368 100644
>> --- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
>> +++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
>> @@ -7,6 +7,7 @@
>>
>> #include <dt-bindings/clock/qcom,gcc-sc7280.h>
>> #include <dt-bindings/clock/qcom,rpmh.h>
>> +#include <dt-bindings/interconnect/qcom,osm-l3.h>
>> #include <dt-bindings/interconnect/qcom,sc7280.h>
>> #include <dt-bindings/interrupt-controller/arm-gic.h>
>> #include <dt-bindings/mailbox/qcom-ipcc.h>
>> @@ -71,6 +72,9 @@
>> &LITTLE_CPU_SLEEP_1
>> &CLUSTER_SLEEP_0>;
>> next-level-cache = <&L2_0>;
>> + operating-points-v2 = <&cpu0_opp_table>;
>> + interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
>> + <&epss_l3 MASTER_EPSS_L3_APPS &epss_l3 SLAVE_EPSS_L3_SHARED>;
>
> This patch seems to depend on the 'Add SC7280 interconnect provider
> driver' series
> (https://patchwork.kernel.org/project/linux-arm-msm/list/?series=473747)
> and the 'Add L3 provider support for SC7280' series
> (https://patchwork.kernel.org/project/linux-arm-msm/list/?series=468285),
> neither of which has landed yet. The dependencies should be mentioned
> in the commit notes (under '---').
I had all the dependencies listed in the cover
letter. I'll make sure to include them in the
commit notes as well in v3.
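
For example, v3 could carry a note like this under the
'---' line (just a sketch, exact wording TBD):

  This patch depends on:
  - 'Add SC7280 interconnect provider driver'
    https://patchwork.kernel.org/project/linux-arm-msm/list/?series=473747
  - 'Add L3 provider support for SC7280'
    https://patchwork.kernel.org/project/linux-arm-msm/list/?series=468285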
>
>> qcom,freq-domain = <&cpufreq_hw 0>;
>> L2_0: l2-cache {
>> compatible = "cache";
>> @@ -90,6 +94,9 @@
>> &LITTLE_CPU_SLEEP_1
>> &CLUSTER_SLEEP_0>;
>> next-level-cache = <&L2_100>;
>> + operating-points-v2 = <&cpu0_opp_table>;
>> + interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
>> + <&epss_l3 MASTER_EPSS_L3_APPS &epss_l3 SLAVE_EPSS_L3_SHARED>;
>> qcom,freq-domain = <&cpufreq_hw 0>;
>> L2_100: l2-cache {
>> compatible = "cache";
>> @@ -106,6 +113,9 @@
>> &LITTLE_CPU_SLEEP_1
>> &CLUSTER_SLEEP_0>;
>> next-level-cache = <&L2_200>;
>> + operating-points-v2 = <&cpu0_opp_table>;
>> + interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
>> + <&epss_l3 MASTER_EPSS_L3_APPS &epss_l3 SLAVE_EPSS_L3_SHARED>;
>> qcom,freq-domain = <&cpufreq_hw 0>;
>> L2_200: l2-cache {
>> compatible = "cache";
>> @@ -122,6 +132,9 @@
>> &LITTLE_CPU_SLEEP_1
>> &CLUSTER_SLEEP_0>;
>> next-level-cache = <&L2_300>;
>> + operating-points-v2 = <&cpu0_opp_table>;
>> + interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
>> + <&epss_l3 MASTER_EPSS_L3_APPS &epss_l3 SLAVE_EPSS_L3_SHARED>;
>> qcom,freq-domain = <&cpufreq_hw 0>;
>> L2_300: l2-cache {
>> compatible = "cache";
>> @@ -138,6 +151,9 @@
>> &BIG_CPU_SLEEP_1
>> &CLUSTER_SLEEP_0>;
>> next-level-cache = <&L2_400>;
>> + operating-points-v2 = <&cpu4_opp_table>;
>> + interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
>> + <&epss_l3 MASTER_EPSS_L3_APPS &epss_l3 SLAVE_EPSS_L3_SHARED>;
>> qcom,freq-domain = <&cpufreq_hw 1>;
>> L2_400: l2-cache {
>> compatible = "cache";
>> @@ -154,6 +170,9 @@
>> &BIG_CPU_SLEEP_1
>> &CLUSTER_SLEEP_0>;
>> next-level-cache = <&L2_500>;
>> + operating-points-v2 = <&cpu4_opp_table>;
>> + interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
>> + <&epss_l3 MASTER_EPSS_L3_APPS &epss_l3 SLAVE_EPSS_L3_SHARED>;
>> qcom,freq-domain = <&cpufreq_hw 1>;
>> L2_500: l2-cache {
>> compatible = "cache";
>> @@ -170,6 +189,9 @@
>> &BIG_CPU_SLEEP_1
>> &CLUSTER_SLEEP_0>;
>> next-level-cache = <&L2_600>;
>> + operating-points-v2 = <&cpu4_opp_table>;
>> + interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
>> + <&epss_l3 MASTER_EPSS_L3_APPS &epss_l3 SLAVE_EPSS_L3_SHARED>;
>> qcom,freq-domain = <&cpufreq_hw 1>;
>> L2_600: l2-cache {
>> compatible = "cache";
>> @@ -186,6 +208,9 @@
>> &BIG_CPU_SLEEP_1
>> &CLUSTER_SLEEP_0>;
>> next-level-cache = <&L2_700>;
>> + operating-points-v2 = <&cpu4_opp_table>;
>> + interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
>> + <&epss_l3 MASTER_EPSS_L3_APPS &epss_l3 SLAVE_EPSS_L3_SHARED>;
>> qcom,freq-domain = <&cpufreq_hw 2>;
>> L2_700: l2-cache {
>> compatible = "cache";
>> @@ -248,6 +273,116 @@
>> };
>> };
>>
>> + cpu0_opp_table: cpu0_opp_table {
>
> the node name should use dashes as separators instead of underscores,
> i.e. it should be 'cpu0-opp-table'.
Will fix it in v3.
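
For reference, a minimal sketch of what the rename could look
like in v3 (labels kept as-is so the operating-points-v2
references in the CPU nodes don't need to change); the same
applies to cpu4_opp_table below:

	cpu0_opp_table: cpu0-opp-table {
		compatible = "operating-points-v2";
		opp-shared;
		/* opp-* subnodes unchanged */
	};

	cpu4_opp_table: cpu4-opp-table {
		compatible = "operating-points-v2";
		opp-shared;
		/* opp-* subnodes unchanged */
	};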
>
>> + compatible = "operating-points-v2";
>> + opp-shared;
>> +
>> + cpu0_opp1: opp-300000000 {
>> + opp-hz = /bits/ 64 <300000000>;
>> + opp-peak-kBps = <800000 9600000>;
>> + };
>> +
>> + cpu0_opp2: opp-691200000 {
>> + opp-hz = /bits/ 64 <691200000>;
>> + opp-peak-kBps = <800000 17817600>;
>> + };
>> +
>> + cpu0_opp3: opp-806400000 {
>> + opp-hz = /bits/ 64 <806400000>;
>> + opp-peak-kBps = <800000 20889600>;
>> + };
>> +
>> + cpu0_opp4: opp-940800000 {
>> + opp-hz = /bits/ 64 <940800000>;
>> + opp-peak-kBps = <1804000 24576000>;
>> + };
>> +
>> + cpu0_opp5: opp-1152000000 {
>> + opp-hz = /bits/ 64 <1152000000>;
>> + opp-peak-kBps = <2188000 27033600>;
>> + };
>> +
>> + cpu0_opp6: opp-1324800000 {
>> + opp-hz = /bits/ 64 <1324800000>;
>> + opp-peak-kBps = <2188000 33792000>;
>> + };
>> +
>> + cpu0_opp7: opp-1516800000 {
>> + opp-hz = /bits/ 64 <1516800000>;
>> + opp-peak-kBps = <3072000 38092800>;
>> + };
>> +
>> + cpu0_opp8: opp-1651200000 {
>> + opp-hz = /bits/ 64 <1651200000>;
>> + opp-peak-kBps = <3072000 41779200>;
>> + };
>> +
>> + cpu0_opp9: opp-1804800000 {
>> + opp-hz = /bits/ 64 <1804800000>;
>> + opp-peak-kBps = <4068000 48537600>;
>> + };
>> +
>> + cpu0_opp10: opp-1958400000 {
>> + opp-hz = /bits/ 64 <1958400000>;
>> + opp-peak-kBps = <4068000 48537600>;
>> + };
>> + };
>> +
>> + cpu4_opp_table: cpu4_opp_table {
>
> node name should be 'cpu4-opp-table'
Will fix it in v3.
>
>> + compatible = "operating-points-v2";
>> + opp-shared;
>> +
>> + cpu4_opp1: opp-691200000 {
>> + opp-hz = /bits/ 64 <691200000>;
>> + opp-peak-kBps = <1804000 9600000>;
>> + };
>> +
>> + cpu4_opp2: opp-940800000 {
>> + opp-hz = /bits/ 64 <940800000>;
>> + opp-peak-kBps = <2188000 17817600>;
>> + };
>> +
>> + cpu4_opp3: opp-1228800000 {
>> + opp-hz = /bits/ 64 <1228800000>;
>> + opp-peak-kBps = <4068000 24576000>;
>> + };
>> +
>> + cpu4_opp4: opp-1344000000 {
>> + opp-hz = /bits/ 64 <1344000000>;
>> + opp-peak-kBps = <4068000 24576000>;
>> + };
>> +
>> + cpu4_opp5: opp-1516800000 {
>> + opp-hz = /bits/ 64 <1516800000>;
>> + opp-peak-kBps = <4068000 24576000>;
>> + };
>> +
>> + cpu4_opp6: opp-1651200000 {
>> + opp-hz = /bits/ 64 <1651200000>;
>> + opp-peak-kBps = <6220000 38092800>;
>> + };
>> +
>> + cpu4_opp7: opp-1900800000 {
>> + opp-hz = /bits/ 64 <1900800000>;
>> + opp-peak-kBps = <6220000 44851200>;
>> + };
>> +
>> + cpu4_opp8: opp-2054400000 {
>> + opp-hz = /bits/ 64 <2054400000>;
>> + opp-peak-kBps = <6220000 44851200>;
>> + };
>> +
>> + cpu4_opp9: opp-2131200000 {
>> + opp-hz = /bits/ 64 <2131200000>;
>> + opp-peak-kBps = <6220000 44851200>;
>> + };
>> +
>> + cpu4_opp10: opp-2400000000 {
>> + opp-hz = /bits/ 64 <2400000000>;
>> + opp-peak-kBps = <6832000 48537600>;
>> + };
>> + };
>> +
>> memory@...00000 {
>> device_type = "memory";
>> /* We expect the bootloader to fill in the size */
--
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum,
a Linux Foundation Collaborative Project.