[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <154399724253.88331.15449997536107541794@swboyd.mtv.corp.google.com>
Date: Wed, 05 Dec 2018 00:07:22 -0800
From: Stephen Boyd <sboyd@...nel.org>
To: Taniya Das <tdas@...eaurora.org>,
Viresh Kumar <viresh.kumar@...aro.org>
Cc: "Rafael J. Wysocki" <rjw@...ysocki.net>,
linux-kernel@...r.kernel.org, linux-pm@...r.kernel.org,
Rajendra Nayak <rnayak@...eaurora.org>,
devicetree@...r.kernel.org, robh@...nel.org,
skannan@...eaurora.org, linux-arm-msm@...r.kernel.org,
evgreen@...gle.com
Subject: Re: [PATCH v11 2/2] cpufreq: qcom-hw: Add support for QCOM cpufreq HW driver
Quoting Taniya Das (2018-12-04 19:37:00)
> Hello Stephen, Viresh
>
> Thanks for the code and suggestions.
>
> Having a NR_DOMAINS '2' makes the driver not scalable for re-use.
> This assumption is only true for the current version of the HW and we do
> not intend to update/clean-up this logic again. So we want to stick with
> the current logic of having the *qcom_freq_domain_map[NR_CPUS].
Ok, so let's just count the number of reg properties there are in the
node and use that to know how many frequency domains exist. That already
maps exactly to how many frequency domains this driver needs to worry
about. Alternatively, we could count how many named register
regions like 'freq_domain0', 'freq_domain1', etc. are listed in
reg-names so that other register regions in this device could be exposed
in DT when/if they are added. Here's the reg-counting patch.
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 8dc6b73c2f22..c0f6a07eb3c5 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -29,8 +29,6 @@ struct cpufreq_qcom {
cpumask_t related_cpus;
};
-static struct cpufreq_qcom *qcom_freq_domain_map[NR_CPUS];
-
static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
unsigned int index)
{
@@ -76,9 +74,26 @@ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
- struct cpufreq_qcom *c;
+ struct cpufreq_qcom *freq_domains, *c;
+ struct device_node *cpu_np;
+ struct of_phandle_args args;
+ int ret;
+
+ freq_domains = cpufreq_get_driver_data();
+ if (!freq_domains)
+ return -ENODEV;
+
+ cpu_np = of_cpu_device_node_get(policy->cpu);
+ if (!cpu_np)
+ return -ENODEV;
+
+ ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
+ "#freq-domain-cells", 0, &args);
+ of_node_put(cpu_np);
+ if (ret)
+ return ret;
- c = qcom_freq_domain_map[policy->cpu];
+ c = &freq_domains[args.args[0]];
if (!c) {
pr_err("No scaling support for CPU%d\n", policy->cpu);
return -ENODEV;
@@ -171,43 +186,15 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev, struct cpufreq_qcom *c,
return 0;
}
-static void qcom_get_related_cpus(int index, struct cpumask *m)
-{
- struct device_node *cpu_np;
- struct of_phandle_args args;
- int cpu, ret;
-
- for_each_possible_cpu(cpu) {
- cpu_np = of_cpu_device_node_get(cpu);
- if (!cpu_np)
- continue;
-
- ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
- "#freq-domain-cells", 0,
- &args);
- of_node_put(cpu_np);
- if (ret < 0)
- continue;
-
- if (index == args.args[0])
- cpumask_set_cpu(cpu, m);
- }
-}
-
static int qcom_cpu_resources_init(struct platform_device *pdev,
- unsigned int cpu, int index,
- unsigned long xo_rate,
- unsigned long cpu_hw_rate)
+ int index, unsigned long xo_rate,
+ unsigned long cpu_hw_rate,
+ struct cpufreq_qcom *c)
{
- struct cpufreq_qcom *c;
struct resource *res;
struct device *dev = &pdev->dev;
void __iomem *base;
- int ret, cpu_r;
-
- c = devm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
- if (!c)
- return -ENOMEM;
+ int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, index);
base = devm_ioremap_resource(dev, res);
@@ -220,12 +207,6 @@ static int qcom_cpu_resources_init(struct platform_device *pdev,
return -ENODEV;
}
- qcom_get_related_cpus(index, &c->related_cpus);
- if (!cpumask_weight(&c->related_cpus)) {
- dev_err(dev, "Domain-%d failed to get related CPUs\n", index);
- return -ENOENT;
- }
-
c->perf_state_reg = base + REG_PERF_STATE;
ret = qcom_cpufreq_hw_read_lut(dev, c, base, xo_rate, cpu_hw_rate);
@@ -234,9 +215,6 @@ static int qcom_cpu_resources_init(struct platform_device *pdev,
return ret;
}
- for_each_cpu(cpu_r, &c->related_cpus)
- qcom_freq_domain_map[cpu_r] = c;
-
return 0;
}
@@ -245,9 +223,20 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
struct device_node *cpu_np;
struct of_phandle_args args;
struct clk *clk;
- unsigned int cpu;
+ unsigned int cpu, domain, max_domains;
unsigned long xo_rate, cpu_hw_rate;
int ret;
+ struct cpufreq_qcom *freq_domains, *freq_domain;
+
+ max_domains = 0;
+ while (platform_get_resource(pdev, IORESOURCE_MEM, max_domains))
+ max_domains++;
+
+ freq_domains = devm_kcalloc(&pdev->dev, max_domains,
+ sizeof(*freq_domains), GFP_KERNEL);
+ cpufreq_qcom_hw_driver.driver_data = freq_domains;
+ if (!freq_domains)
+ return -ENOMEM;
clk = clk_get(&pdev->dev, "xo");
if (IS_ERR(clk))
@@ -273,16 +262,26 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
"#freq-domain-cells", 0,
- &args);
+ &args);
of_node_put(cpu_np);
if (ret)
return ret;
- if (qcom_freq_domain_map[cpu])
+ domain = args.args[0];
+ if (WARN_ON(domain >= max_domains))
+ continue;
+
+ freq_domain = &freq_domains[domain];
+ cpumask_set_cpu(cpu, &freq_domain->related_cpus);
+ /*
+ * If we've already populated the frequency table for this domain
+ * just mark it related and get out of here
+ */
+ if (cpumask_weight(&freq_domain->related_cpus) > 1)
continue;
- ret = qcom_cpu_resources_init(pdev, cpu, args.args[0],
- xo_rate, cpu_hw_rate);
+ ret = qcom_cpu_resources_init(pdev, domain, xo_rate,
+ cpu_hw_rate, freq_domain);
if (ret)
return ret;
}
Powered by blists - more mailing lists