Message-ID: <20220330143819.27476-2-sumitg@nvidia.com>
Date: Wed, 30 Mar 2022 20:08:16 +0530
From: Sumit Gupta <sumitg@...dia.com>
To: <rafael@...nel.org>, <viresh.kumar@...aro.org>,
<robh+dt@...nel.org>, <krzk+dt@...nel.org>, <treding@...dia.com>,
<jonathanh@...dia.com>, <linux-pm@...r.kernel.org>,
<linux-tegra@...r.kernel.org>, <devicetree@...r.kernel.org>,
<linux-kernel@...r.kernel.org>
CC: <ksitaraman@...dia.com>, <sanjayc@...dia.com>, <bbasu@...dia.com>,
<sumitg@...dia.com>
Subject: [Patch v2 1/4] cpufreq: tegra194: add soc data to support multiple soc

Add SoC data and ops to support multiple SoCs in the same driver. The
per-SoC data carries the chip-specific ops (counter reads, cluster id
lookup, ndiv get/set) and the maximum number of CPUs per cluster, and
is selected through the OF match data at probe time.
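
As a purely hypothetical sketch (the "tegra_foo" names are
illustrative only, not part of this patch), a later chip would only
need to supply its own ops and soc data and hook them into the OF
match table:

    /* illustrative callbacks for a hypothetical future SoC */
    static struct tegra_cpufreq_ops tegra_foo_cpufreq_ops = {
            .read_counters = tegra_foo_read_counters,
            .get_cpu_cluster_id = tegra_foo_get_cpu_cluster_id,
            .get_cpu_ndiv = tegra_foo_get_cpu_ndiv,
            .set_cpu_ndiv = tegra_foo_set_cpu_ndiv,
    };

    static const struct tegra_cpufreq_soc tegra_foo_cpufreq_soc = {
            .ops = &tegra_foo_cpufreq_ops,
            .maxcpus_per_cluster = 4,
    };

    static const struct of_device_id tegra194_cpufreq_of_match[] = {
            { .compatible = "nvidia,tegra194-ccplex", .data = &tegra194_cpufreq_soc },
            { .compatible = "nvidia,tegra-foo-ccplex", .data = &tegra_foo_cpufreq_soc },
            { /* sentinel */ }
    };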
Signed-off-by: Sumit Gupta <sumitg@...dia.com>

---
drivers/cpufreq/tegra194-cpufreq.c | 142 +++++++++++++++++++++--------
1 file changed, 105 insertions(+), 37 deletions(-)
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index ac381db25dbe..2d59b2bd0e1d 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved
+ * Copyright (c) 2020 - 2022, NVIDIA CORPORATION. All rights reserved
*/
#include <linux/cpu.h>
@@ -35,12 +35,6 @@ enum cluster {
MAX_CLUSTERS,
};
-struct tegra194_cpufreq_data {
- void __iomem *regs;
- size_t num_clusters;
- struct cpufreq_frequency_table **tables;
-};
-
struct tegra_cpu_ctr {
u32 cpu;
u32 coreclk_cnt, last_coreclk_cnt;
@@ -52,13 +46,42 @@ struct read_counters_work {
struct tegra_cpu_ctr c;
};
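+/* Callbacks that differ between the supported SoC generations */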
+struct tegra_cpufreq_ops {
+ void (*read_counters)(struct tegra_cpu_ctr *c);
+ void (*set_cpu_ndiv)(struct cpufreq_policy *policy, u64 ndiv);
+ void (*get_cpu_cluster_id)(u32 cpu, u32 *cpuid, u32 *clusterid);
+ int (*get_cpu_ndiv)(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv);
+};
+
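+/* Per-SoC description: the ops above plus the max CPUs per cluster */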
+struct tegra_cpufreq_soc {
+ struct tegra_cpufreq_ops *ops;
+ int maxcpus_per_cluster;
+};
+
+struct tegra194_cpufreq_data {
+ void __iomem *regs;
+ size_t num_clusters;
+ struct cpufreq_frequency_table **tables;
+ const struct tegra_cpufreq_soc *soc;
+};
+
static struct workqueue_struct *read_counters_wq;
-static void get_cpu_cluster(void *cluster)
+static void tegra_get_cpu_mpidr(void *mpidr)
{
- u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
+ *((u64 *)mpidr) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
+}
+
+static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
+{
+ u64 mpidr;
+
+ smp_call_function_single(cpu, tegra_get_cpu_mpidr, &mpidr, true);
- *((uint32_t *)cluster) = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ if (cpuid)
+ *cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ if (clusterid)
+ *clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
}
/*
@@ -85,11 +108,24 @@ static inline u32 map_ndiv_to_freq(struct mrq_cpu_ndiv_limits_response
return nltbl->ref_clk_hz / KHZ * ndiv / (nltbl->pdiv * nltbl->mdiv);
}
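+/*
+ * Tegra194 read_counters callback: snapshot the ref and core clock
+ * feedback counters twice, US_DELAY apart; the CPU frequency is then
+ * derived from the two deltas.
+ */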
+static void tegra194_read_counters(struct tegra_cpu_ctr *c)
+{
+ u64 val;
+
+ val = read_freq_feedback();
+ c->last_refclk_cnt = lower_32_bits(val);
+ c->last_coreclk_cnt = upper_32_bits(val);
+ udelay(US_DELAY);
+ val = read_freq_feedback();
+ c->refclk_cnt = lower_32_bits(val);
+ c->coreclk_cnt = upper_32_bits(val);
+}
+
static void tegra_read_counters(struct work_struct *work)
{
+ struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
struct read_counters_work *read_counters_work;
struct tegra_cpu_ctr *c;
- u64 val;
/*
* ref_clk_counter(32 bit counter) runs on constant clk,
@@ -107,13 +143,7 @@ static void tegra_read_counters(struct work_struct *work)
work);
c = &read_counters_work->c;
- val = read_freq_feedback();
- c->last_refclk_cnt = lower_32_bits(val);
- c->last_coreclk_cnt = upper_32_bits(val);
- udelay(US_DELAY);
- val = read_freq_feedback();
- c->refclk_cnt = lower_32_bits(val);
- c->coreclk_cnt = upper_32_bits(val);
+ data->soc->ops->read_counters(c);
}
/*
@@ -177,7 +207,7 @@ static unsigned int tegra194_calculate_speed(u32 cpu)
return (rate_mhz * KHZ); /* in KHz */
}
-static void get_cpu_ndiv(void *ndiv)
+static void tegra194_get_cpu_ndiv_sysreg(void *ndiv)
{
u64 ndiv_val;
@@ -186,30 +216,43 @@ static void get_cpu_ndiv(void *ndiv)
*(u64 *)ndiv = ndiv_val;
}
-static void set_cpu_ndiv(void *data)
+static int tegra194_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
+{
+ int ret;
+
+ ret = smp_call_function_single(cpu, tegra194_get_cpu_ndiv_sysreg, ndiv, true);
+
+ return ret;
+}
+
+static void tegra194_set_cpu_ndiv_sysreg(void *data)
{
- struct cpufreq_frequency_table *tbl = data;
- u64 ndiv_val = (u64)tbl->driver_data;
+ u64 ndiv_val = *(u64 *)data;
asm volatile("msr s3_0_c15_c0_4, %0" : : "r" (ndiv_val));
}
+static void tegra194_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv)
+{
+ on_each_cpu_mask(policy->cpus, tegra194_set_cpu_ndiv_sysreg, &ndiv, true);
+}
+
static unsigned int tegra194_get_speed(u32 cpu)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
struct cpufreq_frequency_table *pos;
+ u32 cpuid, clusterid;
unsigned int rate;
u64 ndiv;
int ret;
- u32 cl;
- smp_call_function_single(cpu, get_cpu_cluster, &cl, true);
+ data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);
/* reconstruct actual cpu freq using counters */
rate = tegra194_calculate_speed(cpu);
/* get last written ndiv value */
- ret = smp_call_function_single(cpu, get_cpu_ndiv, &ndiv, true);
+ ret = data->soc->ops->get_cpu_ndiv(cpu, cpuid, clusterid, &ndiv);
if (WARN_ON_ONCE(ret))
return rate;
@@ -219,7 +262,7 @@ static unsigned int tegra194_get_speed(u32 cpu)
* to the last written ndiv value from freq_table. This is
* done to return consistent value.
*/
- cpufreq_for_each_valid_entry(pos, data->tables[cl]) {
+ cpufreq_for_each_valid_entry(pos, data->tables[clusterid]) {
if (pos->driver_data != ndiv)
continue;
@@ -237,19 +280,22 @@ static unsigned int tegra194_get_speed(u32 cpu)
static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
- u32 cpu;
- u32 cl;
+ int maxcpus_per_cluster = data->soc->maxcpus_per_cluster;
+ u32 start_cpu, cpu;
+ u32 clusterid;
- smp_call_function_single(policy->cpu, get_cpu_cluster, &cl, true);
+ data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid);
- if (cl >= data->num_clusters || !data->tables[cl])
+ if (clusterid >= data->num_clusters || !data->tables[clusterid])
return -EINVAL;
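+ /* assumes logical CPU numbering is contiguous within a cluster */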
+ start_cpu = rounddown(policy->cpu, maxcpus_per_cluster);
/* set same policy for all cpus in a cluster */
- for (cpu = (cl * 2); cpu < ((cl + 1) * 2); cpu++)
- cpumask_set_cpu(cpu, policy->cpus);
-
- policy->freq_table = data->tables[cl];
+ for (cpu = start_cpu; cpu < (start_cpu + maxcpus_per_cluster); cpu++) {
+ if (cpu_possible(cpu))
+ cpumask_set_cpu(cpu, policy->cpus);
+ }
+ policy->freq_table = data->tables[clusterid];
policy->cpuinfo.transition_latency = TEGRA_CPUFREQ_TRANSITION_LATENCY;
return 0;
@@ -259,13 +305,14 @@ static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
{
struct cpufreq_frequency_table *tbl = policy->freq_table + index;
+ struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
/*
* Each core writes frequency in per core register. Then both cores
* in a cluster run at same frequency which is the maximum frequency
* request out of the values requested by both cores in that cluster.
*/
- on_each_cpu_mask(policy->cpus, set_cpu_ndiv, tbl, true);
+ data->soc->ops->set_cpu_ndiv(policy, (u64)tbl->driver_data);
return 0;
}
@@ -280,6 +327,18 @@ static struct cpufreq_driver tegra194_cpufreq_driver = {
.attr = cpufreq_generic_attr,
};
+static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
+ .read_counters = tegra194_read_counters,
+ .get_cpu_cluster_id = tegra194_get_cpu_cluster_id,
+ .get_cpu_ndiv = tegra194_get_cpu_ndiv,
+ .set_cpu_ndiv = tegra194_set_cpu_ndiv,
+};
+
+const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
+ .ops = &tegra194_cpufreq_ops,
+ .maxcpus_per_cluster = 2,
+};
+
static void tegra194_cpufreq_free_resources(void)
{
destroy_workqueue(read_counters_wq);
@@ -359,6 +418,7 @@ init_freq_table(struct platform_device *pdev, struct tegra_bpmp *bpmp,
static int tegra194_cpufreq_probe(struct platform_device *pdev)
{
+ const struct tegra_cpufreq_soc *soc;
struct tegra194_cpufreq_data *data;
struct tegra_bpmp *bpmp;
int err, i;
@@ -367,6 +427,15 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
+ soc = of_device_get_match_data(&pdev->dev);
+
+ if (soc && soc->ops && soc->maxcpus_per_cluster) {
+ data->soc = soc;
+ } else {
+ dev_err(&pdev->dev, "soc data missing\n");
+ return -EINVAL;
+ }
+
data->num_clusters = MAX_CLUSTERS;
data->tables = devm_kcalloc(&pdev->dev, data->num_clusters,
sizeof(*data->tables), GFP_KERNEL);
@@ -416,10 +485,9 @@ static int tegra194_cpufreq_remove(struct platform_device *pdev)
}
static const struct of_device_id tegra194_cpufreq_of_match[] = {
- { .compatible = "nvidia,tegra194-ccplex", },
+ { .compatible = "nvidia,tegra194-ccplex", .data = &tegra194_cpufreq_soc },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, tegra194_cpufreq_of_match);
static struct platform_driver tegra194_ccplex_driver = {
.driver = {
--
2.17.1