Message-Id: <20240723144610.564273-4-ulf.hansson@linaro.org>
Date: Tue, 23 Jul 2024 16:46:09 +0200
From: Ulf Hansson <ulf.hansson@...aro.org>
To: Viresh Kumar <vireshk@...nel.org>,
Nishanth Menon <nm@...com>,
Stephen Boyd <sboyd@...nel.org>
Cc: Bjorn Andersson <andersson@...nel.org>,
Konrad Dybcio <konrad.dybcio@...aro.org>,
Bryan O'Donoghue <bryan.odonoghue@...aro.org>,
Thierry Reding <thierry.reding@...il.com>,
Mikko Perttunen <mperttunen@...dia.com>,
Jonathan Hunter <jonathanh@...dia.com>,
Ulf Hansson <ulf.hansson@...aro.org>,
linux-pm@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org,
linux-kernel@...r.kernel.org,
Ilia Lin <ilia.lin@...nel.org>,
Stephan Gerhold <stephan@...hold.net>,
linux-arm-msm@...r.kernel.org
Subject: [PATCH 3/4] cpufreq: qcom-nvmem: Convert to dev_pm_domain_attach|detach_list()
Rather than hooking up the PM domains through _opp_attach_genpd() and
manually managing runtime PM for the corresponding virtual devices created
by genpd during attach, let's avoid the boilerplate code by converting to
dev_pm_domain_attach|detach_list().
Signed-off-by: Ulf Hansson <ulf.hansson@...aro.org>
---
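For reference, this is roughly the pattern the driver moves to (a minimal
sketch, not the actual call sites in the diff below; "dev" is a stand-in for
the CPU device and "perf" is only an example domain name):

	struct dev_pm_domain_list *pd_list = NULL;
	struct dev_pm_domain_attach_data attach_data = {
		.pd_names = (const char *[]) { "perf" },
		.num_pd_names = 1,
		/* Add device links and keep the attached domains powered on. */
		.pd_flags = PD_FLAG_DEV_LINK_ON,
	};
	int ret, i;

	/* Attach all named PM domains in one call. */
	ret = dev_pm_domain_attach_list(dev, &attach_data, &pd_list);
	if (ret < 0)
		return ret;

	/* On system suspend, mark the domain devices as part of the awake path. */
	for (i = 0; i < pd_list->num_pds; i++)
		device_set_awake_path(pd_list->pd_devs[i]);

	/* Teardown: detaches the domains and drops the device links. */
	dev_pm_domain_detach_list(pd_list);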
drivers/cpufreq/qcom-cpufreq-nvmem.c | 79 +++++++++-------------------
1 file changed, 26 insertions(+), 53 deletions(-)
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index ea05d9d67490..65739ce98658 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -52,12 +52,13 @@ struct qcom_cpufreq_match_data {
struct nvmem_cell *speedbin_nvmem,
char **pvs_name,
struct qcom_cpufreq_drv *drv);
- const char **genpd_names;
+ const char **pd_names;
+ unsigned int num_pd_names;
};
struct qcom_cpufreq_drv_cpu {
int opp_token;
- struct device **virt_devs;
+ struct dev_pm_domain_list *pd_list;
};
struct qcom_cpufreq_drv {
@@ -394,8 +395,6 @@ static int qcom_cpufreq_ipq8074_name_version(struct device *cpu_dev,
return 0;
}
-static const char *generic_genpd_names[] = { "perf", NULL };
-
static const struct qcom_cpufreq_match_data match_data_kryo = {
.get_version = qcom_cpufreq_kryo_name_version,
};
@@ -406,13 +405,13 @@ static const struct qcom_cpufreq_match_data match_data_krait = {
static const struct qcom_cpufreq_match_data match_data_msm8909 = {
.get_version = qcom_cpufreq_simple_get_version,
- .genpd_names = generic_genpd_names,
+ .pd_names = (const char *[]) { "perf" },
+ .num_pd_names = 1,
};
-static const char *qcs404_genpd_names[] = { "cpr", NULL };
-
static const struct qcom_cpufreq_match_data match_data_qcs404 = {
- .genpd_names = qcs404_genpd_names,
+ .pd_names = (const char *[]) { "cpr" },
+ .num_pd_names = 1,
};
static const struct qcom_cpufreq_match_data match_data_ipq6018 = {
@@ -427,28 +426,16 @@ static const struct qcom_cpufreq_match_data match_data_ipq8074 = {
.get_version = qcom_cpufreq_ipq8074_name_version,
};
-static void qcom_cpufreq_suspend_virt_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
+static void qcom_cpufreq_suspend_pd_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
{
- const char * const *name = drv->data->genpd_names;
+ struct dev_pm_domain_list *pd_list = drv->cpus[cpu].pd_list;
int i;
- if (!drv->cpus[cpu].virt_devs)
+ if (!pd_list)
return;
- for (i = 0; *name; i++, name++)
- device_set_awake_path(drv->cpus[cpu].virt_devs[i]);
-}
-
-static void qcom_cpufreq_put_virt_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
-{
- const char * const *name = drv->data->genpd_names;
- int i;
-
- if (!drv->cpus[cpu].virt_devs)
- return;
-
- for (i = 0; *name; i++, name++)
- pm_runtime_put(drv->cpus[cpu].virt_devs[i]);
+ for (i = 0; i < pd_list->num_pds; i++)
+ device_set_awake_path(pd_list->pd_devs[i]);
}
static int qcom_cpufreq_probe(struct platform_device *pdev)
@@ -505,7 +492,6 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
of_node_put(np);
for_each_possible_cpu(cpu) {
- struct device **virt_devs = NULL;
struct dev_pm_opp_config config = {
.supported_hw = NULL,
};
@@ -524,12 +510,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
config.prop_name = pvs_name;
}
- if (drv->data->genpd_names) {
- config.genpd_names = drv->data->genpd_names;
- config.virt_devs = &virt_devs;
- }
-
- if (config.supported_hw || config.genpd_names) {
+ if (config.supported_hw) {
drv->cpus[cpu].opp_token = dev_pm_opp_set_config(cpu_dev, &config);
if (drv->cpus[cpu].opp_token < 0) {
ret = drv->cpus[cpu].opp_token;
@@ -538,25 +519,17 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
}
}
- if (virt_devs) {
- const char * const *name = config.genpd_names;
- int i, j;
-
- for (i = 0; *name; i++, name++) {
- ret = pm_runtime_resume_and_get(virt_devs[i]);
- if (ret) {
- dev_err(cpu_dev, "failed to resume %s: %d\n",
- *name, ret);
+ if (drv->data->pd_names) {
+ struct dev_pm_domain_attach_data attach_data = {
+ .pd_names = drv->data->pd_names,
+ .num_pd_names = drv->data->num_pd_names,
+ .pd_flags = PD_FLAG_DEV_LINK_ON,
+ };
- /* Rollback previous PM runtime calls */
- name = config.genpd_names;
- for (j = 0; *name && j < i; j++, name++)
- pm_runtime_put(virt_devs[j]);
-
- goto free_opp;
- }
- }
- drv->cpus[cpu].virt_devs = virt_devs;
+ ret = dev_pm_domain_attach_list(cpu_dev, &attach_data,
+ &drv->cpus[cpu].pd_list);
+ if (ret < 0)
+ goto free_opp;
}
}
@@ -572,7 +545,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
free_opp:
for_each_possible_cpu(cpu) {
- qcom_cpufreq_put_virt_devs(drv, cpu);
+ dev_pm_domain_detach_list(drv->cpus[cpu].pd_list);
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
}
return ret;
@@ -586,7 +559,7 @@ static void qcom_cpufreq_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
for_each_possible_cpu(cpu) {
- qcom_cpufreq_put_virt_devs(drv, cpu);
+ dev_pm_domain_detach_list(drv->cpus[cpu].pd_list);
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
}
}
@@ -597,7 +570,7 @@ static int qcom_cpufreq_suspend(struct device *dev)
unsigned int cpu;
for_each_possible_cpu(cpu)
- qcom_cpufreq_suspend_virt_devs(drv, cpu);
+ qcom_cpufreq_suspend_pd_devs(drv, cpu);
return 0;
}
--
2.34.1