Message-ID: <CAPDyKFqa2U3g-7xVjxmsiYHsxujvJc0QCXWXHxPZAQR0ZJJo8g@mail.gmail.com>
Date: Wed, 22 Nov 2023 10:47:56 +0100
From: Ulf Hansson <ulf.hansson@...aro.org>
To: Stephan Gerhold <stephan.gerhold@...nkonzept.com>
Cc: Viresh Kumar <viresh.kumar@...aro.org>,
Andy Gross <agross@...nel.org>,
Bjorn Andersson <andersson@...nel.org>,
Konrad Dybcio <konrad.dybcio@...aro.org>,
Ilia Lin <ilia.lin@...nel.org>,
"Rafael J. Wysocki" <rafael@...nel.org>,
Rob Herring <robh+dt@...nel.org>,
Krzysztof Kozlowski <krzysztof.kozlowski+dt@...aro.org>,
Conor Dooley <conor+dt@...nel.org>, linux-pm@...r.kernel.org,
linux-arm-msm@...r.kernel.org, linux-kernel@...r.kernel.org,
devicetree@...r.kernel.org, Stephan Gerhold <stephan@...hold.net>,
stable@...r.kernel.org
Subject: Re: [PATCH v3 1/3] cpufreq: qcom-nvmem: Enable virtual power domain devices
On Tue, 14 Nov 2023 at 11:08, Stephan Gerhold
<stephan.gerhold@...nkonzept.com> wrote:
>
> As of commit 3c5a272202c2 ("PM: domains: Improve runtime PM
> performance state handling"), the genpd core caches performance state
> votes from devices that are runtime suspended. The cached votes are
> applied once the device becomes active again.
>
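> A minimal sketch of this behavior, assuming a consumer device "dev"
> attached to a single performance-state-capable genpd (the function
> name and state value are illustrative, not from the patch):
>
>     #include <linux/pm_domain.h>
>     #include <linux/pm_runtime.h>
>
>     static int example_vote(struct device *dev)
>     {
>             int ret;
>
>             /* While dev is runtime suspended, this vote is only cached. */
>             ret = dev_pm_genpd_set_performance_state(dev, 2);
>             if (ret)
>                     return ret;
>
>             /* The cached vote is applied to the hardware on resume. */
>             ret = pm_runtime_resume_and_get(dev);
>             if (ret)
>                     return ret;
>
>             /* ... do work at the requested performance level ... */
>
>             pm_runtime_put(dev);
>             return 0;
>     }
>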
> To attach the power domains needed by qcom-cpufreq-nvmem, the OPP core
> calls genpd_dev_pm_attach_by_id(). This results in "virtual" dummy
> devices that use runtime PM only to control the on/off and performance
> state of the attached power domain.
>
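> Roughly, for each name in the driver's genpd_names list the OPP core
> does the equivalent of the following sketch ("cpr" is the domain name
> used on QCS404; error handling is simplified):
>
>     #include <linux/err.h>
>     #include <linux/pm_domain.h>
>
>     static struct device *example_attach(struct device *cpu_dev)
>     {
>             struct device *virt_dev;
>
>             /*
>              * Returns a "virtual" device that proxies the power domain.
>              * It starts out runtime suspended, and nothing resumes it.
>              */
>             virt_dev = dev_pm_domain_attach_by_name(cpu_dev, "cpr");
>             if (IS_ERR_OR_NULL(virt_dev))
>                     return virt_dev ?: ERR_PTR(-ENODEV);
>
>             return virt_dev;
>     }
>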
> However, at the moment nothing ever resumes the virtual devices created
> for qcom-cpufreq-nvmem. They remain permanently runtime suspended, so
> performance state votes made during cpufreq scaling are always cached
> and never applied to the hardware.
>
> Fix this by enabling the devices after attaching them.
>
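> In pattern form, the change below resumes each virtual device once
> after attaching and puts it again on remove/error (virt_dev as in the
> sketch above; the concrete driver changes follow in the diff):
>
>     ret = pm_runtime_resume_and_get(virt_dev);
>     if (ret)
>             goto rollback;
>
>     /* ... and on remove/error: */
>     pm_runtime_put(virt_dev);
>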
> Without this fix, performance state votes are silently ignored and the
> CPU/CPR voltage is never adjusted. This has been broken since Linux
> 5.14, but for some reason no one has noticed it on QCS404 so far.
>
> Cc: stable@...r.kernel.org
> Fixes: 1cb8339ca225 ("cpufreq: qcom: Add support for qcs404 on nvmem driver")
> Signed-off-by: Stephan Gerhold <stephan.gerhold@...nkonzept.com>

Reviewed-by: Ulf Hansson <ulf.hansson@...aro.org>

Kind regards
Uffe

> ---
> drivers/cpufreq/qcom-cpufreq-nvmem.c | 46 +++++++++++++++++++++++++++++++++---
> 1 file changed, 43 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
> index 6355a39418c5..d239a45ed497 100644
> --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
> +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
> @@ -25,6 +25,7 @@
> #include <linux/platform_device.h>
> #include <linux/pm_domain.h>
> #include <linux/pm_opp.h>
> +#include <linux/pm_runtime.h>
> #include <linux/slab.h>
> #include <linux/soc/qcom/smem.h>
>
> @@ -55,6 +56,7 @@ struct qcom_cpufreq_match_data {
>
> struct qcom_cpufreq_drv_cpu {
> int opp_token;
> + struct device **virt_devs;
> };
>
> struct qcom_cpufreq_drv {
> @@ -424,6 +426,18 @@ static const struct qcom_cpufreq_match_data match_data_ipq8074 = {
> .get_version = qcom_cpufreq_ipq8074_name_version,
> };
>
> +static void qcom_cpufreq_put_virt_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
> +{
> + const char * const *name = drv->data->genpd_names;
> + int i;
> +
> + if (!drv->cpus[cpu].virt_devs)
> + return;
> +
> + for (i = 0; *name; i++, name++)
> + pm_runtime_put(drv->cpus[cpu].virt_devs[i]);
> +}
> +
> static int qcom_cpufreq_probe(struct platform_device *pdev)
> {
> struct qcom_cpufreq_drv *drv;
> @@ -478,6 +492,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
> of_node_put(np);
>
> for_each_possible_cpu(cpu) {
> + struct device **virt_devs = NULL;
> struct dev_pm_opp_config config = {
> .supported_hw = NULL,
> };
> @@ -498,7 +513,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
>
> if (drv->data->genpd_names) {
> config.genpd_names = drv->data->genpd_names;
> - config.virt_devs = NULL;
> + config.virt_devs = &virt_devs;
> }
>
> if (config.supported_hw || config.genpd_names) {
> @@ -509,6 +524,27 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
> goto free_opp;
> }
> }
> +
> + if (virt_devs) {
> + const char * const *name = config.genpd_names;
> + int i, j;
> +
> + for (i = 0; *name; i++, name++) {
> + ret = pm_runtime_resume_and_get(virt_devs[i]);
> + if (ret) {
> + dev_err(cpu_dev, "failed to resume %s: %d\n",
> + *name, ret);
> +
> + /* Rollback previous PM runtime calls */
> + name = config.genpd_names;
> + for (j = 0; *name && j < i; j++, name++)
> + pm_runtime_put(virt_devs[j]);
> +
> + goto free_opp;
> + }
> + }
> + drv->cpus[cpu].virt_devs = virt_devs;
> + }
> }
>
> cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
> @@ -522,8 +558,10 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
> dev_err(cpu_dev, "Failed to register platform device\n");
>
> free_opp:
> - for_each_possible_cpu(cpu)
> + for_each_possible_cpu(cpu) {
> + qcom_cpufreq_put_virt_devs(drv, cpu);
> dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
> + }
> return ret;
> }
>
> @@ -534,8 +572,10 @@ static void qcom_cpufreq_remove(struct platform_device *pdev)
>
> platform_device_unregister(cpufreq_dt_pdev);
>
> - for_each_possible_cpu(cpu)
> + for_each_possible_cpu(cpu) {
> + qcom_cpufreq_put_virt_devs(drv, cpu);
> dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
> + }
> }
>
> static struct platform_driver qcom_cpufreq_driver = {
>
> --
> 2.39.2
>