[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <5d7146be.1c69fb81.38760.7fb8@mx.google.com>
Date: Thu, 05 Sep 2019 10:32:45 -0700
From: Stephen Boyd <swboyd@...omium.org>
To: Maulik Shah <mkshah@...eaurora.org>, agross@...nel.org,
david.brown@...aro.org, linux-arm-msm@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, linux-pm@...r.kernel.org,
bjorn.andersson@...aro.org, evgreen@...omium.org,
dianders@...omium.org, rnayak@...eaurora.org, ilina@...eaurora.org,
lsrao@...eaurora.org, ulf.hansson@...aro.org,
Maulik Shah <mkshah@...eaurora.org>
Subject: Re: [PATCH v2 4/6] drivers: qcom: rpmh-rsc: Add RSC power domain support
Quoting Maulik Shah (2019-08-23 01:17:01)
> diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
> index e278fc11fe5c..884b39599e8f 100644
> --- a/drivers/soc/qcom/rpmh-rsc.c
> +++ b/drivers/soc/qcom/rpmh-rsc.c
> @@ -498,6 +498,32 @@ static int tcs_ctrl_write(struct rsc_drv *drv, const struct tcs_request *msg)
> return ret;
> }
>
> +/**
> + * rpmh_rsc_ctrlr_is_idle: Check if any of the AMCs are busy.
> + *
> + * @drv: The controller
> + *
> + * Returns false if the TCSes are engaged in handling requests,
Please use kernel-doc style for returns here.
> + * True if controller is idle.
> + */
> +static bool rpmh_rsc_ctrlr_is_idle(struct rsc_drv *drv)
> +{
> + int m;
> + struct tcs_group *tcs = get_tcs_of_type(drv, ACTIVE_TCS);
> + bool ret = true;
> +
> + spin_lock(&drv->lock);
I think these need to be irqsave/restore still.
> + for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
> + if (!tcs_is_free(drv, m)) {
This snippet is from tcs_invalidate(). Please collapse it into some sort
of function or macro like for_each_tcs().
> + ret = false;
> + break;
> + }
> + }
> + spin_unlock(&drv->lock);
> +
> + return ret;
> +}
> +
> /**
> * rpmh_rsc_write_ctrl_data: Write request to the controller
> *
> @@ -521,6 +547,53 @@ int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
> return tcs_ctrl_write(drv, msg);
> }
>
> +static int rpmh_domain_power_off(struct generic_pm_domain *rsc_pd)
> +{
> + struct rsc_drv *drv = container_of(rsc_pd, struct rsc_drv, rsc_pd);
> +
> + /*
> + * RPMh domain can not be powered off when there is pending ACK for
> + * ACTIVE_TCS request. Exit when controller is busy.
> + */
> +
Nitpick: Remove this extra newline.
> + if (!rpmh_rsc_ctrlr_is_idle(drv))
> + return -EBUSY;
> +
> + return rpmh_flush(&drv->client);
> +}
> +
> +static int rpmh_probe_power_domain(struct platform_device *pdev,
> + struct rsc_drv *drv)
> +{
> + int ret;
> + struct generic_pm_domain *rsc_pd = &drv->rsc_pd;
> + struct device_node *dn = pdev->dev.of_node;
> +
> + rsc_pd->name = kasprintf(GFP_KERNEL, "%s", dn->name);
Maybe use devm_kasprintf?
> + if (!rsc_pd->name)
> + return -ENOMEM;
> +
> + rsc_pd->name = kbasename(rsc_pd->name);
> + rsc_pd->power_off = rpmh_domain_power_off;
> + rsc_pd->flags |= GENPD_FLAG_IRQ_SAFE;
> +
> + ret = pm_genpd_init(rsc_pd, NULL, false);
> + if (ret)
> + goto free_name;
> +
> + ret = of_genpd_add_provider_simple(dn, rsc_pd);
> + if (ret)
> + goto remove_pd;
> +
> + return ret;
> +
> +remove_pd:
> + pm_genpd_remove(rsc_pd);
> +free_name:
> + kfree(rsc_pd->name);
And then drop this one?
> + return ret;
> +}
> +
> static int rpmh_probe_tcs_config(struct platform_device *pdev,
> struct rsc_drv *drv)
> {
> @@ -650,6 +723,17 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
> if (ret)
> return ret;
>
> + /*
> + * Power domain is not required for controllers that support 'solver'
> + * mode where they can be in autonomous mode executing low power mode
> + * to power down.
> + */
> + if (of_property_read_bool(dn, "#power-domain-cells")) {
> + ret = rpmh_probe_power_domain(pdev, drv);
> + if (ret)
> + return ret;
> + }
> +
> spin_lock_init(&drv->lock);
> bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);
What happens if it fails later on? The genpd provider is still sitting
around and needs to be removed on probe failure later in this
function. It would be nicer if there wasn't another function to probe
the power domain and it was just inlined here actually. That way we
don't have to wonder about what's going on across two blocks of code.
Powered by blists - more mailing lists