Message-ID: <60f523da-f38b-473c-b5f1-f3af8c24bb9b@amd.com>
Date: Mon, 30 Oct 2023 11:09:12 -0500
From: Tanmay Shah <tanmay.shah@....com>
To: Mathieu Poirier <mathieu.poirier@...aro.org>
Cc: andersson@...nel.org, robh+dt@...nel.org,
krzysztof.kozlowski+dt@...aro.org, conor+dt@...nel.org,
michal.simek@....com, ben.levinsky@....com,
linux-remoteproc@...r.kernel.org, devicetree@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v6 3/4] remoteproc: zynqmp: add pm domains support

Hi Mathieu,

I agree with all the comments and will address them in the next revision.
Thanks,
Tanmay
On 10/18/23 12:38 PM, Mathieu Poirier wrote:
> Good morning,
>
> On Thu, Oct 12, 2023 at 09:22:28PM -0700, Tanmay Shah wrote:
> > Use the TCM power domains extracted from the device tree
> > to power the TCM on and off through the generic PM domain framework.
> >
> > Signed-off-by: Tanmay Shah <tanmay.shah@....com>
> > ---
> >
> > Changes in v6:
> > - Remove spurious change
> > - Handle errors in add_pm_domains function
> > - Remove redundant code to handle errors from remove_pm_domains
> >
> > drivers/remoteproc/xlnx_r5_remoteproc.c | 262 ++++++++++++++++++++++--
> > 1 file changed, 243 insertions(+), 19 deletions(-)
> >
> > diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
> > index 4395edea9a64..04e95d880184 100644
> > --- a/drivers/remoteproc/xlnx_r5_remoteproc.c
> > +++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
> > @@ -16,6 +16,7 @@
> > #include <linux/of_reserved_mem.h>
> > #include <linux/platform_device.h>
> > #include <linux/remoteproc.h>
> > +#include <linux/pm_domain.h>
> >
> > #include "remoteproc_internal.h"
> >
> > @@ -102,6 +103,12 @@ static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
> > * @rproc: rproc handle
> > * @pm_domain_id: RPU CPU power domain id
> > * @ipi: pointer to mailbox information
> > + * @num_pm_dev: number of TCM PM domain devices for this core
> > + * @pm_dev1: PM domain virtual devices for the power domain framework
> > + * @pm_dev_link1: PM domain device links after registration
> > + * @pm_dev2: used only in lockstep mode; second core's PM domain virtual devices
> > + * @pm_dev_link2: used only in lockstep mode; second core's PM device links after
> > + * registration
> > */
> > struct zynqmp_r5_core {
> > struct device *dev;
> > @@ -111,6 +118,11 @@ struct zynqmp_r5_core {
> > struct rproc *rproc;
> > u32 pm_domain_id;
> > struct mbox_info *ipi;
> > + int num_pm_dev;
> > + struct device **pm_dev1;
>
> s/pm_dev1/pm_dev_core0
>
> > + struct device_link **pm_dev_link1;
>
> s/pm_dev_link1/pm_dev_core0_link;
>
> > + struct device **pm_dev2;
>
> s/pm_dev2/pm_dev_core1
>
> > + struct device_link **pm_dev_link2;
>
> s/pm_dev_link2/pm_dev_core1_link;
>
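
Ack on all four renames. A quick sketch of how the struct members would read
in v7 (names exactly as you suggest, everything else unchanged):

	int num_pm_dev;
	struct device **pm_dev_core0;
	struct device_link **pm_dev_core0_link;
	struct device **pm_dev_core1;
	struct device_link **pm_dev_core1_link;
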
> > };
> >
> > /**
> > @@ -575,12 +587,21 @@ static int add_tcm_carveout_split_mode(struct rproc *rproc)
> > bank_size = r5_core->tcm_banks[i]->size;
> > pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
> >
> > - ret = zynqmp_pm_request_node(pm_domain_id,
> > - ZYNQMP_PM_CAPABILITY_ACCESS, 0,
> > - ZYNQMP_PM_REQUEST_ACK_BLOCKING);
> > - if (ret < 0) {
> > - dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id);
> > - goto release_tcm_split;
> > + /*
> > + * If TCM information is available in the device tree, the PM
> > + * domain framework powers the TCM on and off and pm_domain_id
> > + * is set to 0. pm_domain_id is used by this driver only when
> > + * the hardcoded TCM bindings in the driver are in effect.
> > + */
> > + if (pm_domain_id) {
> > + ret = zynqmp_pm_request_node(pm_domain_id,
> > + ZYNQMP_PM_CAPABILITY_ACCESS, 0,
> > + ZYNQMP_PM_REQUEST_ACK_BLOCKING);
> > + if (ret < 0) {
> > + dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id);
> > + goto release_tcm_split;
> > + }
>
> This should go in the next patch.
>
> > }
> >
> > dev_dbg(dev, "TCM carveout split mode %s addr=%llx, da=0x%x, size=0x%lx",
> > @@ -646,13 +667,16 @@ static int add_tcm_carveout_lockstep_mode(struct rproc *rproc)
> > for (i = 0; i < num_banks; i++) {
> > pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
> >
> > - /* Turn on each TCM bank individually */
> > - ret = zynqmp_pm_request_node(pm_domain_id,
> > - ZYNQMP_PM_CAPABILITY_ACCESS, 0,
> > - ZYNQMP_PM_REQUEST_ACK_BLOCKING);
> > - if (ret < 0) {
> > - dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id);
> > - goto release_tcm_lockstep;
> > + if (pm_domain_id) {
> > + /* Turn on each TCM bank individually */
> > + ret = zynqmp_pm_request_node(pm_domain_id,
> > + ZYNQMP_PM_CAPABILITY_ACCESS, 0,
> > + ZYNQMP_PM_REQUEST_ACK_BLOCKING);
> > + if (ret < 0) {
> > + dev_err(dev, "failed to turn on TCM 0x%x",
> > + pm_domain_id);
> > + goto release_tcm_lockstep;
> > + }
>
> Same
>
> > }
> >
> > bank_size = r5_core->tcm_banks[i]->size;
> > @@ -687,7 +711,8 @@ static int add_tcm_carveout_lockstep_mode(struct rproc *rproc)
> > /* If failed, Turn off all TCM banks turned on before */
> > for (i--; i >= 0; i--) {
> > pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
> > - zynqmp_pm_release_node(pm_domain_id);
> > + if (pm_domain_id)
> > + zynqmp_pm_release_node(pm_domain_id);
> > }
> > return ret;
> > }
> > @@ -758,6 +783,192 @@ static int zynqmp_r5_parse_fw(struct rproc *rproc, const struct firmware *fw)
> > return ret;
> > }
> >
> > +static void zynqmp_r5_remove_pm_domains(struct rproc *rproc)
> > +{
> > + struct zynqmp_r5_core *r5_core = rproc->priv;
> > + struct device *dev = r5_core->dev;
> > + struct zynqmp_r5_cluster *cluster;
> > + int i;
> > +
> > + cluster = platform_get_drvdata(to_platform_device(dev->parent));
> > +
> > + for (i = 1; i < r5_core->num_pm_dev; i++) {
> > + device_link_del(r5_core->pm_dev_link1[i]);
> > + dev_pm_domain_detach(r5_core->pm_dev1[i], false);
> > + }
> > +
> > + kfree(r5_core->pm_dev1);
> > + r5_core->pm_dev1 = NULL;
> > + kfree(r5_core->pm_dev_link1);
> > + r5_core->pm_dev_link1 = NULL;
> > +
> > + if (cluster->mode == SPLIT_MODE) {
> > + r5_core->num_pm_dev = 0;
> > + return;
> > + }
> > +
> > + for (i = 1; i < r5_core->num_pm_dev; i++) {
> > + device_link_del(r5_core->pm_dev_link2[i]);
> > + dev_pm_domain_detach(r5_core->pm_dev2[i], false);
> > + }
> > +
> > + kfree(r5_core->pm_dev2);
> > + r5_core->pm_dev2 = NULL;
> > + kfree(r5_core->pm_dev_link2);
> > + r5_core->pm_dev_link2 = NULL;
> > + r5_core->num_pm_dev = 0;
> > +}
> > +
> > +static int zynqmp_r5_add_pm_domains(struct rproc *rproc)
> > +{
> > + struct zynqmp_r5_core *r5_core = rproc->priv;
> > + struct device *dev = r5_core->dev, *dev2;
> > + struct zynqmp_r5_cluster *cluster;
> > + struct platform_device *pdev;
> > + struct device_node *np;
> > + int i, j, num_pm_dev, ret;
> > +
> > + cluster = dev_get_drvdata(dev->parent);
> > +
> > + /* get number of power-domains */
> > + num_pm_dev = of_count_phandle_with_args(r5_core->np, "power-domains",
> > + "#power-domain-cells");
> > +
> > + if (num_pm_dev <= 0)
> > + return -EINVAL;
> > +
> > + r5_core->pm_dev1 = kcalloc(num_pm_dev,
> > + sizeof(struct device *),
> > + GFP_KERNEL);
> > + if (!r5_core->pm_dev1)
> > + ret = -ENOMEM;
> > +
> > + r5_core->pm_dev_link1 = kcalloc(num_pm_dev,
> > + sizeof(struct device_link *),
> > + GFP_KERNEL);
> > + if (!r5_core->pm_dev_link1) {
> > + kfree(r5_core->pm_dev1);
> > + r5_core->pm_dev1 = NULL;
> > + return -ENOMEM;
> > + }
> > +
> > + r5_core->num_pm_dev = num_pm_dev;
> > +
> > + /*
> > + * start from 2nd entry in power-domains property list as
> > + * for zynqmp we only add TCM power domains and not core's power domain.
> > + */
>
> It would be worth mentioning where the 1st entry gets added.
>
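
Sure, I will extend the comment in v7. The 1st entry in power-domains is the
core's own power domain; it is intentionally never attached here because, if I
am remembering the call sites right, the driver drives it directly through
zynqmp_pm_request_wake() and zynqmp_pm_force_pwrdwn() in the start/stop
handlers, using the pm_domain_id documented in the kerneldoc above.
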
> > + for (i = 1; i < r5_core->num_pm_dev; i++) {
> > + r5_core->pm_dev1[i] = dev_pm_domain_attach_by_id(dev, i);
> > + if (IS_ERR_OR_NULL(r5_core->pm_dev1[i])) {
> > + dev_dbg(dev, "failed to attach pm domain %d, ret=%ld\n", i,
> > + PTR_ERR(r5_core->pm_dev1[i]));
> > + ret = -EINVAL;
> > + goto fail_add_pm_domains;
> > + }
> > + r5_core->pm_dev_link1[i] = device_link_add(dev, r5_core->pm_dev1[i],
> > + DL_FLAG_STATELESS |
> > + DL_FLAG_RPM_ACTIVE |
> > + DL_FLAG_PM_RUNTIME);
> > + if (!r5_core->pm_dev_link1[i]) {
> > + dev_pm_domain_detach(r5_core->pm_dev1[i], true);
> > + r5_core->pm_dev1[i] = NULL;
> > + ret = -EINVAL;
>
> Cleanup for this iteration is properly done here. As such the while() loop in
> fail_add_pm_domains needs to be while (--i >= 0). See my comment below.
>
> > + goto fail_add_pm_domains;
> > + }
> > + }
> > +
> > + if (cluster->mode == SPLIT_MODE)
> > + return 0;
> > +
> > + r5_core->pm_dev2 = kcalloc(num_pm_dev,
> > + sizeof(struct device *),
> > + GFP_KERNEL);
> > + if (!r5_core->pm_dev2) {
> > + ret = -ENOMEM;
> > + goto fail_add_pm_domains;
> > + }
> > +
> > + r5_core->pm_dev_link2 = kcalloc(num_pm_dev,
> > + sizeof(struct device_link *),
> > + GFP_KERNEL);
> > + if (!r5_core->pm_dev_link2) {
> > + kfree(r5_core->pm_dev2);
> > + r5_core->pm_dev2 = NULL;
> > + ret = -ENOMEM;
> > + goto fail_add_pm_domains;
> > + }
> > +
> > + /* get second core's device to detach its power-domains */
> > + np = of_get_next_child(cluster->dev->of_node, of_node_get(dev->of_node));
> > +
> > + pdev = of_find_device_by_node(np);
> > + if (!pdev) {
> > + dev_err(cluster->dev, "core1 platform device not available\n");
> > + kfree(r5_core->pm_dev2);
> > + kfree(r5_core->pm_dev_link2);
> > + r5_core->pm_dev2 = NULL;
> > + r5_core->pm_dev_link2 = NULL;
> > + ret = -EINVAL;
> > + goto fail_add_pm_domains;
> > + }
> > +
> > + dev2 = &pdev->dev;
> > +
> > + /* for zynqmp we only add TCM power domains and not core's power domain */
> > + for (j = 1; j < r5_core->num_pm_dev; j++) {
> > + r5_core->pm_dev2[j] = dev_pm_domain_attach_by_id(dev2, j);
> > + if (!r5_core->pm_dev2[j]) {
> > + dev_dbg(dev, "can't attach to pm domain %d\n", j);
> > + ret = -EINVAL;
> > + goto fail_add_pm_domains_lockstep;
> > + } else if (IS_ERR(r5_core->pm_dev2[j])) {
> > + dev_dbg(dev, "can't attach to pm domain %d\n", j);
> > + ret = PTR_ERR(r5_core->pm_dev2[j]);
> > + goto fail_add_pm_domains_lockstep;
> > + }
> > +
> > + r5_core->pm_dev_link2[j] = device_link_add(dev, r5_core->pm_dev2[j],
> > + DL_FLAG_STATELESS |
> > + DL_FLAG_RPM_ACTIVE |
> > + DL_FLAG_PM_RUNTIME);
> > + if (!r5_core->pm_dev_link2[j]) {
> > + dev_pm_domain_detach(r5_core->pm_dev2[j], true);
> > + r5_core->pm_dev2[j] = NULL;
> > + ret = -ENODEV;
> > + goto fail_add_pm_domains_lockstep;
> > + }
> > + }
> > +
> > +fail_add_pm_domains_lockstep:
> > + while (j >= 1) {
> > + if (r5_core->pm_dev_link2 && !IS_ERR_OR_NULL(r5_core->pm_dev_link2[j]))
> > + device_link_del(r5_core->pm_dev_link2[j]);
> > + if (r5_core->pm_dev2 && !IS_ERR_OR_NULL(r5_core->pm_dev2[j]))
> > + dev_pm_domain_detach(r5_core->pm_dev2[j], true);
> > + j--;
> > + }
> > + kfree(r5_core->pm_dev2);
> > + r5_core->pm_dev2 = NULL;
> > + kfree(r5_core->pm_dev_link2);
> > + r5_core->pm_dev_link2 = NULL;
> > +
> > +fail_add_pm_domains:
> > + while (i >= 1) {
> > + if (r5_core->pm_dev_link1 && !IS_ERR_OR_NULL(r5_core->pm_dev_link1[i]))
> > + device_link_del(r5_core->pm_dev_link1[i]);
>
> Because the cleanup is properly done above we can start the loop at the previous
> value of 'i', i.e --i. The added bonus is that you don't need the if()
> statement.
>
> Another problem with starting at 'i' is that you get an out of bound access when
> all PM domains have been properly added for core 0 but fail for core 1.
>
> > + if (r5_core->pm_dev1 && !IS_ERR_OR_NULL(r5_core->pm_dev1[i]))
> > + dev_pm_domain_detach(r5_core->pm_dev1[i], true);
>
> Same as above.
>
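
Good catch on the out-of-bounds case. Here is a sketch of the error path I
have in mind for v7, using the renamed members from above. Since the attach
loops start at index 1, I read your "--i >= 0" as "--i > 0" so that slot 0
(which is never attached) is skipped; and unless I am misreading v6, the
success path currently falls through into the lockstep cleanup, so the sketch
also adds the missing "return 0":

	return 0;

fail_add_pm_domains_lockstep:
	/* entry j cleaned itself up before the goto; unwind j-1 .. 1 */
	while (--j > 0) {
		device_link_del(r5_core->pm_dev_core1_link[j]);
		dev_pm_domain_detach(r5_core->pm_dev_core1[j], true);
	}
	kfree(r5_core->pm_dev_core1);
	r5_core->pm_dev_core1 = NULL;
	kfree(r5_core->pm_dev_core1_link);
	r5_core->pm_dev_core1_link = NULL;

fail_add_pm_domains:
	/* same pattern for core 0; i == num_pm_dev here if core 1 failed */
	while (--i > 0) {
		device_link_del(r5_core->pm_dev_core0_link[i]);
		dev_pm_domain_detach(r5_core->pm_dev_core0[i], true);
	}
	kfree(r5_core->pm_dev_core0);
	r5_core->pm_dev_core0 = NULL;
	kfree(r5_core->pm_dev_core0_link);
	r5_core->pm_dev_core0_link = NULL;

	return ret;
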
> I will stop here for this revision.
>
> Mathieu
>
>
> > + i--;
> > + }
> > + kfree(r5_core->pm_dev1);
> > + r5_core->pm_dev1 = NULL;
> > + kfree(r5_core->pm_dev_link1);
> > + r5_core->pm_dev_link1 = NULL;
> > +
> > + return ret;
> > +}
> > +
> > /**
> > * zynqmp_r5_rproc_prepare()
> > * adds carveouts for TCM bank and reserved memory regions
> > @@ -770,19 +981,30 @@ static int zynqmp_r5_rproc_prepare(struct rproc *rproc)
> > {
> > int ret;
> >
> > + ret = zynqmp_r5_add_pm_domains(rproc);
> > + if (ret) {
> > + dev_err(&rproc->dev, "failed to add pm domains\n");
> > + return ret;
> > + }
> > +
> > ret = add_tcm_banks(rproc);
> > if (ret) {
> > dev_err(&rproc->dev, "failed to get TCM banks, err %d\n", ret);
> > - return ret;
> > + goto fail_prepare;
> > }
> >
> > ret = add_mem_regions_carveout(rproc);
> > if (ret) {
> > dev_err(&rproc->dev, "failed to get reserve mem regions %d\n", ret);
> > - return ret;
> > + goto fail_prepare;
> > }
> >
> > return 0;
> > +
> > +fail_prepare:
> > + zynqmp_r5_remove_pm_domains(rproc);
> > +
> > + return ret;
> > }
> >
> > /**
> > @@ -801,11 +1023,13 @@ static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
> >
> > r5_core = rproc->priv;
> >
> > + zynqmp_r5_remove_pm_domains(rproc);
> > +
> > for (i = 0; i < r5_core->tcm_bank_count; i++) {
> > pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
> > - if (zynqmp_pm_release_node(pm_domain_id))
> > - dev_warn(r5_core->dev,
> > - "can't turn off TCM bank 0x%x", pm_domain_id);
> > + if (pm_domain_id && zynqmp_pm_release_node(pm_domain_id))
> > + dev_dbg(r5_core->dev,
> > + "can't turn off TCM bank 0x%x", pm_domain_id);
> > }
> >
> > return 0;
> > --
> > 2.25.1
> >