Message-ID: <0fe87b36-2daf-4b4b-a6b0-28a4c6c599f3@amd.com>
Date: Thu, 4 Jan 2024 10:14:19 -0600
From: Tanmay Shah <tanmay.shah@....com>
To: Mathieu Poirier <mathieu.poirier@...aro.org>
Cc: andersson@...nel.org, robh+dt@...nel.org,
krzysztof.kozlowski+dt@...aro.org, conor+dt@...nel.org,
michal.simek@....com, ben.levinsky@....com,
linux-remoteproc@...r.kernel.org, devicetree@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v8 3/3] remoteproc: zynqmp: parse TCM from device tree
On 1/3/24 12:17 PM, Mathieu Poirier wrote:
> On Fri, Dec 15, 2023 at 03:57:25PM -0800, Tanmay Shah wrote:
> > ZynqMP TCM information is fixed in driver. Now ZynqMP TCM information
>
> s/"is fixed in driver"/"was fixed in driver"
>
> > is available in device-tree. Parse TCM information in driver
> > as per new bindings.
> >
> > Signed-off-by: Tanmay Shah <tanmay.shah@....com>
> > ---
> >
> > Changes in v8:
> > - parse power-domains property from device-tree and use EEMI calls
> > to power on/off TCM instead of using pm domains framework
> > - Remove checking of pm_domain_id validation to power on/off tcm
> > - Remove spurious change
> >
> > Changes in v7:
> > - move checking of pm_domain_id from previous patch
> > - fix mem_bank_data memory allocation
> >
> > drivers/remoteproc/xlnx_r5_remoteproc.c | 154 +++++++++++++++++++++++-
> > 1 file changed, 148 insertions(+), 6 deletions(-)
> >
> > diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
> > index 4395edea9a64..36d73dcd93f0 100644
> > --- a/drivers/remoteproc/xlnx_r5_remoteproc.c
> > +++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
> > @@ -74,8 +74,8 @@ struct mbox_info {
> > };
> >
> > /*
> > - * Hardcoded TCM bank values. This will be removed once TCM bindings are
> > - * accepted for system-dt specifications and upstreamed in linux kernel
> > + * Hardcoded TCM bank values. This will stay in driver to maintain backward
> > + * compatibility with device-tree that does not have TCM information.
> > */
> > static const struct mem_bank_data zynqmp_tcm_banks_split[] = {
> > {0xffe00000UL, 0x0, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
> > @@ -878,6 +878,139 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
> > return ERR_PTR(ret);
> > }
> >
> > +static int zynqmp_r5_get_tcm_node_from_dt(struct zynqmp_r5_cluster *cluster)
> > +{
> > + struct of_phandle_args out_args;
> > + int tcm_reg_per_r5, tcm_pd_idx;
> > + struct zynqmp_r5_core *r5_core;
> > + int i, j, tcm_bank_count, ret;
> > + struct platform_device *cpdev;
> > + struct mem_bank_data *tcm;
> > + struct device_node *np;
> > + struct resource *res;
> > + u64 abs_addr, size;
> > + struct device *dev;
> > +
> > + for (i = 0; i < cluster->core_count; i++) {
> > + r5_core = cluster->r5_cores[i];
> > + dev = r5_core->dev;
> > + np = of_node_get(dev_of_node(dev));
> > + tcm_pd_idx = 1;
> > +
> > + /* we have address cell 2 and size cell as 2 */
> > + tcm_reg_per_r5 = of_property_count_elems_of_size(np, "reg",
> > + 4 * sizeof(u32));
> > + if (tcm_reg_per_r5 <= 0) {
> > + dev_err(dev, "can't get reg property err %d\n", tcm_reg_per_r5);
> > + return -EINVAL;
> > + }
> > +
> > + /*
> > + * In lockstep mode, r5 core 0 will use r5 core 1 TCM
> > + * power domains as well. so allocate twice of per core TCM
>
> Twice of what? Please use proper english in your multi line comments, i.e a
> capital letter for the first word and a dot at the end.
>
> > + */
> > + if (cluster->mode == LOCKSTEP_MODE)
> > + tcm_bank_count = tcm_reg_per_r5 * 2;
> > + else
> > + tcm_bank_count = tcm_reg_per_r5;
> > +
> > + r5_core->tcm_banks = devm_kcalloc(dev, tcm_bank_count,
> > + sizeof(struct mem_bank_data *),
> > + GFP_KERNEL);
> > + if (!r5_core->tcm_banks)
> > + return -ENOMEM;
> > +
> > + r5_core->tcm_bank_count = tcm_bank_count;
> > + for (j = 0; j < tcm_bank_count; j++) {
> > + tcm = devm_kzalloc(dev, sizeof(struct mem_bank_data),
> > + GFP_KERNEL);
> > + if (!tcm)
> > + return -ENOMEM;
> > +
> > + r5_core->tcm_banks[j] = tcm;
> > +
> > + /*
> > + * In lockstep mode, get second core's TCM power domains id
> > + * after first core TCM parsing is done as
>
> There seems to be words missing at the end of the sentence, and there is no dot.
>
> > + */
> > + if (j == tcm_reg_per_r5) {
> > + /* dec first core node */
> > + of_node_put(np);
> > +
> > + /* get second core node */
> > + np = of_get_next_child(cluster->dev->of_node, np);
> > +
> > + /*
> > + * reset index of power-domains property list
> > + * for second core
> > + */
> > + tcm_pd_idx = 1;
> > + }
> > +
> > + /* get power-domains id of tcm */
> > + ret = of_parse_phandle_with_args(np, "power-domains",
> > + "#power-domain-cells",
> > + tcm_pd_idx,
> > + &out_args);
> > + if (ret) {
> > + dev_err(r5_core->dev,
> > + "failed to get tcm %d pm domain, ret %d\n",
> > + j, ret);
> > + of_node_put(out_args.np);
>
> I'm pretty sure this isn't needed in error conditions since @out_args would not
> have been assigned.
>
> > + return ret;
> > + }
> > + tcm->pm_domain_id = out_args.args[0];
> > + of_node_put(out_args.np);
> > + tcm_pd_idx++;
> > +
> > + /*
> > + * In lockstep mode, we only need second core's power domain
> > + * ids. Other information from second core isn't needed so
> > + * ignore it. This forms table as zynqmp_tcm_banks_lockstep
>
> I don't understand the last sentence of this comment and it is missing a dot at
> the end. Comments should be enlightening, the ones I found in this patch are
> sowing confusion.
>
> > + */
> > + if (j >= tcm_reg_per_r5)
> > + continue;
> > +
>
> This condition and the one above (j == tcm_reg_per_r5) is brittle and almost
> guaranteed to cause maintenance problems in the future.
>
> I understand your will to reuse as much code as possible but this is one of the
> rare cases where duplicating code is probably better. Please introduce two new
> functions, i.e zynqmp_r5_get_tcm_node_from_dt_split() and
> zynqmp_r5_get_tcm_node_from_dt_lockstep(), and do all the necessary processing
> based on the use case.
Hi Mathieu,

I tried implementing this, but it still looks hacky: in lockstep mode, extra TCM bank entries get allocated just to store the second core's power-domain IDs.

Instead, I am taking another, cleaner approach where TCM is parsed from the device tree in a uniform way for both modes during zynqmp_r5_core_init(). Then, during the add_tcm_carveout_lockstep_mode() call, I will simply parse the second core's TCM power-domains from the device tree and turn them on.
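Roughly, the lockstep-specific part would look like the untested sketch below (the helper name and exact call site inside add_tcm_carveout_lockstep_mode() are placeholders; walking the cluster node to reach the second core mirrors what this patch already does):

/*
 * Untested sketch, to be called from add_tcm_carveout_lockstep_mode():
 * in lockstep mode only core 0 is registered, so walk the cluster node
 * to reach the second core's child node and power on its TCM domains.
 */
static int zynqmp_r5_power_on_lockstep_tcm(struct zynqmp_r5_cluster *cluster)
{
	struct zynqmp_r5_core *r5_core = cluster->r5_cores[0];
	struct of_phandle_args out_args;
	struct device_node *np;
	int i, ret;

	/* Second core's node is the next child after core 0's node. */
	np = of_get_next_child(cluster->dev->of_node,
			       of_node_get(r5_core->np));
	if (!np)
		return -EINVAL;

	/* Index 0 is the core's own power domain; TCM domains follow. */
	for (i = 1; ; i++) {
		ret = of_parse_phandle_with_args(np, "power-domains",
						 "#power-domain-cells",
						 i, &out_args);
		if (ret)
			break;

		ret = zynqmp_pm_request_node(out_args.args[0],
					     ZYNQMP_PM_CAPABILITY_ACCESS, 0,
					     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
		of_node_put(out_args.np);
		if (ret) {
			of_node_put(np);
			return ret;
		}
	}

	of_node_put(np);
	return 0;
}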
I will implement this and send v9 after successful testing. I wanted to give you a heads-up on this approach; I hope it is fine.
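Also, regarding the of_node_put() in the error path above: agreed, of_parse_phandle_with_args() leaves @out_args untouched when it fails, so the put is only needed after a successful lookup. The new parsing path will follow that pattern, roughly (sketch only, idx and pm_domain_id are placeholder locals):

	ret = of_parse_phandle_with_args(np, "power-domains",
					 "#power-domain-cells",
					 idx, &out_args);
	if (ret)
		return ret;

	/* Drop the node reference only on the success path. */
	pm_domain_id = out_args.args[0];
	of_node_put(out_args.np);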
Thanks,
Tanmay
>
> Thanks,
> Mathieu
>
> > + /* get tcm address without translation */
> > + ret = of_property_read_reg(np, j, &abs_addr, &size);
> > + if (ret) {
> > + of_node_put(np);
> > + dev_err(dev, "failed to get reg property\n");
> > + return ret;
> > + }
> > +
> > + /*
> > + * remote processor can address only 32 bits
> > + * so convert 64-bits into 32-bits. This will discard
> > + * any unwanted upper 32-bits.
> > + */
> > + tcm->da = (u32)abs_addr;
> > + tcm->size = (u32)size;
> > +
> > + cpdev = to_platform_device(dev);
> > + res = platform_get_resource(cpdev, IORESOURCE_MEM, j);
> > + if (!res) {
> > + of_node_put(np);
> > + dev_err(dev, "failed to get tcm resource\n");
> > + return -EINVAL;
> > + }
> > +
> > + tcm->addr = (u32)res->start;
> > + tcm->bank_name = (char *)res->name;
> > + res = devm_request_mem_region(dev, tcm->addr, tcm->size,
> > + tcm->bank_name);
> > + if (!res) {
> > + dev_err(dev, "failed to request tcm resource\n");
> > + of_node_put(np);
> > + return -EINVAL;
> > + }
> > + }
> > + }
> > +
> > + of_node_put(np);
> > + return 0;
> > +}
> > +
> > /**
> > * zynqmp_r5_get_tcm_node()
> > * Ideally this function should parse tcm node and store information
> > @@ -956,10 +1089,19 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
> > struct zynqmp_r5_core *r5_core;
> > int ret, i;
> >
> > - ret = zynqmp_r5_get_tcm_node(cluster);
> > - if (ret < 0) {
> > - dev_err(dev, "can't get tcm node, err %d\n", ret);
> > - return ret;
> > + r5_core = cluster->r5_cores[0];
> > + if (of_find_property(r5_core->np, "reg", NULL)) {
> > + ret = zynqmp_r5_get_tcm_node_from_dt(cluster);
> > + if (ret) {
> > + dev_err(dev, "can't get tcm node from dt, err %d\n", ret);
> > + return ret;
> > + }
> > + } else {
> > + ret = zynqmp_r5_get_tcm_node(cluster);
> > + if (ret < 0) {
> > + dev_err(dev, "can't get tcm node, err %d\n", ret);
> > + return ret;
> > + }
> > }
> >
> > for (i = 0; i < cluster->core_count; i++) {
> > --
> > 2.25.1
> >