Message-ID: <20231215235725.1247350-4-tanmay.shah@amd.com>
Date: Fri, 15 Dec 2023 15:57:25 -0800
From: Tanmay Shah <tanmay.shah@....com>
To: <andersson@...nel.org>, <mathieu.poirier@...aro.org>,
<robh+dt@...nel.org>, <krzysztof.kozlowski+dt@...aro.org>,
<conor+dt@...nel.org>, <michal.simek@....com>, <ben.levinsky@....com>,
<tanmay.shah@....com>
CC: <linux-remoteproc@...r.kernel.org>, <devicetree@...r.kernel.org>,
<linux-arm-kernel@...ts.infradead.org>, <linux-kernel@...r.kernel.org>
Subject: [PATCH v8 3/3] remoteproc: zynqmp: parse TCM from device tree
ZynqMP TCM information was hardcoded in the driver. Now that the TCM
information is available in the device tree, parse it in the driver
as per the new bindings.
Signed-off-by: Tanmay Shah <tanmay.shah@....com>
---
Changes in v8:
- parse the power-domains property from device-tree and use EEMI calls
to power the TCM on/off instead of going through the pm domains framework
- Remove pm_domain_id validation check when powering the TCM on/off
- Remove spurious change
Changes in v7:
- Move the pm_domain_id check from the previous patch into this one
- fix mem_bank_data memory allocation
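For reviewers, a minimal sketch of the bank-count logic the new parser
relies on, assuming the r5f core nodes describe their TCM banks in "reg"
with #address-cells = <2> and #size-cells = <2>, so each entry spans four
u32 cells. The helper name r5_tcm_bank_count() and the lockstep flag are
illustrative only, not part of the patch:

#include <linux/of.h>

/* Sketch only: count TCM banks for one R5 core node. */
static int r5_tcm_bank_count(struct device_node *np, bool lockstep)
{
        /* each reg entry is <addr-hi addr-lo size-hi size-lo>: 4 u32 cells */
        int banks = of_property_count_elems_of_size(np, "reg", 4 * sizeof(u32));

        /* mirror the patch: a missing or empty reg property is -EINVAL */
        if (banks <= 0)
                return -EINVAL;

        /* in lockstep mode core 0 also claims core 1's TCM banks */
        return lockstep ? banks * 2 : banks;
}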
drivers/remoteproc/xlnx_r5_remoteproc.c | 154 +++++++++++++++++++++++-
1 file changed, 148 insertions(+), 6 deletions(-)
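Similarly, a sketch (not part of the patch) of how one TCM power-domain id
is read out of a core node's power-domains list by index, the way
zynqmp_r5_get_tcm_node_from_dt() below does per bank. The driver starts at
index 1 because, per the bindings in this series, index 0 is expected to be
the core's own power domain; the helper name get_tcm_pd_id() is hypothetical:

#include <linux/of.h>

/* Sketch only: fetch the power-domain id of the idx-th power-domains entry. */
static int get_tcm_pd_id(struct device_node *np, int idx, u32 *pd_id)
{
        struct of_phandle_args out_args;
        int ret;

        ret = of_parse_phandle_with_args(np, "power-domains",
                                         "#power-domain-cells", idx, &out_args);
        if (ret)
                return ret;

        /* first specifier cell carries the ZynqMP PM node id */
        *pd_id = out_args.args[0];

        /* drop the reference taken on the power-domain provider node */
        of_node_put(out_args.np);

        return 0;
}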
diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index 4395edea9a64..36d73dcd93f0 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -74,8 +74,8 @@ struct mbox_info {
};
/*
- * Hardcoded TCM bank values. This will be removed once TCM bindings are
- * accepted for system-dt specifications and upstreamed in linux kernel
+ * Hardcoded TCM bank values. These will stay in the driver to maintain
+ * backward compatibility with device trees that do not carry TCM information.
*/
static const struct mem_bank_data zynqmp_tcm_banks_split[] = {
{0xffe00000UL, 0x0, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
@@ -878,6 +878,139 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
return ERR_PTR(ret);
}
+static int zynqmp_r5_get_tcm_node_from_dt(struct zynqmp_r5_cluster *cluster)
+{
+ struct of_phandle_args out_args;
+ int tcm_reg_per_r5, tcm_pd_idx;
+ struct zynqmp_r5_core *r5_core;
+ int i, j, tcm_bank_count, ret;
+ struct platform_device *cpdev;
+ struct mem_bank_data *tcm;
+ struct device_node *np;
+ struct resource *res;
+ u64 abs_addr, size;
+ struct device *dev;
+
+ for (i = 0; i < cluster->core_count; i++) {
+ r5_core = cluster->r5_cores[i];
+ dev = r5_core->dev;
+ np = of_node_get(dev_of_node(dev));
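+ /*
+ * In the power-domains list, index 0 is the core's own power domain;
+ * the TCM bank power domains start at index 1.
+ */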
+ tcm_pd_idx = 1;
+
+ /* the reg property uses #address-cells = <2> and #size-cells = <2> */
+ tcm_reg_per_r5 = of_property_count_elems_of_size(np, "reg",
+ 4 * sizeof(u32));
+ if (tcm_reg_per_r5 <= 0) {
+ dev_err(dev, "can't get reg property err %d\n", tcm_reg_per_r5);
+ of_node_put(np);
+ return -EINVAL;
+ }
+
+ /*
+ * In lockstep mode, r5 core 0 uses r5 core 1's TCM power domains
+ * as well, so allocate twice the per-core TCM bank count.
+ */
+ if (cluster->mode == LOCKSTEP_MODE)
+ tcm_bank_count = tcm_reg_per_r5 * 2;
+ else
+ tcm_bank_count = tcm_reg_per_r5;
+
+ r5_core->tcm_banks = devm_kcalloc(dev, tcm_bank_count,
+ sizeof(struct mem_bank_data *),
+ GFP_KERNEL);
+ if (!r5_core->tcm_banks) {
+ of_node_put(np);
+ return -ENOMEM;
+ }
+
+ r5_core->tcm_bank_count = tcm_bank_count;
+ for (j = 0; j < tcm_bank_count; j++) {
+ tcm = devm_kzalloc(dev, sizeof(struct mem_bank_data),
+ GFP_KERNEL);
+ if (!tcm) {
+ of_node_put(np);
+ return -ENOMEM;
+ }
+
+ r5_core->tcm_banks[j] = tcm;
+
+ /*
+ * In lockstep mode, get the second core's TCM power domain ids
+ * once the first core's TCM entries have been parsed.
+ */
+ if (j == tcm_reg_per_r5) {
+ /*
+ * Get the second core's node. of_get_next_child() drops
+ * the reference held on the first core's node, so no
+ * explicit of_node_put() is needed here.
+ */
+ np = of_get_next_child(cluster->dev->of_node, np);
+
+ /*
+ * reset index into the power-domains property list
+ * for the second core
+ */
+ tcm_pd_idx = 1;
+ }
+
+ /* get power-domains id of tcm */
+ ret = of_parse_phandle_with_args(np, "power-domains",
+ "#power-domain-cells",
+ tcm_pd_idx,
+ &out_args);
+ if (ret) {
+ dev_err(r5_core->dev,
+ "failed to get tcm %d pm domain, ret %d\n",
+ j, ret);
+ of_node_put(out_args.np);
+ return ret;
+ }
+ tcm->pm_domain_id = out_args.args[0];
+ of_node_put(out_args.np);
+ tcm_pd_idx++;
+
+ /*
+ * In lockstep mode, only the second core's power domain ids are
+ * needed; its other information is ignored. The resulting table
+ * matches zynqmp_tcm_banks_lockstep.
+ */
+ if (j >= tcm_reg_per_r5)
+ continue;
+
+ /* get tcm address without translation */
+ ret = of_property_read_reg(np, j, &abs_addr, &size);
+ if (ret) {
+ of_node_put(np);
+ dev_err(dev, "failed to get reg property\n");
+ return ret;
+ }
+
+ /*
+ * The remote processor can only address 32 bits, so truncate
+ * the 64-bit address and size to 32 bits; any upper bits are
+ * discarded.
+ */
+ tcm->da = (u32)abs_addr;
+ tcm->size = (u32)size;
+
+ cpdev = to_platform_device(dev);
+ res = platform_get_resource(cpdev, IORESOURCE_MEM, j);
+ if (!res) {
+ of_node_put(np);
+ dev_err(dev, "failed to get tcm resource\n");
+ return -EINVAL;
+ }
+
+ tcm->addr = (u32)res->start;
+ tcm->bank_name = (char *)res->name;
+ res = devm_request_mem_region(dev, tcm->addr, tcm->size,
+ tcm->bank_name);
+ if (!res) {
+ dev_err(dev, "failed to request tcm resource\n");
+ of_node_put(np);
+ return -EINVAL;
+ }
+ }
+
+ of_node_put(np);
+ }
+
+ return 0;
+}
+
/**
* zynqmp_r5_get_tcm_node()
* Ideally this function should parse tcm node and store information
@@ -956,10 +1089,19 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
struct zynqmp_r5_core *r5_core;
int ret, i;
- ret = zynqmp_r5_get_tcm_node(cluster);
- if (ret < 0) {
- dev_err(dev, "can't get tcm node, err %d\n", ret);
- return ret;
+ r5_core = cluster->r5_cores[0];
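+ /* a "reg" property in the core node means TCM is described in device-tree */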
+ if (of_find_property(r5_core->np, "reg", NULL)) {
+ ret = zynqmp_r5_get_tcm_node_from_dt(cluster);
+ if (ret) {
+ dev_err(dev, "can't get tcm node from dt, err %d\n", ret);
+ return ret;
+ }
+ } else {
+ ret = zynqmp_r5_get_tcm_node(cluster);
+ if (ret < 0) {
+ dev_err(dev, "can't get tcm node, err %d\n", ret);
+ return ret;
+ }
}
for (i = 0; i < cluster->core_count; i++) {
--
2.25.1