Message-ID: <20230222184135.GB909075@p14s>
Date:   Wed, 22 Feb 2023 11:41:35 -0700
From:   Mathieu Poirier <mathieu.poirier@...aro.org>
To:     Tanmay Shah <tanmay.shah@....com>
Cc:     michal.simek@....com, andersson@...nel.org,
        jaswinder.singh@...aro.org, ben.levinsky@....com,
        shubhrajyoti.datta@....com, linux-kernel@...r.kernel.org,
        linux-arm-kernel@...ts.infradead.org,
        linux-remoteproc@...r.kernel.org
Subject: Re: [PATCH v3 2/3] drivers: remoteproc: xilinx: fix carveout names

On Mon, Feb 13, 2023 at 01:18:25PM -0800, Tanmay Shah wrote:
> If the unit address is appended to the node name of a memory-region,
> then adding rproc carveouts fails, because the node name and the unit
> address are both passed as the carveout name (i.e. vdev0vring0@...xxxxx),
> whereas the remoteproc framework expects only the node name. This patch
> moves memory-region node parsing from driver probe to prepare and
> passes only the node name, not the unit address.
> 
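
A note on why the two names differ: if I read the FDT code right,
of_reserved_mem_lookup() returns a reserved_mem whose ->name is the full
node name, unit address included, while a device node's ->name is cut at
the '@'. Roughly, with a made-up unit address:

	rmem->name	/* "vdev0vring0@xxxxxxxx" - full node name */
	rmem_np->name	/* "vdev0vring0" - bare name the framework matches on */
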
> Fixes: 6b291e8020a8 ("drivers: remoteproc: Add Xilinx r5 remoteproc driver")
> Signed-off-by: Tanmay Shah <tanmay.shah@....com>
> ---
> 
> Changelog:
>   - This is the first version of this change; however, it is being posted
>     as part of the series that is at v3. The v2 of the series can be found
>     at the following link.
> 
> v2: https://lore.kernel.org/all/20230126213154.1707300-1-tanmay.shah@amd.com/
> 
>  drivers/remoteproc/xlnx_r5_remoteproc.c | 87 ++++++-------------------
>  1 file changed, 20 insertions(+), 67 deletions(-)
> 
> diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
> index 2db57d394155..81af2dea56c2 100644
> --- a/drivers/remoteproc/xlnx_r5_remoteproc.c
> +++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
> @@ -61,8 +61,6 @@ static const struct mem_bank_data zynqmp_tcm_banks[] = {
>   * @np: device node of RPU instance
>   * @tcm_bank_count: number TCM banks accessible to this RPU
>   * @tcm_banks: array of each TCM bank data
> - * @rmem_count: Number of reserved mem regions
> - * @rmem: reserved memory region nodes from device tree
>   * @rproc: rproc handle
>   * @pm_domain_id: RPU CPU power domain id
>   */
> @@ -71,8 +69,6 @@ struct zynqmp_r5_core {
>  	struct device_node *np;
>  	int tcm_bank_count;
>  	struct mem_bank_data **tcm_banks;
> -	int rmem_count;
> -	struct reserved_mem **rmem;
>  	struct rproc *rproc;
>  	u32 pm_domain_id;
>  };
> @@ -239,21 +235,31 @@ static int add_mem_regions_carveout(struct rproc *rproc)
>  {
>  	struct rproc_mem_entry *rproc_mem;
>  	struct zynqmp_r5_core *r5_core;
> +	struct device_node *rmem_np;
>  	struct reserved_mem *rmem;
>  	int i, num_mem_regions;
>  
>  	r5_core = (struct zynqmp_r5_core *)rproc->priv;
> -	num_mem_regions = r5_core->rmem_count;
> +
> +	num_mem_regions = of_property_count_elems_of_size(r5_core->np, "memory-region",
> +							  sizeof(phandle));
>  
>  	for (i = 0; i < num_mem_regions; i++) {
> -		rmem = r5_core->rmem[i];
>

Extra blank line here.

Everyone else in the remoteproc subsystem is using of_phandle_iterator_next();
please do the same.  It is easier to maintain, and you don't have to call
of_node_put() after each iteration.
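
Something along these lines (untested sketch, reusing this patch's names):

	static int add_mem_regions_carveout(struct rproc *rproc)
	{
		struct zynqmp_r5_core *r5_core = rproc->priv;
		struct rproc_mem_entry *rproc_mem;
		struct of_phandle_iterator it;
		struct reserved_mem *rmem;
		int i = 0;

		of_phandle_iterator_init(&it, r5_core->np, "memory-region", NULL, 0);
		while (of_phandle_iterator_next(&it) == 0) {
			/* it.node holds a reference to the current memory-region node */
			rmem = of_reserved_mem_lookup(it.node);
			if (!rmem) {
				of_node_put(it.node);
				return -EINVAL;
			}

			/* vdev0buffer vs. plain carveout handling as in your patch ... */

			i++;
		}

		return 0;
	}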


> -		if (!strncmp(rmem->name, "vdev0buffer", strlen("vdev0buffer"))) {
> +		rmem_np = of_parse_phandle(r5_core->np, "memory-region", i);
> +
> +		rmem = of_reserved_mem_lookup(rmem_np);
> +		if (!rmem) {
> +			of_node_put(rmem_np);
> +			return -EINVAL;
> +		}
> +
> +		if (!strcmp(rmem_np->name, "vdev0buffer")) {
>  			/* Init reserved memory for vdev buffer */
>  			rproc_mem = rproc_of_resm_mem_entry_init(&rproc->dev, i,
>  								 rmem->size,
>  								 rmem->base,
> -								 rmem->name);
> +								 rmem_np->name);
>  		} else {
>  			/* Register associated reserved memory regions */
>  			rproc_mem = rproc_mem_entry_init(&rproc->dev, NULL,
> @@ -261,16 +267,20 @@ static int add_mem_regions_carveout(struct rproc *rproc)
>  							 rmem->size, rmem->base,
>  							 zynqmp_r5_mem_region_map,
>  							 zynqmp_r5_mem_region_unmap,
> -							 rmem->name);
> +							 rmem_np->name);
>  		}
>  
> -		if (!rproc_mem)
> +		if (!rproc_mem) {
> +			of_node_put(rmem_np);

When moving to of_phandle_iterator_next(), of_node_put(it.node) has to be
called on error conditions.  Other drivers don't do this; it is something I
will fix in the next cycle.
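
I.e., the error legs inside the loop should look like:

		if (!rproc_mem) {
			of_node_put(it.node);
			return -ENOMEM;
		}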

>  			return -ENOMEM;
> +		}
>  
>  		rproc_add_carveout(rproc, rproc_mem);
>  
>  		dev_dbg(&rproc->dev, "reserved mem carveout %s addr=%llx, size=0x%llx",
>  			rmem->name, rmem->base, rmem->size);
> +
> +		of_node_put(rmem_np);
>  	}
>  
>  	return 0;
> @@ -726,59 +736,6 @@ static int zynqmp_r5_get_tcm_node(struct zynqmp_r5_cluster *cluster)
>  	return 0;
>  }
>  
> -/**
> - * zynqmp_r5_get_mem_region_node()
> - * parse memory-region property and get reserved mem regions
> - *
> - * @r5_core: pointer to zynqmp_r5_core type object
> - *
> - * Return: 0 for success and error code for failure.
> - */
> -static int zynqmp_r5_get_mem_region_node(struct zynqmp_r5_core *r5_core)
> -{
> -	struct device_node *np, *rmem_np;
> -	struct reserved_mem **rmem;
> -	int res_mem_count, i;
> -	struct device *dev;
> -
> -	dev = r5_core->dev;
> -	np = r5_core->np;
> -
> -	res_mem_count = of_property_count_elems_of_size(np, "memory-region",
> -							sizeof(phandle));
> -	if (res_mem_count <= 0) {
> -		dev_warn(dev, "failed to get memory-region property %d\n",
> -			 res_mem_count);
> -		return 0;
> -	}
> -
> -	rmem = devm_kcalloc(dev, res_mem_count,
> -			    sizeof(struct reserved_mem *), GFP_KERNEL);
> -	if (!rmem)
> -		return -ENOMEM;
> -
> -	for (i = 0; i < res_mem_count; i++) {
> -		rmem_np = of_parse_phandle(np, "memory-region", i);
> -		if (!rmem_np)
> -			goto release_rmem;
> -
> -		rmem[i] = of_reserved_mem_lookup(rmem_np);
> -		if (!rmem[i]) {
> -			of_node_put(rmem_np);
> -			goto release_rmem;
> -		}
> -
> -		of_node_put(rmem_np);
> -	}
> -
> -	r5_core->rmem_count = res_mem_count;
> -	r5_core->rmem = rmem;
> -	return 0;
> -
> -release_rmem:
> -	return -EINVAL;
> -}
> -
>  /*
>   * zynqmp_r5_core_init()
>   * Create and initialize zynqmp_r5_core type object
> @@ -806,10 +763,6 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
>  	for (i = 0; i < cluster->core_count; i++) {
>  		r5_core = cluster->r5_cores[i];
>  
> -		ret = zynqmp_r5_get_mem_region_node(r5_core);
> -		if (ret)
> -			dev_warn(dev, "memory-region prop failed %d\n", ret);
> -
>  		/* Initialize r5 cores with power-domains parsed from dts */
>  		ret = of_property_read_u32_index(r5_core->np, "power-domains",
>  						 1, &r5_core->pm_domain_id);
> -- 
> 2.25.1
> 
