Message-ID: <d9b2607c-fcf1-428a-aa49-2476b2907559@ti.com>
Date: Tue, 8 Apr 2025 13:44:24 +0530
From: Beleswar Prasad Padhi <b-padhi@...com>
To: Andrew Davis <afd@...com>, <andersson@...nel.org>,
        <mathieu.poirier@...aro.org>
CC: <hnagalla@...com>, <u-kumar1@...com>, <jm@...com>,
        <jan.kiszka@...mens.com>, <christophe.jaillet@...adoo.fr>,
        <jkangas@...hat.com>, <eballetbo@...hat.com>,
        <linux-remoteproc@...r.kernel.org>, <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v9 01/26] remoteproc: k3-r5: Re-order internal memory
 initialization function


On 07/04/25 18:59, Andrew Davis wrote:
> On 3/17/25 7:05 AM, Beleswar Padhi wrote:
>> The core's internal memory data structure will be refactored to be part
>> of the k3_r5_rproc structure in a future commit. As a result, internal
>> memory initialization will need to be performed inside
>> k3_r5_cluster_rproc_init() after rproc_alloc().
>>
>> Therefore, move the internal memory initialization function,
>> k3_r5_core_of_get_internal_memories(), above k3_r5_cluster_rproc_init()
>> so that it can be invoked from there.
>>
>> Signed-off-by: Beleswar Padhi <b-padhi@...com>
>> ---
>
> Just to keep things organized, does it make sense to also move
> the other k3_r5_core_of_get_*_memories() up with this?
>
> Also, you move k3_r5_release_tsp() up but don't mention that
> in the commit message.


Sure, I will incorporate these changes in the next revision.
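
For reference, the ordering this series is driving at inside
k3_r5_cluster_rproc_init() would look roughly like the sketch below.
This is only a sketch: the k3_r5_rproc field layout and the second
parameter of k3_r5_core_of_get_internal_memories() after the refactor
are assumptions based on the commit message, not code in this patch.

    /* per core, inside k3_r5_cluster_rproc_init() */
    rproc = rproc_alloc(cdev, dev_name(cdev), &k3_r5_rproc_ops,
                        fw_name, sizeof(*kproc));
    if (!rproc)
        return -ENOMEM;

    kproc = rproc->priv;

    /* core->mem becomes part of kproc, so this must follow rproc_alloc() */
    ret = k3_r5_core_of_get_internal_memories(pdev, kproc);
    if (ret)
        return ret;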

Thanks,
Beleswar

>
> Andrew
>
>>   drivers/remoteproc/ti_k3_r5_remoteproc.c | 158 +++++++++++------------
>>   1 file changed, 79 insertions(+), 79 deletions(-)
>>
>> diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
>> index dbc513c5569c..b2738b9a1b2d 100644
>> --- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
>> +++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
>> @@ -1199,6 +1199,85 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
>>       return ret;
>>   }
>>
>> +static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
>> +                           struct k3_r5_core *core)
>> +{
>> +    static const char * const mem_names[] = {"atcm", "btcm"};
>> +    struct device *dev = &pdev->dev;
>> +    struct resource *res;
>> +    int num_mems;
>> +    int i;
>> +
>> +    num_mems = ARRAY_SIZE(mem_names);
>> +    core->mem = devm_kcalloc(dev, num_mems, sizeof(*core->mem), GFP_KERNEL);
>> +    if (!core->mem)
>> +        return -ENOMEM;
>> +
>> +    for (i = 0; i < num_mems; i++) {
>> +        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
>> +                           mem_names[i]);
>> +        if (!res) {
>> +            dev_err(dev, "found no memory resource for %s\n",
>> +                mem_names[i]);
>> +            return -EINVAL;
>> +        }
>> +        if (!devm_request_mem_region(dev, res->start,
>> +                         resource_size(res),
>> +                         dev_name(dev))) {
>> +            dev_err(dev, "could not request %s region for resource\n",
>> +                mem_names[i]);
>> +            return -EBUSY;
>> +        }
>> +
>> +        /*
>> +         * TCMs are designed in general to support RAM-like backing
>> +         * memories. So, map these as Normal Non-Cached memories. This
>> +         * also avoids/fixes any potential alignment faults due to
>> +         * unaligned data accesses when using memcpy() or memset()
>> +         * functions (normally seen with device type memory).
>> +         */
>> +        core->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
>> +                            resource_size(res));
>> +        if (!core->mem[i].cpu_addr) {
>> +            dev_err(dev, "failed to map %s memory\n", mem_names[i]);
>> +            return -ENOMEM;
>> +        }
>> +        core->mem[i].bus_addr = res->start;
>> +
>> +        /*
>> +         * TODO:
>> +         * The R5F cores can place ATCM & BTCM anywhere in their address
>> +         * space based on the corresponding Region Registers in the System
>> +         * Control coprocessor. For now, place ATCM and BTCM at
>> +         * addresses 0 and 0x41010000 (same as the bus address on AM65x
>> +         * SoCs) based on the loczrama setting.
>> +         */
>> +        if (!strcmp(mem_names[i], "atcm")) {
>> +            core->mem[i].dev_addr = core->loczrama ?
>> +                            0 : K3_R5_TCM_DEV_ADDR;
>> +        } else {
>> +            core->mem[i].dev_addr = core->loczrama ?
>> +                            K3_R5_TCM_DEV_ADDR : 0;
>> +        }
>> +        core->mem[i].size = resource_size(res);
>> +
>> +        dev_dbg(dev, "memory %5s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
>> +            mem_names[i], &core->mem[i].bus_addr,
>> +            core->mem[i].size, core->mem[i].cpu_addr,
>> +            core->mem[i].dev_addr);
>> +    }
>> +    core->num_mems = num_mems;
>> +
>> +    return 0;
>> +}
>> +
>> +static void k3_r5_release_tsp(void *data)
>> +{
>> +    struct ti_sci_proc *tsp = data;
>> +
>> +    ti_sci_proc_release(tsp);
>> +}
>> +
>>   static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
>>   {
>>       struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
>> @@ -1358,78 +1437,6 @@ static void k3_r5_cluster_rproc_exit(void *data)
>>       }
>>   }
>>
>> -static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
>> -                           struct k3_r5_core *core)
>> -{
>> -    static const char * const mem_names[] = {"atcm", "btcm"};
>> -    struct device *dev = &pdev->dev;
>> -    struct resource *res;
>> -    int num_mems;
>> -    int i;
>> -
>> -    num_mems = ARRAY_SIZE(mem_names);
>> -    core->mem = devm_kcalloc(dev, num_mems, sizeof(*core->mem), GFP_KERNEL);
>> -    if (!core->mem)
>> -        return -ENOMEM;
>> -
>> -    for (i = 0; i < num_mems; i++) {
>> -        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
>> -                           mem_names[i]);
>> -        if (!res) {
>> -            dev_err(dev, "found no memory resource for %s\n",
>> -                mem_names[i]);
>> -            return -EINVAL;
>> -        }
>> -        if (!devm_request_mem_region(dev, res->start,
>> -                         resource_size(res),
>> -                         dev_name(dev))) {
>> -            dev_err(dev, "could not request %s region for resource\n",
>> -                mem_names[i]);
>> -            return -EBUSY;
>> -        }
>> -
>> -        /*
>> -         * TCMs are designed in general to support RAM-like backing
>> -         * memories. So, map these as Normal Non-Cached memories. This
>> -         * also avoids/fixes any potential alignment faults due to
>> -         * unaligned data accesses when using memcpy() or memset()
>> -         * functions (normally seen with device type memory).
>> -         */
>> -        core->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
>> -                            resource_size(res));
>> -        if (!core->mem[i].cpu_addr) {
>> -            dev_err(dev, "failed to map %s memory\n", mem_names[i]);
>> -            return -ENOMEM;
>> -        }
>> -        core->mem[i].bus_addr = res->start;
>> -
>> -        /*
>> -         * TODO:
>> -         * The R5F cores can place ATCM & BTCM anywhere in their address
>> -         * space based on the corresponding Region Registers in the System
>> -         * Control coprocessor. For now, place ATCM and BTCM at
>> -         * addresses 0 and 0x41010000 (same as the bus address on AM65x
>> -         * SoCs) based on the loczrama setting.
>> -         */
>> -        if (!strcmp(mem_names[i], "atcm")) {
>> -            core->mem[i].dev_addr = core->loczrama ?
>> -                            0 : K3_R5_TCM_DEV_ADDR;
>> -        } else {
>> -            core->mem[i].dev_addr = core->loczrama ?
>> -                            K3_R5_TCM_DEV_ADDR : 0;
>> -        }
>> -        core->mem[i].size = resource_size(res);
>> -
>> -        dev_dbg(dev, "memory %5s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
>> -            mem_names[i], &core->mem[i].bus_addr,
>> -            core->mem[i].size, core->mem[i].cpu_addr,
>> -            core->mem[i].dev_addr);
>> -    }
>> -    core->num_mems = num_mems;
>> -
>> -    return 0;
>> -}
>> -
>>   static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
>>                          struct k3_r5_core *core)
>>   {
>> @@ -1487,13 +1494,6 @@ static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
>>       return 0;
>>   }
>>
>> -static void k3_r5_release_tsp(void *data)
>> -{
>> -    struct ti_sci_proc *tsp = data;
>> -
>> -    ti_sci_proc_release(tsp);
>> -}
>> -
>>   static int k3_r5_core_of_init(struct platform_device *pdev)
>>   {
>>       struct device *dev = &pdev->dev;
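
A note on the mapping comment in the moved function: with a plain
ioremap(), the TCMs would get an ARM Device-type mapping, where the
unaligned accesses that memcpy()/memset() can generate may raise
alignment faults. devm_ioremap_wc() instead yields a Normal Non-Cached
mapping that tolerates such accesses. An illustrative contrast (the
firmware-load usage here is hypothetical, not from the patch):

    /* Device-type mapping: stick to the __iomem accessors */
    void __iomem *tcm = devm_ioremap(dev, res->start, resource_size(res));

    memcpy_toio(tcm, fw->data, fw->size);    /* safe accessor for __iomem */

    /* Normal Non-Cached mapping: plain memcpy()/memset() are fine */
    void __iomem *tcm_wc = devm_ioremap_wc(dev, res->start, resource_size(res));

    memcpy((__force void *)tcm_wc, fw->data, fw->size);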
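
The dev_addr values assigned under the loczrama TODO are what the
remoteproc core later uses when loading firmware: the driver's da_to_va
hook translates a device address from the ELF image into a kernel
virtual address. A minimal sketch of that lookup, loosely modeled on
the driver's k3_r5_rproc_da_to_va() (the kproc->mem layout assumes the
post-refactor structure described in the commit message):

    static void *k3_r5_da_to_va_sketch(struct k3_r5_rproc *kproc, u64 da,
                                       size_t len)
    {
        int i;

        for (i = 0; i < kproc->num_mems; i++) {
            u32 dev_addr = kproc->mem[i].dev_addr;
            size_t size = kproc->mem[i].size;

            /* match if [da, da + len) lies inside this TCM region */
            if (da >= dev_addr && da + len <= dev_addr + size)
                return kproc->mem[i].cpu_addr + (da - dev_addr);
        }

        return NULL;
    }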
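
Likewise, k3_r5_release_tsp() is simply the devm action that undoes
ti_sci_proc_request(). Its registration in k3_r5_core_of_init() looks
roughly like this (paraphrased for context, not part of this diff):

    ret = ti_sci_proc_request(core->tsp);
    if (ret < 0) {
        dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
        return ret;
    }

    ret = devm_add_action_or_reset(dev, k3_r5_release_tsp, core->tsp);
    if (ret)
        return ret;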
