[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <b455d4f7-0347-ac07-6d41-32b3f06c4f0a@quicinc.com>
Date: Mon, 23 Oct 2023 19:43:54 +0800
From: Zhenhua Huang <quic_zhenhuah@...cinc.com>
To: Krzysztof Kozlowski <krzysztof.kozlowski@...aro.org>,
<agross@...nel.org>, <andersson@...nel.org>,
<konrad.dybcio@...aro.org>, <robh+dt@...nel.org>,
<krzysztof.kozlowski+dt@...aro.org>, <conor+dt@...nel.org>
CC: <linux-arm-msm@...r.kernel.org>, <devicetree@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <kernel@...cinc.com>,
<quic_tingweiz@...cinc.com>
Subject: Re: [PATCH v1 3/5] soc: qcom: memory_dump: Add memory dump driver
On 2023/10/23 17:31, Krzysztof Kozlowski wrote:
> On 23/10/2023 11:20, Zhenhua Huang wrote:
>> Qualcomm memory dump driver initializes system memory dump table.
>> Firmware dumps system cache, internal memory, peripheral registers
>> to DDR as per this table after system crashes and warm resets. The
>> driver reserves memory, populates ids and sizes for firmware dumping
>> according to the configuration.
>>
>> Signed-off-by: Zhenhua Huang <quic_zhenhuah@...cinc.com>
>> ---
>
> ...
>
>> +
>> +/* populate allocated region */
>> +static int __init mem_dump_populate_mem(struct device *dev,
>> + struct page *start_page,
>> + size_t total_size)
>> +{
>> + struct qcom_memory_dump *memdump = dev_get_drvdata(dev);
>> + struct qcom_dump_entry dump_entry;
>> + struct qcom_dump_data *dump_data;
>> + struct xbc_node *linked_list;
>> + phys_addr_t phys_end_addr;
>> + phys_addr_t phys_addr;
>> + const char *size_p;
>> + void *dump_vaddr;
>> + const char *id_p;
>> + int ret = 0;
>> + int size;
>> + int id;
>> +
>> + phys_addr = page_to_phys(start_page);
>> + phys_end_addr = phys_addr + total_size;
>> + dump_vaddr = page_to_virt(start_page);
>> +
>> + ret = mem_dump_register_data_table(dev, dump_vaddr, phys_addr);
>> + if (ret) {
>> + dev_err_probe(dev, ret, "Mem Dump table set up is failed\n");
>> + return ret;
>
> That's not the syntax. Syntax is return dev_err_probe
>
Got it, thanks.
>> + }
>> +
>> + ret = qcom_init_memdump_imem_area(dev, total_size);
>> + if (ret)
>> + return ret;
>> +
>> + /* Apply two tables: QCOM_DUMP_TYPE_TABLE and QCOM_DUMP_TYPE_DATA */
>> + mem_dump_apply_offset(&dump_vaddr, &phys_addr,
>> + sizeof(struct qcom_dump_table) * 2);
>> +
>> + /* Both "id" and "size" must be present */
>> + xbc_node_for_each_subkey(memdump->mem_dump_node, linked_list) {
>> + const char *name = xbc_node_get_data(linked_list);
>> +
>> + if (!name)
>> + continue;
>> +
>> + id_p = xbc_node_find_value(linked_list, "id", NULL);
>> + size_p = xbc_node_find_value(linked_list, "size", NULL);
>> +
>> + if (id_p && size_p) {
>> + ret = kstrtoint(id_p, 0, &id);
>> + if (ret)
>> + continue;
>> +
>> + ret = kstrtoint(size_p, 0, &size);
>> +
>> + if (ret)
>> + continue;
>> +
>> + /*
>> + * Physical layout: starting from two qcom_dump_data.
>> + * Following are respective dump meta data and reserved regions.
>> + * Qcom_dump_data is populated by the driver, fw parse it
>> + * and dump respective info into dump mem.
>> + * Illustrate the layout:
>> + *
>> + * +------------------------+------------------------+
>> + * | qcom_dump_table(TABLE) | qcom_dump_table(DATA) |
>> + * +------------------------+------------------------+
>> + * +-------------+----------+-------------+----------+
>> + * |qcom_dump_data| dump mem|qcom_dump_data| dump mem |
>> + * +-------------+----------+-------------+----------+
>> + * +-------------+----------+-------------+----------+
>> + * |qcom_dump_data| dump mem|qcom_dump_data| dump mem |
>> + * +-------------+----------+-------------+----------+
>> + * ...
>> + */
>
> You have wrong indentation here.
Thanks for catching.
>
>> + dump_data = dump_vaddr;
>> + dump_data->addr = phys_addr + QCOM_DUMP_DATA_SIZE;
>> + dump_data->len = size;
>> + dump_entry.id = id;
>> + strscpy(dump_data->name, name,
>> + sizeof(dump_data->name));
>> + dump_entry.addr = phys_addr;
>> + ret = mem_dump_data_register(dev, QCOM_DUMP_TABLE_LINUX,
>> + &dump_entry);
>> + if (ret) {
>> + dev_err_probe(dev, ret, "Dump data setup failed, id = %d\n",
>> + id);
>
> Syntax is return dev_err_probe
>
>> + return ret;
>> + }
>> +
>> + mem_dump_apply_offset(&dump_vaddr, &phys_addr,
>> + size + QCOM_DUMP_DATA_SIZE);
>> + if (phys_addr > phys_end_addr) {
>> + dev_err_probe(dev, -ENOMEM, "Exceeding allocated region\n");
>
> ENOMEM? Does not look right then.
ENOMEM means the allocated memory is not enough — is that not the right meaning here? Do you have any suggestion for a better error code? Thanks.
>
>> + return -ENOMEM;
>> + }
>> + } else {
>> + continue;
>> + }
>> + }
>> +
>> + return ret;
>> +}
>> +
>> +static int __init mem_dump_probe(struct platform_device *pdev)
>> +{
>> + struct qcom_memory_dump *memdump;
>> + struct device *dev = &pdev->dev;
>> + struct page *page;
>> + size_t total_size;
>> + int ret = 0;
>> +
>> + memdump = devm_kzalloc(dev, sizeof(struct qcom_memory_dump),
>> + GFP_KERNEL);
>> + if (!memdump)
>> + return -ENOMEM;
>> +
>> + dev_set_drvdata(dev, memdump);
>> +
>> + /* check and initiate CMA region */
>> + ret = mem_dump_reserve_mem(dev);
>> + if (ret)
>> + return ret;
>> +
>> + /* allocate and populate */
>> + page = mem_dump_alloc_mem(dev, &total_size);
>> + if (IS_ERR(page)) {
>> + ret = PTR_ERR(page);
>> + dev_err_probe(dev, ret, "mem dump alloc failed\n");
>
> No, the syntax is:
> ret = dev_err_probe
>
> But why do you print messgaes for memory allocations?
Do you think it's unnecessary because the mm code already prints an error on
allocation failure?
>
>> + goto release;
>> + }
>> +
>> + ret = mem_dump_populate_mem(dev, page, total_size);
>> + if (!ret)
>> + dev_info(dev, "Mem dump region populated successfully\n");
>
> Drop simple probe success messages. Not needed.
OK
>
>> + else
>> + goto free;
>> +
>> + return 0;
>> +
>> +free:
>> + cma_release(dev_get_cma_area(dev), page, (total_size / PAGE_SIZE));
>> +
>> +release:
>> + of_reserved_mem_device_release(dev);
>
> Where is cleanup on unbind?
>
Will add it; thanks for pointing that out.
>> + return ret;
>> +}
>> +
>> +static const struct of_device_id mem_dump_match_table[] = {
>> + {.compatible = "qcom,mem-dump",},
>> + {}
>> +};
>> +
> Best regards,
> Krzysztof
>
Thanks,
Zhenhua
Powered by blists - more mailing lists