Message-ID: <5976b2a5-6ff4-4d1d-8d8a-dca783f49799@quicinc.com>
Date: Fri, 30 Jun 2023 09:11:34 +0530
From: Pavan Kondeti <quic_pkondeti@...cinc.com>
To: Mukesh Ojha <quic_mojha@...cinc.com>
CC: Pavan Kondeti <quic_pkondeti@...cinc.com>, <corbet@....net>,
<agross@...nel.org>, <andersson@...nel.org>,
<konrad.dybcio@...aro.org>, <robh+dt@...nel.org>,
<krzysztof.kozlowski+dt@...aro.org>, <conor+dt@...nel.org>,
<keescook@...omium.org>, <tony.luck@...el.com>,
<gpiccoli@...lia.com>, <mathieu.poirier@...aro.org>,
<catalin.marinas@....com>, <will@...nel.org>,
<linus.walleij@...aro.org>, <andy.shevchenko@...il.com>,
<linux-doc@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<linux-arm-msm@...r.kernel.org>, <devicetree@...r.kernel.org>,
<linux-hardening@...r.kernel.org>,
<linux-remoteproc@...r.kernel.org>,
<linux-arm-kernel@...ts.infradead.org>,
<linux-gpio@...r.kernel.org>
Subject: Re: [PATCH v4 13/21] remoteproc: qcom: refactor to leverage
exported minidump symbol
On Thu, Jun 29, 2023 at 02:50:34PM +0530, Mukesh Ojha wrote:
>
>
> On 6/28/2023 9:21 PM, Pavan Kondeti wrote:
> > On Wed, Jun 28, 2023 at 06:04:40PM +0530, Mukesh Ojha wrote:
> > > -static int qcom_add_minidump_segments(struct rproc *rproc, struct minidump_subsystem *subsystem,
> > > - void (*rproc_dumpfn_t)(struct rproc *rproc, struct rproc_dump_segment *segment,
> > > - void *dest, size_t offset, size_t size))
> > > +void qcom_minidump(struct rproc *rproc, unsigned int minidump_id,
> > > + void (*rproc_dumpfn_t)(struct rproc *rproc,
> > > + struct rproc_dump_segment *segment, void *dest, size_t offset,
> > > + size_t size))
> > > {
> > > - struct minidump_region __iomem *ptr;
> > > - struct minidump_region region;
> > > - int seg_cnt, i;
> > > dma_addr_t da;
> > > size_t size;
> > > + int seg_cnt;
> > > char *name;
> > > + void *ptr;
> > > + int ret;
> > > + int i;
> > > if (WARN_ON(!list_empty(&rproc->dump_segments))) {
> > > dev_err(&rproc->dev, "dump segment list already populated\n");
> > > - return -EUCLEAN;
> > > + return;
> > > }
> > > - seg_cnt = le32_to_cpu(subsystem->region_count);
> > > - ptr = ioremap((unsigned long)le64_to_cpu(subsystem->regions_baseptr),
> > > - seg_cnt * sizeof(struct minidump_region));
> > > + ptr = qcom_ss_md_mapped_base(minidump_id, &seg_cnt);
> > > if (!ptr)
> > > - return -EFAULT;
> > > + return;
> > > for (i = 0; i < seg_cnt; i++) {
> > > -	memcpy_fromio(&region, ptr + i, sizeof(region));
> > > - if (le32_to_cpu(region.valid) == MINIDUMP_REGION_VALID) {
> > > - name = kstrndup(region.name, MAX_REGION_NAME_LENGTH - 1, GFP_KERNEL);
> > > - if (!name) {
> > > - iounmap(ptr);
> > > - return -ENOMEM;
> > > - }
> > > - da = le64_to_cpu(region.address);
> > > - size = le64_to_cpu(region.size);
> > > - rproc_coredump_add_custom_segment(rproc, da, size, rproc_dumpfn_t, name);
> > > + ret = qcom_ss_valid_segment_info(ptr, i, &name, &da, &size);
> > > + if (ret < 0) {
> > > + iounmap(ptr);
> > > + dev_err(&rproc->dev,
> > > + "Failed with error: %d while adding minidump entries\n",
> > > + ret);
> > > + goto clean_minidump;
> > > }
> > > - }
> > > -
> > > - iounmap(ptr);
> > > - return 0;
> > > -}
> > > -
> > > -void qcom_minidump(struct rproc *rproc, unsigned int minidump_id,
> > > - void (*rproc_dumpfn_t)(struct rproc *rproc,
> > > - struct rproc_dump_segment *segment, void *dest, size_t offset,
> > > - size_t size))
> > > -{
> > > - int ret;
> > > - struct minidump_subsystem *subsystem;
> > > - struct minidump_global_toc *toc;
> > > -
> > > - /* Get Global minidump ToC*/
> > > - toc = qcom_smem_get(QCOM_SMEM_HOST_ANY, SBL_MINIDUMP_SMEM_ID, NULL);
> > > -
> > > - /* check if global table pointer exists and init is set */
> > > - if (IS_ERR(toc) || !toc->status) {
> > > - dev_err(&rproc->dev, "Minidump TOC not found in SMEM\n");
> > > - return;
> > > - }
> > > - /* Get subsystem table of contents using the minidump id */
> > > - subsystem = &toc->subsystems[minidump_id];
> > > -
> > > - /**
> > > - * Collect minidump if SS ToC is valid and segment table
> > > - * is initialized in memory and encryption status is set.
> > > - */
> > > - if (subsystem->regions_baseptr == 0 ||
> > > - le32_to_cpu(subsystem->status) != 1 ||
> > > - le32_to_cpu(subsystem->enabled) != MINIDUMP_SS_ENABLED ||
> > > - le32_to_cpu(subsystem->encryption_status) != MINIDUMP_SS_ENCR_DONE) {
> > > - dev_err(&rproc->dev, "Minidump not ready, skipping\n");
> > > - return;
> > > + /* if it is a valid segment */
> > > + if (!ret)
> > > + rproc_coredump_add_custom_segment(rproc, da, size,
> > > + rproc_dumpfn_t, name);
> > > }
> > > - ret = qcom_add_minidump_segments(rproc, subsystem, rproc_dumpfn_t);
> > > - if (ret) {
> > > - dev_err(&rproc->dev, "Failed with error: %d while adding minidump entries\n", ret);
> > > - goto clean_minidump;
> > > - }
> > > + iounmap(ptr);
> > > rproc_coredump_using_sections(rproc);
> > > +
> > > clean_minidump:
> > > qcom_minidump_cleanup(rproc);
> > > }
> >
> > I like the idea of moving minidump pieces to drivers/soc/qcom/*minidump*.
> >
> > Is it possible to accept one function callback from remoteproc and do
> > all of this in one function exported by minidump?
> >
> > qcom_ss_valid_segment_info() seems a low level function to be exported..
>
> It was ending up with circular dependency due to
> rproc_coredump_add_custom_segment()
>
>
> rproc_coredump => qcom_common => qcom_minidump_smem => rproc_coredump
>
Where is the circular dependency here? Any API accepting a callback
would end up like below:

caller -> API(callback) -> (*callback) -> [in caller code ...]

Maybe I am missing something here.
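
To illustrate (just a rough sketch, all names here are made up): the
minidump driver only sees a function pointer, so it never has to link
against an rproc_coredump symbol:

/*
 * Hypothetical sketch. Exported by the minidump driver; it walks
 * the subsystem ToC and reports each valid region through the
 * caller-supplied callback.
 */
typedef void (*qcom_md_region_cb)(void *data, const char *name,
				  dma_addr_t da, size_t size);

int qcom_minidump_for_each_region(unsigned int minidump_id,
				  qcom_md_region_cb cb, void *data);

The rproc_coredump dependency then lives entirely in the caller,
behind the callback.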
AFAICS, the minidump code could do everything except that it does not
know how to create a segment and add it to the rproc::dump_segments
list. Is it not possible to write a minidump API that accepts a
callback and a list head? We probably cannot reuse
rproc_coredump_add_custom_segment() as is, but some refactoring could
cover both cases. Please give it a thought.
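
Something along these lines, perhaps (again just a sketch, names
invented):

/*
 * Sketch only. The minidump driver iterates the valid regions and
 * asks the caller to turn each one into a segment on the supplied
 * list; remoteproc would pass &rproc->dump_segments here.
 */
int qcom_ss_minidump_collect(unsigned int minidump_id,
			     struct list_head *segments,
			     int (*add_segment)(struct list_head *segments,
						const char *name,
						dma_addr_t da, size_t size));

The add_segment callback on the remoteproc side would be a thin
wrapper around a refactored rproc_coredump_add_custom_segment().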
This way we don't need to create an internal API like below:

int qcom_ss_valid_segment_info(void *ptr, int i, char **name,
			       dma_addr_t *da, size_t *size);
Thanks,
Pavan