Message-ID: <b5582d8a-b7bf-49e2-a8a3-879275a881b2@ti.com>
Date: Thu, 30 May 2024 09:16:14 -0500
From: Andrew Davis <afd@...com>
To: Beleswar Padhi <b-padhi@...com>, <andersson@...nel.org>,
<mathieu.poirier@...aro.org>
CC: <hnagalla@...com>, <u-kumar1@...com>, <linux-remoteproc@...r.kernel.org>,
<linux-kernel@...r.kernel.org>
Subject: Re: [PATCH 2/3] remoteproc: k3-r5: Acquire mailbox handle during
probe
On 5/30/24 4:07 AM, Beleswar Padhi wrote:
> Acquire the mailbox handle during device probe and do not release the
> handle in the stop/detach routines or error paths. This removes the
> redundant requests for the mbox handle later during rproc start/attach.
> This also allows the remoteproc driver's probe to be deferred if the
> mailbox has not been probed yet.
>
> Signed-off-by: Beleswar Padhi <b-padhi@...com>
> ---
> drivers/remoteproc/ti_k3_r5_remoteproc.c | 66 ++++++++----------------
> 1 file changed, 21 insertions(+), 45 deletions(-)
>
> diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
> index 26362a509ae3..157e8fd57665 100644
> --- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
> +++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
> @@ -391,6 +391,7 @@ static int k3_r5_rproc_request_mbox(struct rproc *rproc)
> struct mbox_client *client = &kproc->client;
> struct device *dev = kproc->dev;
> int ret;
> + long err;
>
> client->dev = dev;
> client->tx_done = NULL;
> @@ -400,10 +401,9 @@ static int k3_r5_rproc_request_mbox(struct rproc *rproc)
>
> kproc->mbox = mbox_request_channel(client, 0);
> if (IS_ERR(kproc->mbox)) {
> - ret = -EBUSY;
> - dev_err(dev, "mbox_request_channel failed: %ld\n",
> - PTR_ERR(kproc->mbox));
> - return ret;
> + err = PTR_ERR(kproc->mbox);
> + dev_err(dev, "mbox_request_channel failed: %ld\n", err);
> + return (err == -EPROBE_DEFER) ? -EPROBE_DEFER : -EBUSY;
Why turn all other errors into EBUSY? If you return the error as-is, you
can collapse these 3 lines into just:
return dev_err_probe(dev, PTR_ERR(kproc->mbox), "mbox_request_channel failed\n");
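With that, the whole block would read something like (untested):

	kproc->mbox = mbox_request_channel(client, 0);
	if (IS_ERR(kproc->mbox))
		return dev_err_probe(dev, PTR_ERR(kproc->mbox),
				     "mbox_request_channel failed\n");

and the new "long err" local above would no longer be needed either.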
> }
>
> /*
> @@ -552,10 +552,6 @@ static int k3_r5_rproc_start(struct rproc *rproc)
> u32 boot_addr;
> int ret;
>
> - ret = k3_r5_rproc_request_mbox(rproc);
> - if (ret)
> - return ret;
> -
> boot_addr = rproc->bootaddr;
> /* TODO: add boot_addr sanity checking */
> dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);
> @@ -564,7 +560,7 @@ static int k3_r5_rproc_start(struct rproc *rproc)
> core = kproc->core;
> ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
> if (ret)
> - goto put_mbox;
> + goto out;
The label "out" doesn't do anything, just directly `return ret;` here and
in the other cases below.
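e.g. (untested):

	ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
	if (ret)
		return ret;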
>
> /* unhalt/run all applicable cores */
> if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
> @@ -581,12 +577,12 @@ static int k3_r5_rproc_start(struct rproc *rproc)
> dev_err(dev, "%s: can not start core 1 before core 0\n",
> __func__);
> ret = -EPERM;
> - goto put_mbox;
> + goto out;
> }
>
> ret = k3_r5_core_run(core);
> if (ret)
> - goto put_mbox;
> + goto out;
> }
>
> return 0;
> @@ -596,8 +592,7 @@ static int k3_r5_rproc_start(struct rproc *rproc)
> if (k3_r5_core_halt(core))
> dev_warn(core->dev, "core halt back failed\n");
> }
> -put_mbox:
> - mbox_free_channel(kproc->mbox);
> +out:
> return ret;
> }
>
> @@ -658,8 +653,6 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
> goto out;
> }
>
> - mbox_free_channel(kproc->mbox);
> -
> return 0;
>
> unroll_core_halt:
> @@ -674,42 +667,21 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
> /*
> * Attach to a running R5F remote processor (IPC-only mode)
> *
> - * The R5F attach callback only needs to request the mailbox, the remote
> - * processor is already booted, so there is no need to issue any TI-SCI
> - * commands to boot the R5F cores in IPC-only mode. This callback is invoked
> - * only in IPC-only mode.
> + * The R5F attach callback is a NOP. The remote processor is already booted, and
> + * all required resources have been acquired during probe routine, so there is
> + * no need to issue any TI-SCI commands to boot the R5F cores in IPC-only mode.
> + * This callback is invoked only in IPC-only mode and exists for sanity's sake.
> */
> -static int k3_r5_rproc_attach(struct rproc *rproc)
> -{
> - struct k3_r5_rproc *kproc = rproc->priv;
> - struct device *dev = kproc->dev;
> - int ret;
> -
> - ret = k3_r5_rproc_request_mbox(rproc);
> - if (ret)
> - return ret;
> -
> - dev_info(dev, "R5F core initialized in IPC-only mode\n");
> - return 0;
> -}
> +static int k3_r5_rproc_attach(struct rproc *rproc) { return 0; }
>
> /*
> * Detach from a running R5F remote processor (IPC-only mode)
> *
> - * The R5F detach callback performs the opposite operation to attach callback
> - * and only needs to release the mailbox, the R5F cores are not stopped and
> - * will be left in booted state in IPC-only mode. This callback is invoked
> - * only in IPC-only mode.
> + * The R5F detach callback is a NOP. The R5F cores are not stopped and will be
> + * left in booted state in IPC-only mode. This callback is invoked only in
> + * IPC-only mode and exists for sanity's sake.
> */
> -static int k3_r5_rproc_detach(struct rproc *rproc)
> -{
> - struct k3_r5_rproc *kproc = rproc->priv;
> - struct device *dev = kproc->dev;
> -
> - mbox_free_channel(kproc->mbox);
> - dev_info(dev, "R5F core deinitialized in IPC-only mode\n");
> - return 0;
> -}
> +static int k3_r5_rproc_detach(struct rproc *rproc) { return 0; }
Do we still need to disable the mbox channel somehow here to prevent
receiving more messages from the detached core?
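If so, maybe it would be enough to have the rx callback (assuming that is
still k3_r5_rproc_mbox_callback()) drop anything that arrives once the core
is detached, completely untested:

	/* early in k3_r5_rproc_mbox_callback(), before handling the message */
	if (kproc->rproc->state == RPROC_DETACHED)
		return;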
>
> /*
> * This function implements the .get_loaded_rsc_table() callback and is used
> @@ -1277,6 +1249,10 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
> kproc->rproc = rproc;
> core->rproc = rproc;
>
> + ret = k3_r5_rproc_request_mbox(rproc);
Now that we get the channel here in init, you'll want to add a matching
mbox_free_channel() call to k3_r5_cluster_rproc_exit().
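Something like this in the per-core teardown loop there should do (untested):

	kproc = rproc->priv;

	mbox_free_channel(kproc->mbox);

	rproc_del(rproc);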
Andrew
> + if (ret)
> + return ret;
> +
> ret = k3_r5_rproc_configure_mode(kproc);
> if (ret < 0)
> goto err_config;