Message-ID: <Zn8htULTLxyfMiWk@p14s>
Date: Fri, 28 Jun 2024 14:48:53 -0600
From: Mathieu Poirier <mathieu.poirier@...aro.org>
To: Richard Genoud <richard.genoud@...tlin.com>
Cc: Bjorn Andersson <andersson@...nel.org>,
Philipp Zabel <p.zabel@...gutronix.de>, Suman Anna <s-anna@...com>,
Thomas Petazzoni <thomas.petazzoni@...tlin.com>,
Alexandre Belloni <alexandre.belloni@...tlin.com>,
Udit Kumar <u-kumar1@...com>,
Thomas Richard <thomas.richard@...tlin.com>,
Gregory CLEMENT <gregory.clement@...tlin.com>,
Hari Nagalla <hnagalla@...com>,
Théo Lebrun <theo.lebrun@...tlin.com>,
linux-remoteproc@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 2/4] remoteproc: k3-r5: Introduce PM suspend/resume
handlers
On Fri, Jun 21, 2024 at 05:00:56PM +0200, Richard Genoud wrote:
> This patch adds support for system suspend/resume to the ti_k3_r5
> remoteproc driver.
>
> In order to save maximum power, the approach here is to completely shut
> down the cores that were started by the kernel (i.e. those in the
> RUNNING state).
> Those which were started before the kernel (i.e. in attached mode) will
> be detached.
>
> The pm_notifier mechanism is used here because the remote proc firmware
> has to be reloaded at resume, and thus the driver must have access to
> the file system where the firmware is stored.
>
> On suspend, the running remote procs are stopped, the attached remote
> procs are detached, and processor control is released.
>
> On resume, the reverse operation is done.
>
> Based on work from: Hari Nagalla <hnagalla@...com>
>
> Signed-off-by: Richard Genoud <richard.genoud@...tlin.com>
> ---
> drivers/remoteproc/ti_k3_r5_remoteproc.c | 123 ++++++++++++++++++++++-
> 1 file changed, 121 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
> index 39a47540c590..1f18b08618c8 100644
> --- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
> +++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
> @@ -20,6 +20,7 @@
> #include <linux/platform_device.h>
> #include <linux/pm_runtime.h>
> #include <linux/remoteproc.h>
> +#include <linux/suspend.h>
> #include <linux/reset.h>
> #include <linux/slab.h>
>
> @@ -112,6 +113,7 @@ struct k3_r5_cluster {
> struct list_head cores;
> wait_queue_head_t core_transition;
> const struct k3_r5_soc_data *soc_data;
> + struct notifier_block pm_notifier;
> };
>
> /**
> @@ -577,7 +579,8 @@ static int k3_r5_rproc_start(struct rproc *rproc)
> /* do not allow core 1 to start before core 0 */
> core0 = list_first_entry(&cluster->cores, struct k3_r5_core,
> elem);
> - if (core != core0 && core0->rproc->state == RPROC_OFFLINE) {
> + if (core != core0 && (core0->rproc->state == RPROC_OFFLINE ||
> + core0->rproc->state == RPROC_SUSPENDED)) {
If I understand correctly, this is to address a possible race condition where
user space tries to start core1 via sysfs while the system is being suspended.
Is this correct?  If so, please add a comment to explain what is going on.
Either way, a comment is clearly needed here.
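Something along these lines would do, assuming the sysfs race described above
is indeed what this check guards against (only a rough sketch, feel free to
reword):

	/*
	 * Core0 may already have been moved to RPROC_SUSPENDED by the PM
	 * notifier while the system is suspending.  Treat that state like
	 * RPROC_OFFLINE so that a concurrent attempt to start core1 (e.g.
	 * via sysfs) is rejected during that window.
	 */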
> dev_err(dev, "%s: can not start core 1 before core 0\n",
> __func__);
> ret = -EPERM;
> @@ -646,7 +649,8 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
> /* do not allow core 0 to stop before core 1 */
> core1 = list_last_entry(&cluster->cores, struct k3_r5_core,
> elem);
> - if (core != core1 && core1->rproc->state != RPROC_OFFLINE) {
> + if (core != core1 && core1->rproc->state != RPROC_OFFLINE &&
> + core1->rproc->state != RPROC_SUSPENDED) {
> dev_err(dev, "%s: can not stop core 0 before core 1\n",
> __func__);
> ret = -EPERM;
> @@ -1238,6 +1242,117 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
> return ret;
> }
>
> +static int k3_r5_rproc_suspend(struct k3_r5_rproc *kproc)
> +{
> + unsigned int rproc_state = kproc->rproc->state;
> + int ret;
> +
> + if (rproc_state != RPROC_RUNNING && rproc_state != RPROC_ATTACHED)
> + return 0;
> +
> + if (rproc_state == RPROC_RUNNING)
> + ret = rproc_shutdown(kproc->rproc);
> + else
> + ret = rproc_detach(kproc->rproc);
> +
> + if (ret) {
> + dev_err(kproc->dev, "Failed to %s rproc (%d)\n",
> + (rproc_state == RPROC_RUNNING) ? "shutdown" : "detach",
> + ret);
> + return ret;
> + }
> +
> + kproc->rproc->state = RPROC_SUSPENDED;
> +
> + return ret;
> +}
> +
> +static int k3_r5_rproc_resume(struct k3_r5_rproc *kproc)
> +{
> + int ret;
> +
> + if (kproc->rproc->state != RPROC_SUSPENDED)
> + return 0;
> +
> + ret = k3_r5_rproc_configure_mode(kproc);
> + if (ret < 0)
> + return -EBUSY;
> +
> + /*
> + * ret > 0 for IPC-only mode
> + * ret == 0 for remote proc mode
> + */
> + if (ret == 0) {
> + /*
> + * remote proc loses its configuration when powered off.
> + * So, we have to configure it again on resume.
> + */
> + ret = k3_r5_rproc_configure(kproc);
> + if (ret < 0) {
> + dev_err(kproc->dev, "k3_r5_rproc_configure failed (%d)\n", ret);
> + return -EBUSY;
> + }
> + }
> +
> + return rproc_boot(kproc->rproc);
> +}
> +
> +static int k3_r5_cluster_pm_notifier_call(struct notifier_block *bl,
> + unsigned long state, void *unused)
> +{
> + struct k3_r5_cluster *cluster = container_of(bl, struct k3_r5_cluster,
> + pm_notifier);
> + struct k3_r5_core *core;
> + int ret;
> +
> + switch (state) {
> + case PM_HIBERNATION_PREPARE:
> + case PM_RESTORE_PREPARE:
> + case PM_SUSPEND_PREPARE:
> + /* core1 should be suspended before core0 */
> + list_for_each_entry_reverse(core, &cluster->cores, elem) {
> + /*
> + * In LOCKSTEP mode, rproc is allocated only for
> + * core0
> + */
> + if (core->rproc) {
> + ret = k3_r5_rproc_suspend(core->rproc->priv);
> + if (ret)
> + dev_warn(core->dev,
> + "k3_r5_rproc_suspend failed (%d)\n", ret);
> + }
> +
> + ret = ti_sci_proc_release(core->tsp);
> + if (ret)
> + dev_warn(core->dev, "ti_sci_proc_release failed (%d)\n", ret);
> + }
> + break;
> + case PM_POST_HIBERNATION:
> + case PM_POST_RESTORE:
> + case PM_POST_SUSPEND:
> + /* core0 should be started before core1 */
> + list_for_each_entry(core, &cluster->cores, elem) {
> + ret = ti_sci_proc_request(core->tsp);
> + if (ret)
> + dev_warn(core->dev, "ti_sci_proc_request failed (%d)\n", ret);
> +
> + /*
> + * In LOCKSTEP mode, rproc is allocated only for
> + * core0
> + */
> + if (core->rproc) {
> + ret = k3_r5_rproc_resume(core->rproc->priv);
> + if (ret)
> + dev_warn(core->dev,
> + "k3_r5_rproc_resume failed (%d)\n", ret);
> + }
> + }
> + break;
> + }
> +
> + return 0;
> +}
> +
> static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
> {
> struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
> @@ -1336,6 +1451,9 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
> }
> }
>
> + cluster->pm_notifier.notifier_call = k3_r5_cluster_pm_notifier_call;
> + register_pm_notifier(&cluster->pm_notifier);
> +
> return 0;
>
> err_split:
> @@ -1402,6 +1520,7 @@ static void k3_r5_cluster_rproc_exit(void *data)
> rproc_free(rproc);
> core->rproc = NULL;
> }
> + unregister_pm_notifier(&cluster->pm_notifier);
> }
>
> static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,