Message-ID: <9a73014f-d973-24d6-57c9-0134c0967057@foss.st.com>
Date: Mon, 15 Feb 2021 14:19:34 +0100
From: Arnaud POULIQUEN <arnaud.pouliquen@...s.st.com>
To: Mathieu Poirier <mathieu.poirier@...aro.org>, <ohad@...ery.com>,
<bjorn.andersson@...aro.org>, <arnaud.pouliquen@...com>
CC: <robh+dt@...nel.org>, <mcoquelin.stm32@...il.com>,
<alexandre.torgue@...com>, <linux-remoteproc@...r.kernel.org>,
<devicetree@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<linux-arm-kernel@...ts.infradead.org>
Subject: Re: [PATCH v5 13/19] remoteproc: Properly deal with the resource
table
On 2/12/21 12:46 AM, Mathieu Poirier wrote:
> If it is possible to detach the remote processor, keep an untouched
> copy of the resource table. That way we can start from the same
> resource table without having to worry about original values or what
> elements the startup code has changed when re-attaching to the remote
> processor.
>
> Reported-by: Arnaud POULIQUEN <arnaud.pouliquen@...s.st.com>
> Signed-off-by: Mathieu Poirier <mathieu.poirier@...aro.org>
> ---
> drivers/remoteproc/remoteproc_core.c | 70 ++++++++++++++++++++++
> drivers/remoteproc/remoteproc_elf_loader.c | 24 +++++++-
> include/linux/remoteproc.h | 3 +
> 3 files changed, 95 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
> index 660dcc002ff6..9a77cb6d6470 100644
> --- a/drivers/remoteproc/remoteproc_core.c
> +++ b/drivers/remoteproc/remoteproc_core.c
> @@ -1527,7 +1527,9 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
> clean_up_resources:
> rproc_resource_cleanup(rproc);
> kfree(rproc->cached_table);
> + kfree(rproc->clean_table);
> rproc->cached_table = NULL;
> + rproc->clean_table = NULL;
> rproc->table_ptr = NULL;
> unprepare_rproc:
> /* release HW resources if needed */
> @@ -1555,6 +1557,23 @@ static int rproc_set_loaded_rsc_table(struct rproc *rproc)
> return ret;
> }
>
> + /*
> + * If it is possible to detach the remote processor, keep an untouched
> + * copy of the resource table. That way we can start fresh again when
> + * the remote processor is re-attached, that is:
> + *
> + * DETACHED -> ATTACHED -> DETACHED -> ATTACHED
> + *
> + * A clean copy of the table is also taken in rproc_elf_load_rsc_table()
> + * for cases where the remote processor is booted by the remoteproc
> + * core and later detached from.
> + */
> + if (rproc->ops->detach) {
> + rproc->clean_table = kmemdup(table_ptr, table_sz, GFP_KERNEL);
> + if (!rproc->clean_table)
> + return -ENOMEM;
> + }
> +
> /*
> * The resource table is already loaded in device memory, no need
> * to work with a cached table.
> @@ -1566,6 +1585,40 @@ static int rproc_set_loaded_rsc_table(struct rproc *rproc)
> return 0;
> }
>
> +static int rproc_reset_loaded_rsc_table(struct rproc *rproc)
> +{
> + /*
> + * In order to detach() from a remote processor a clean resource table
> + * _must_ have been allocated at boot time, either from rproc_fw_boot()
> + * or from rproc_attach(). If one isn't present something went really
> + * wrong and we must complain.
> + */
> + if (WARN_ON(!rproc->clean_table))
> + return -EINVAL;
> +
> + /*
> +	 * Install the clean resource table where the firmware, i.e.
> + * rproc_get_loaded_rsc_table(), expects it.
> + */
> + memcpy(rproc->table_ptr, rproc->clean_table, rproc->table_sz);
> +
> + /*
> +	 * If the remote processor was started by the core then a cached_table
> + * is present and we must follow the same cleanup sequence as we would
> + * for a shutdown(). As it is in rproc_stop(), use the cached resource
> + * table for the rest of the detach process since ->table_ptr will
> + * become invalid as soon as carveouts are released in
> + * rproc_resource_cleanup().
> + *
> + * If the remote processor was started by an external entity the
> + * cached_table is NULL and the rest of the cleanup code in
> + * rproc_free_vring() can deal with that.
> + */
> + rproc->table_ptr = rproc->cached_table;
> +
> + return 0;
> +}
> +
> /*
> * Attach to remote processor - similar to rproc_fw_boot() but without
> * the steps that deal with the firmware image.
> @@ -1947,7 +2000,10 @@ void rproc_shutdown(struct rproc *rproc)
>
> /* Free the copy of the resource table */
> kfree(rproc->cached_table);
> + /* Free the clean resource table */
> + kfree(rproc->clean_table);
> rproc->cached_table = NULL;
> + rproc->clean_table = NULL;
> rproc->table_ptr = NULL;
> out:
> mutex_unlock(&rproc->lock);
> @@ -2000,6 +2056,16 @@ int rproc_detach(struct rproc *rproc)
> goto out;
> }
>
> + /*
> + * Install a clean resource table for re-attach while
> + * rproc->table_ptr is still valid.
> + */
> + ret = rproc_reset_loaded_rsc_table(rproc);
> + if (ret) {
> + atomic_inc(&rproc->power);
> + goto out;
> + }
> +
Here you restore the initial values in the loaded resource table, but
rproc_resource_cleanup() is called right after and will update that same
table. This can overwrite the values you just restored, and perhaps lead to
unexpected memory accesses, since the DA and PA addresses have been reset to
their initial values (e.g. rproc_vdev_release() writes back into the
resource table).
And because the vdev release is asynchronous, it is probably better to
reinitialize the resource table on attach, or in rproc_handle_resources().
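Something along these lines perhaps, as a rough and untested sketch (the
helper name is only illustrative; it assumes rproc->clean_table,
rproc->table_ptr and rproc->table_sz are still valid at that point):

/*
 * Hypothetical helper, called from rproc_attach() once
 * rproc_set_loaded_rsc_table() has mapped the loaded table: put back the
 * pristine values saved in rproc->clean_table, so whatever
 * rproc_resource_cleanup() or rproc_vdev_release() wrote at detach time
 * is discarded before the resources are handled again.
 */
static void rproc_restore_clean_rsc_table(struct rproc *rproc)
{
	if (!rproc->clean_table || !rproc->table_ptr)
		return;

	memcpy(rproc->table_ptr, rproc->clean_table, rproc->table_sz);
}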
Regards,
Arnaud
> /* clean up all acquired resources */
> rproc_resource_cleanup(rproc);
>
> @@ -2008,10 +2074,14 @@ int rproc_detach(struct rproc *rproc)
>
> rproc_disable_iommu(rproc);
>
> + /* Free the copy of the resource table */
> + kfree(rproc->cached_table);
> /* Follow the same sequence as in rproc_shutdown() */
> kfree(rproc->cached_table);
> rproc->cached_table = NULL;
> + rproc->clean_table = NULL;
> rproc->table_ptr = NULL;
> +
> out:
> mutex_unlock(&rproc->lock);
> return ret;
> diff --git a/drivers/remoteproc/remoteproc_elf_loader.c b/drivers/remoteproc/remoteproc_elf_loader.c
> index df68d87752e4..aa09782c932d 100644
> --- a/drivers/remoteproc/remoteproc_elf_loader.c
> +++ b/drivers/remoteproc/remoteproc_elf_loader.c
> @@ -17,10 +17,11 @@
>
> #define pr_fmt(fmt) "%s: " fmt, __func__
>
> -#include <linux/module.h>
> +#include <linux/elf.h>
> #include <linux/firmware.h>
> +#include <linux/module.h>
> #include <linux/remoteproc.h>
> -#include <linux/elf.h>
> +#include <linux/slab.h>
>
> #include "remoteproc_internal.h"
> #include "remoteproc_elf_helpers.h"
> @@ -338,6 +339,25 @@ int rproc_elf_load_rsc_table(struct rproc *rproc, const struct firmware *fw)
> if (!rproc->cached_table)
> return -ENOMEM;
>
> + /*
> + * If it is possible to detach the remote processor, keep an untouched
> + * copy of the resource table. That way we can start fresh again when
> + * the remote processor is re-attached, that is:
> + *
> + * OFFLINE -> RUNNING -> DETACHED -> ATTACHED
> + *
> + * A clean copy of the table is also taken in
> + * rproc_set_loaded_rsc_table() for cases where the remote processor is
> + * booted by an external entity and later detached from.
> + */
> + if (rproc->ops->detach) {
> + rproc->clean_table = kmemdup(table, tablesz, GFP_KERNEL);
> + if (!rproc->clean_table) {
> + kfree(rproc->cached_table);
> + return -ENOMEM;
> + }
> + }
> +
> rproc->table_ptr = rproc->cached_table;
> rproc->table_sz = tablesz;
>
> diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
> index e1c843c19cc6..e5f52a12a650 100644
> --- a/include/linux/remoteproc.h
> +++ b/include/linux/remoteproc.h
> @@ -514,6 +514,8 @@ struct rproc_dump_segment {
> * @recovery_disabled: flag that state if recovery was disabled
> * @max_notifyid: largest allocated notify id.
> * @table_ptr: pointer to the resource table in effect
> + * @clean_table: copy of the resource table without modifications. Used
> + * when a remote processor is attached or detached from the core
> * @cached_table: copy of the resource table
> * @table_sz: size of @cached_table
> * @has_iommu: flag to indicate if remote processor is behind an MMU
> @@ -550,6 +552,7 @@ struct rproc {
> bool recovery_disabled;
> int max_notifyid;
> struct resource_table *table_ptr;
> + struct resource_table *clean_table;
> struct resource_table *cached_table;
> size_t table_sz;
> bool has_iommu;
>