Message-ID: <84ff0dd8-706e-4b44-5313-3dd77b83100c@linaro.org>
Date:   Mon, 2 Oct 2023 10:23:41 +0100
From:   Srinivas Kandagatla <srinivas.kandagatla@...aro.org>
To:     Miquel Raynal <miquel.raynal@...tlin.com>,
        Greg Kroah-Hartman <gregkh@...uxfoundation.org>
Cc:     Michael Walle <michael@...le.cc>,
        Thomas Petazzoni <thomas.petazzoni@...tlin.com>,
        Robert Marko <robert.marko@...tura.hr>,
        Luka Perkov <luka.perkov@...tura.hr>,
        linux-kernel@...r.kernel.org, Randy Dunlap <rdunlap@...radead.org>,
        Chen-Yu Tsai <wenst@...omium.org>,
        Daniel Golle <daniel@...rotopia.org>,
        Rafał Miłecki <rafal@...ecki.pl>
Subject: Re: [PATCH v10 1/3] nvmem: core: Rework layouts to become platform
 devices

Thanks, Miquel, for the work on this.
I have one comment below.


On 22/09/2023 18:48, Miquel Raynal wrote:
> Current layout support was initially written without module support in
> mind. When the requirement for module support arose, the existing base
> was improved to support modularization, but a design flaw was introduced
> in the process. With the existing implementation, when a storage device
> registers into NVMEM, the core tries to hook up a layout (if any) and
> populates its cells immediately. This means that if the hardware
> description expects a layout to be hooked up but no driver is available
> for it, the storage medium fails to probe and has to retry later from
> scratch. Technically, the layouts are more of a "plus": even if we
> consider the hardware description to be correct, we could still probe
> the storage device without them (especially if it contains the rootfs).
> 
> One way to overcome this situation is to consider the layouts as
> devices, and leverage the existing notifier mechanism. When a new NVMEM
> device is registered, we can:
> - populate its nvmem-layout child, if any
> - try to modprobe the corresponding driver, if relevant
> - try to hook the NVMEM device up with a layout in the notifier
> And when a new layout is registered:
> - try to hook the new layout up with all the existing NVMEM devices
>    which are not yet bound to a layout
> This way, there is no strict ordering to enforce: any NVMEM device
> creation or NVMEM layout driver insertion is observed as a new event
> which may lead to the creation of additional cells, without disturbing
> the probes with costly (and sometimes endless) deferrals.
> 
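
(Side note, just to make sure I read the intent right: with this rework, a
layout driver ends up looking roughly like the plain platform driver
sketched below. Untested and only illustrative -- the "foo" names, the
compatible string and what exactly the probe registers with the core are
my assumptions, not taken from this patch.)

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int foo_layout_probe(struct platform_device *pdev)
{
	/*
	 * A real layout driver would register its cell-parsing callback
	 * with the NVMEM core here (the exact registration API is not
	 * quoted in this hunk); the core notifier then hooks the layout
	 * up with any NVMEM device still waiting for it.
	 */
	dev_info(&pdev->dev, "layout probed\n");
	return 0;
}

static const struct of_device_id foo_layout_of_match[] = {
	{ .compatible = "vendor,foo-layout" },	/* illustrative only */
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_layout_of_match);

static struct platform_driver foo_layout_driver = {
	.probe = foo_layout_probe,
	.driver = {
		.name = "foo-layout",
		.of_match_table = foo_layout_of_match,
	},
};
module_platform_driver(foo_layout_driver);
MODULE_LICENSE("GPL");
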
> Signed-off-by: Miquel Raynal <miquel.raynal@...tlin.com>
> ---
>   drivers/nvmem/core.c             | 140 ++++++++++++++++++++++++-------
>   drivers/nvmem/layouts/onie-tlv.c |  39 +++++++--
>   drivers/nvmem/layouts/sl28vpd.c  |  39 +++++++--
>   include/linux/nvmem-provider.h   |  11 +--
>   4 files changed, 180 insertions(+), 49 deletions(-)
> 
> diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
> index eaf6a3fe8ca6..14dd3ae777bb 100644
> --- a/drivers/nvmem/core.c
> +++ b/drivers/nvmem/core.c
> @@ -17,11 +17,13 @@
>   #include <linux/nvmem-provider.h>
>   #include <linux/gpio/consumer.h>
>   #include <linux/of.h>
> +#include <linux/of_platform.h>
>   #include <linux/slab.h>
>   
>   struct nvmem_device {
>   	struct module		*owner;
>   	struct device		dev;
> +	struct list_head	node;
>   	int			stride;
>   	int			word_size;
>   	int			id;
> @@ -75,6 +77,7 @@ static LIST_HEAD(nvmem_cell_tables);
>   static DEFINE_MUTEX(nvmem_lookup_mutex);
>   static LIST_HEAD(nvmem_lookup_list);
>   
> +struct notifier_block nvmem_nb;
>   static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
>   
>   static DEFINE_SPINLOCK(nvmem_layout_lock);
> @@ -790,23 +793,16 @@ EXPORT_SYMBOL_GPL(nvmem_layout_unregister);
>   static struct nvmem_layout *nvmem_layout_get(struct nvmem_device *nvmem)
>   {
>   	struct device_node *layout_np;
> -	struct nvmem_layout *l, *layout = ERR_PTR(-EPROBE_DEFER);
> +	struct nvmem_layout *l, *layout = NULL;
>   
>   	layout_np = of_nvmem_layout_get_container(nvmem);
>   	if (!layout_np)
>   		return NULL;
>   
> -	/*
> -	 * In case the nvmem device was built-in while the layout was built as a
> -	 * module, we shall manually request the layout driver loading otherwise
> -	 * we'll never have any match.
> -	 */
> -	of_request_module(layout_np);
> -
>   	spin_lock(&nvmem_layout_lock);
>   
>   	list_for_each_entry(l, &nvmem_layouts, node) {
> -		if (of_match_node(l->of_match_table, layout_np)) {
> +		if (of_match_node(l->dev->driver->of_match_table, layout_np)) {
>   			if (try_module_get(l->owner))
>   				layout = l;
>   
> @@ -863,7 +859,7 @@ const void *nvmem_layout_get_match_data(struct nvmem_device *nvmem,
>   	const struct of_device_id *match;
>   
>   	layout_np = of_nvmem_layout_get_container(nvmem);
> -	match = of_match_node(layout->of_match_table, layout_np);
> +	match = of_match_node(layout->dev->driver->of_match_table, layout_np);
>   
>   	return match ? match->data : NULL;
>   }
> @@ -882,6 +878,7 @@ EXPORT_SYMBOL_GPL(nvmem_layout_get_match_data);
>   struct nvmem_device *nvmem_register(const struct nvmem_config *config)
>   {
>   	struct nvmem_device *nvmem;
> +	struct device_node *layout_np;
>   	int rval;
>   
>   	if (!config->dev)
> @@ -974,19 +971,6 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
>   			goto err_put_device;
>   	}
>   
> -	/*
> -	 * If the driver supplied a layout by config->layout, the module
> -	 * pointer will be NULL and nvmem_layout_put() will be a noop.
> -	 */
> -	nvmem->layout = config->layout ?: nvmem_layout_get(nvmem);
> -	if (IS_ERR(nvmem->layout)) {
> -		rval = PTR_ERR(nvmem->layout);
> -		nvmem->layout = NULL;
> -
> -		if (rval == -EPROBE_DEFER)
> -			goto err_teardown_compat;
> -	}
> -
>   	if (config->cells) {
>   		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
>   		if (rval)
> @@ -1005,24 +989,27 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
>   	if (rval)
>   		goto err_remove_cells;
>   
> -	rval = nvmem_add_cells_from_layout(nvmem);
> -	if (rval)
> -		goto err_remove_cells;
> -
>   	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
>   
>   	rval = device_add(&nvmem->dev);
>   	if (rval)
>   		goto err_remove_cells;
>   
> +	/* Populate layouts as devices */
> +	layout_np = of_nvmem_layout_get_container(nvmem);
> +	if (layout_np) {
> +		rval = of_platform_populate(nvmem->dev.of_node, NULL, NULL, NULL);
> +		of_node_put(layout_np);
> +		if (rval)
> +			goto err_remove_cells;
> +	}
> +
>   	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
>   
>   	return nvmem;
>   
>   err_remove_cells:
>   	nvmem_device_remove_all_cells(nvmem);
> -	nvmem_layout_put(nvmem->layout);
> -err_teardown_compat:
>   	if (config->compat)
>   		nvmem_sysfs_remove_compat(nvmem, config);
>   err_put_device:
> @@ -2124,13 +2111,106 @@ const char *nvmem_dev_name(struct nvmem_device *nvmem)
>   }
>   EXPORT_SYMBOL_GPL(nvmem_dev_name);
>   
> +static void nvmem_try_loading_layout_driver(struct nvmem_device *nvmem)
> +{
> +	struct device_node *layout_np;
> +
> +	layout_np = of_nvmem_layout_get_container(nvmem);
> +	if (layout_np) {
> +		of_request_module(layout_np);
> +		of_node_put(layout_np);
> +	}
> +}
> +
> +static int nvmem_match_available_layout(struct nvmem_device *nvmem)
> +{
> +	int ret;
> +
> +	if (nvmem->layout)
> +		return 0;
> +
> +	nvmem->layout = nvmem_layout_get(nvmem);
> +	if (!nvmem->layout)
> +		return 0;
> +
> +	ret = nvmem_add_cells_from_layout(nvmem);
> +	if (ret) {
> +		nvmem_layout_put(nvmem->layout);
> +		nvmem->layout = NULL;
> +		return ret;
> +	}
> +
> +	return 0;
> +}
> +
> +static int nvmem_dev_match_available_layout(struct device *dev, void *data)
> +{
> +	struct nvmem_device *nvmem = to_nvmem_device(dev);
> +
> +	return nvmem_match_available_layout(nvmem);
> +}
> +
> +static int nvmem_for_each_dev(int (*fn)(struct device *dev, void *data))
> +{
> +	return bus_for_each_dev(&nvmem_bus_type, NULL, NULL, fn);
> +}
> +
> +/*
> + * When an NVMEM device is registered, try to match against a layout and
> + * populate the cells. When an NVMEM layout is probed, ensure all NVMEM devices
> + * which could use it properly expose their cells.
> + */
> +static int nvmem_notifier_call(struct notifier_block *notifier,
> +			       unsigned long event_flags, void *context)
> +{
> +	struct nvmem_device *nvmem = NULL;
> +	int ret;
> +
> +	switch (event_flags) {
> +	case NVMEM_ADD:
> +		nvmem = context;
> +		break;
> +	case NVMEM_LAYOUT_ADD:
> +		break;
> +	default:
> +		return NOTIFY_DONE;
> +	}

It looks a bit unnatural for the core to register a notifier for its own events.


Why do we need the notifier at the core level? Can we not just handle this 
in the core before raising these events, instead of registering a notifier callback?
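
Something along these lines, i.e. calling the helpers this patch already
adds directly from the register paths (untested sketch; the
nvmem_layout_register() side is not quoted above, so where exactly that
call would land is my assumption):

	/* in nvmem_register(), after device_add()/of_platform_populate() */
	nvmem_try_loading_layout_driver(nvmem);
	rval = nvmem_match_available_layout(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	/* in nvmem_layout_register(), before notifying NVMEM_LAYOUT_ADD */
	ret = nvmem_for_each_dev(nvmem_dev_match_available_layout);
	if (ret)
		return ret;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_ADD, layout);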


--srini


> +
> +	if (nvmem) {
> +		/*
> +		 * In case the nvmem device was built-in while the layout was
> +		 * built as a module, manually request loading the layout driver.
> +		 */
> +		nvmem_try_loading_layout_driver(nvmem);
> +
> +		/* Populate the cells of the new nvmem device from its layout, if any */
> +		ret = nvmem_match_available_layout(nvmem);
> +	} else {
> +		/* NVMEM devices might be "waiting" for this layout */
> +		ret = nvmem_for_each_dev(nvmem_dev_match_available_layout);
> +	}
> +
> +	if (ret)
> +		return notifier_from_errno(ret);
> +
> +	return NOTIFY_OK;
> +}
> +
>   static int __init nvmem_init(void)
>   {
> -	return bus_register(&nvmem_bus_type);
> +	int ret;
> +
> +	ret = bus_register(&nvmem_bus_type);
> +	if (ret)
> +		return ret;
> +
> +	nvmem_nb.notifier_call = &nvmem_notifier_call;
> +	return nvmem_register_notifier(&nvmem_nb);
>   }
>   
>   static void __exit nvmem_exit(void)
>   {
> +	nvmem_unregister_notifier(&nvmem_nb);
>   	bus_unregister(&nvmem_bus_type);
>   }
>   
