[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <Z17Ry6Z9OMqP1MV-@valkosipuli.retiisi.eu>
Date: Sun, 15 Dec 2024 12:55:39 +0000
From: Sakari Ailus <sakari.ailus@....fi>
To: Alain Volmat <alain.volmat@...s.st.com>
Cc: Hugues Fruchet <hugues.fruchet@...s.st.com>,
Mauro Carvalho Chehab <mchehab@...nel.org>,
Maxime Coquelin <mcoquelin.stm32@...il.com>,
Alexandre Torgue <alexandre.torgue@...s.st.com>,
Sakari Ailus <sakari.ailus@...ux.intel.com>,
Rob Herring <robh@...nel.org>,
Krzysztof Kozlowski <krzk+dt@...nel.org>,
Conor Dooley <conor+dt@...nel.org>,
Philipp Zabel <p.zabel@...gutronix.de>,
Hans Verkuil <hverkuil@...all.nl>, linux-media@...r.kernel.org,
linux-stm32@...md-mailman.stormreply.com,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
devicetree@...r.kernel.org
Subject: Re: [PATCH v4 13/15] media: stm32: dcmipp: add core support for the
stm32mp25
Hi Alain,
On Thu, Dec 12, 2024 at 10:17:37AM +0100, Alain Volmat wrote:
> The stm32mp25 supports both parallel & csi inputs.
> An additional clock control is necessary.
> Skeleton of the subdev structures for the stm32mp25 is added,
> identical for the time being to the stm32mp13 however more subdevs
> will be added in further commits.
>
> Signed-off-by: Alain Volmat <alain.volmat@...s.st.com>
> ---
> v4:
> - correct clock handling within dcmipp_runtime_resume
> ---
> .../platform/st/stm32/stm32-dcmipp/dcmipp-core.c | 104 +++++++++++++++++----
> 1 file changed, 85 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
> index 62dd17e0488d..71acf539e1f3 100644
> --- a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
> +++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
> @@ -40,6 +40,7 @@ struct dcmipp_device {
>
> /* Hardware resources */
> void __iomem *regs;
> + struct clk *mclk;
> struct clk *kclk;
>
> /* The pipeline configuration */
> @@ -132,6 +133,40 @@ static const struct dcmipp_pipeline_config stm32mp13_pipe_cfg = {
> .hw_revision = DCMIPP_STM32MP13_VERR
> };
>
> +static const struct dcmipp_ent_config stm32mp25_ent_config[] = {
> + {
> + .name = "dcmipp_input",
> + .init = dcmipp_inp_ent_init,
> + .release = dcmipp_inp_ent_release,
> + },
> + {
> + .name = "dcmipp_dump_postproc",
> + .init = dcmipp_byteproc_ent_init,
> + .release = dcmipp_byteproc_ent_release,
> + },
> + {
> + .name = "dcmipp_dump_capture",
> + .init = dcmipp_bytecap_ent_init,
> + .release = dcmipp_bytecap_ent_release,
> + },
> +};
> +
> +static const struct dcmipp_ent_link stm32mp25_ent_links[] = {
> + DCMIPP_ENT_LINK(ID_INPUT, 1, ID_DUMP_BYTEPROC, 0,
> + MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
> + DCMIPP_ENT_LINK(ID_DUMP_BYTEPROC, 1, ID_DUMP_CAPTURE, 0,
> + MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
> +};
> +
> +#define DCMIPP_STM32MP25_VERR 0x30
> +static const struct dcmipp_pipeline_config stm32mp25_pipe_cfg = {
> + .ents = stm32mp25_ent_config,
> + .num_ents = ARRAY_SIZE(stm32mp25_ent_config),
> + .links = stm32mp25_ent_links,
> + .num_links = ARRAY_SIZE(stm32mp25_ent_links),
> + .hw_revision = DCMIPP_STM32MP25_VERR
> +};
> +
> #define LINK_FLAG_TO_STR(f) ((f) == 0 ? "" :\
> (f) == MEDIA_LNK_FL_ENABLED ? "ENABLED" :\
> (f) == MEDIA_LNK_FL_IMMUTABLE ? "IMMUTABLE" :\
> @@ -212,6 +247,7 @@ static int dcmipp_create_subdevs(struct dcmipp_device *dcmipp)
>
> static const struct of_device_id dcmipp_of_match[] = {
> { .compatible = "st,stm32mp13-dcmipp", .data = &stm32mp13_pipe_cfg },
> + { .compatible = "st,stm32mp25-dcmipp", .data = &stm32mp25_pipe_cfg },
> { /* end node */ },
> };
> MODULE_DEVICE_TABLE(of, dcmipp_of_match);
> @@ -261,13 +297,22 @@ static int dcmipp_graph_notify_bound(struct v4l2_async_notifier *notifier,
> {
> struct dcmipp_device *dcmipp = notifier_to_dcmipp(notifier);
> unsigned int ret;
> - int src_pad;
> + int src_pad, i;
unsigned int?
> struct dcmipp_ent_device *sink;
> - struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_PARALLEL };
> + struct v4l2_fwnode_endpoint vep = { 0 };
> struct fwnode_handle *ep;
> + enum v4l2_mbus_type supported_types[] = {
> + V4L2_MBUS_PARALLEL, V4L2_MBUS_BT656, V4L2_MBUS_CSI2_DPHY
> + };
> + int supported_types_nb = ARRAY_SIZE(supported_types);
>
> dev_dbg(dcmipp->dev, "Subdev \"%s\" bound\n", subdev->name);
>
> + /* Only MP25 supports CSI input */
> + if (!of_device_is_compatible(dcmipp->dev->of_node,
> + "st,stm32mp25-dcmipp"))
This would be much cleaner with match data. You seem to already be using it
for other purposes.
> + supported_types_nb--;
> +
> /*
> * Link this sub-device to DCMIPP, it could be
> * a parallel camera sensor or a CSI-2 to parallel bridge
> @@ -284,21 +329,23 @@ static int dcmipp_graph_notify_bound(struct v4l2_async_notifier *notifier,
> return -ENODEV;
> }
>
> - /* Check for parallel bus-type first, then bt656 */
> - ret = v4l2_fwnode_endpoint_parse(ep, &vep);
> - if (ret) {
> - vep.bus_type = V4L2_MBUS_BT656;
> + /* Check for supported MBUS type */
> + for (i = 0; i < supported_types_nb; i++) {
> + vep.bus_type = supported_types[i];
> ret = v4l2_fwnode_endpoint_parse(ep, &vep);
> - if (ret) {
> - dev_err(dcmipp->dev, "Could not parse the endpoint\n");
> - fwnode_handle_put(ep);
> - return ret;
> - }
> + if (!ret)
> + break;
> }
>
> fwnode_handle_put(ep);
>
> - if (vep.bus.parallel.bus_width == 0) {
> + if (ret) {
> + dev_err(dcmipp->dev, "Could not parse the endpoint\n");
> + return ret;
> + }
> +
> + if (vep.bus_type != V4L2_MBUS_CSI2_DPHY &&
> + vep.bus.parallel.bus_width == 0) {
> dev_err(dcmipp->dev, "Invalid parallel interface bus-width\n");
> return -ENODEV;
> }
> @@ -311,11 +358,13 @@ static int dcmipp_graph_notify_bound(struct v4l2_async_notifier *notifier,
> return -ENODEV;
> }
>
> - /* Parallel input device detected, connect it to parallel subdev */
> + /* Connect input device to the dcmipp_input subdev */
> sink = dcmipp->entity[ID_INPUT];
> - sink->bus.flags = vep.bus.parallel.flags;
> - sink->bus.bus_width = vep.bus.parallel.bus_width;
> - sink->bus.data_shift = vep.bus.parallel.data_shift;
> + if (vep.bus_type != V4L2_MBUS_CSI2_DPHY) {
> + sink->bus.flags = vep.bus.parallel.flags;
> + sink->bus.bus_width = vep.bus.parallel.bus_width;
> + sink->bus.data_shift = vep.bus.parallel.data_shift;
> + }
> sink->bus_type = vep.bus_type;
> ret = media_create_pad_link(&subdev->entity, src_pad, sink->ent, 0,
> MEDIA_LNK_FL_IMMUTABLE |
> @@ -414,7 +463,7 @@ static int dcmipp_graph_init(struct dcmipp_device *dcmipp)
> static int dcmipp_probe(struct platform_device *pdev)
> {
> struct dcmipp_device *dcmipp;
> - struct clk *kclk;
> + struct clk *kclk, *mclk;
> const struct dcmipp_pipeline_config *pipe_cfg;
> struct reset_control *rstc;
> int irq;
> @@ -474,12 +523,20 @@ static int dcmipp_probe(struct platform_device *pdev)
> return ret;
> }
>
> - kclk = devm_clk_get(&pdev->dev, NULL);
> + kclk = devm_clk_get(&pdev->dev, "kclk");
> if (IS_ERR(kclk))
> return dev_err_probe(&pdev->dev, PTR_ERR(kclk),
> "Unable to get kclk\n");
> dcmipp->kclk = kclk;
>
> + if (!of_device_is_compatible(pdev->dev.of_node, "st,stm32mp13-dcmipp")) {
Same here.
> + mclk = devm_clk_get(&pdev->dev, "mclk");
> + if (IS_ERR(mclk))
> + return dev_err_probe(&pdev->dev, PTR_ERR(mclk),
> + "Unable to get mclk\n");
> + dcmipp->mclk = mclk;
> + }
> +
> dcmipp->entity = devm_kcalloc(&pdev->dev, dcmipp->pipe_cfg->num_ents,
> sizeof(*dcmipp->entity), GFP_KERNEL);
> if (!dcmipp->entity)
> @@ -542,6 +599,7 @@ static int dcmipp_runtime_suspend(struct device *dev)
> struct dcmipp_device *dcmipp = dev_get_drvdata(dev);
>
> clk_disable_unprepare(dcmipp->kclk);
> + clk_disable_unprepare(dcmipp->mclk);
>
> return 0;
> }
> @@ -551,9 +609,17 @@ static int dcmipp_runtime_resume(struct device *dev)
> struct dcmipp_device *dcmipp = dev_get_drvdata(dev);
> int ret;
>
> + ret = clk_prepare_enable(dcmipp->mclk);
> + if (ret) {
> + dev_err(dev, "%s: Failed to prepare_enable mclk\n", __func__);
> + return ret;
> + }
> +
> ret = clk_prepare_enable(dcmipp->kclk);
> - if (ret)
> + if (ret) {
> + clk_disable_unprepare(dcmipp->mclk);
> dev_err(dev, "%s: Failed to prepare_enable kclk\n", __func__);
> + }
>
> return ret;
> }
>
>
--
Kind regards,
Sakari Ailus
Powered by blists - more mailing lists