Message-ID: <176518087747.20066.2164303044523575108@freya>
Date: Mon, 08 Dec 2025 13:31:17 +0530
From: Jai Luthra <jai.luthra@...asonboard.com>
To: Rishikesh Donadkar <r-donadkar@...com>, Tomi Valkeinen <tomi.valkeinen@...asonboard.com>, jai.luthra@...ux.dev, laurent.pinchart@...asonboard.com, mripard@...nel.org
Cc: y-abhilashchandra@...com, devarsht@...com, s-jain1@...com, vigneshr@...com, mchehab@...nel.org, robh@...nel.org, krzk+dt@...nel.org, p.zabel@...gutronix.de, conor+dt@...nel.org, sakari.ailus@...ux.intel.com, hverkuil-cisco@...all.nl, changhuang.liang@...rfivetech.com, jack.zhu@...rfivetech.com, sjoerd@...labora.com, dan.carpenter@...aro.org, hverkuil+cisco@...nel.org, linux-kernel@...r.kernel.org, linux-media@...r.kernel.org, devicetree@...r.kernel.org
Subject: Re: [PATCH v8 18/18] media: ti: j721e-csi2rx: Support system suspend using pm_notifier
Quoting Tomi Valkeinen (2025-12-01 19:16:38)
> Hi,
>
> On 12/11/2025 13:54, Rishikesh Donadkar wrote:
> > From: Jai Luthra <jai.luthra@...asonboard.com>
> >
> > As this device is the "orchestrator" for the rest of the media
> > pipeline, we need to stop all ongoing streams before system suspend and
> > re-enable them when the system wakes up from sleep.
> >
> > Using .suspend/.resume callbacks does not work, as the order of those
> > callbacks amongst various devices in the camera pipeline like the sensor,
> > FPD serdes, CSI bridge etc. is impossible to enforce, even with
> > device links. For example, the Cadence CSI bridge is a child device of
> > this device, thus we cannot create a device link with the CSI bridge as
> > a provider and this device as consumer. This can lead to situations
> > where all the dependencies for the bridge have not yet resumed when we
> > request the subdev to start streaming again through the .resume callback
> > defined in this device.
> >
> > Instead here we register a notifier callback with the PM framework
> > which is triggered when the system is fully functional. At this point we
> > can cleanly stop or start the streams, because we know all other devices
> > and their dependencies are functional. A downside of this approach is
> > that userspace is also alive (not frozen yet, or just thawed), so the
> > suspend notifier may run while userspace is still issuing ioctls like
> > QBUF/DQBUF/STREAMON/STREAMOFF.
> >
> > Tested-by: Rishikesh Donadkar <r-donadkar@...com>
> > Reviewed-by: Rishikesh Donadkar <r-donadkar@...com>
> > Signed-off-by: Jai Luthra <jai.luthra@...asonboard.com>
> > Signed-off-by: Rishikesh Donadkar <r-donadkar@...com>
> > ---
> > .../platform/ti/j721e-csi2rx/j721e-csi2rx.c | 128 ++++++++++++++++++
> > 1 file changed, 128 insertions(+)
> >
> > diff --git a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
> > index 21e032c64b901..dd47758d51a90 100644
> > --- a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
> > +++ b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
> > @@ -131,6 +131,7 @@ struct ti_csi2rx_dev {
> > struct v4l2_subdev *source;
> > struct v4l2_subdev subdev;
> > struct ti_csi2rx_ctx ctx[TI_CSI2RX_MAX_CTX];
> > + struct notifier_block pm_notifier;
> > u8 pix_per_clk;
> > /* Buffer to drain stale data from PSI-L endpoint */
> > struct {
> > @@ -1550,6 +1551,124 @@ static int ti_csi2rx_runtime_resume(struct device *dev)
> > return 0;
> > }
> >
> > +static int ti_csi2rx_suspend(struct device *dev)
> > +{
> > + struct ti_csi2rx_dev *csi = dev_get_drvdata(dev);
> > + enum ti_csi2rx_dma_state state;
> > + struct ti_csi2rx_ctx *ctx;
> > + struct ti_csi2rx_dma *dma;
> > + unsigned long flags = 0;
> > + int i, ret = 0;
> > +
> > + /* If device was not in use we can simply suspend */
> > + if (pm_runtime_status_suspended(dev))
> > + return 0;
> > +
> > + /*
> > + * If device is running, assert the pixel reset to cleanly stop any
> > + * on-going streams before we suspend.
> > + */
> > + writel(0, csi->shim + SHIM_CNTL);
> > +
> > + for (i = 0; i < csi->num_ctx; i++) {
> > + ctx = &csi->ctx[i];
> > + dma = &ctx->dma;
> > +
> > + spin_lock_irqsave(&dma->lock, flags);
> > + state = dma->state;
> > + spin_unlock_irqrestore(&dma->lock, flags);
> > +
> > + if (state != TI_CSI2RX_DMA_STOPPED) {
> > + /* Disable source */
> > + ret = v4l2_subdev_disable_streams(&csi->subdev,
> > + TI_CSI2RX_PAD_FIRST_SOURCE + ctx->idx,
> > + BIT(0));
> > + if (ret)
> > + dev_err(csi->dev, "Failed to stop subdev stream\n");
> > + }
> > +
> > + /* Stop any on-going streams */
> > + writel(0, csi->shim + SHIM_DMACNTX(ctx->idx));
> > +
> > + /* Drain DMA */
> > + ti_csi2rx_drain_dma(ctx);
> > +
> > + /* Terminate DMA */
> > + ret = dmaengine_terminate_sync(ctx->dma.chan);
> > + if (ret)
> > + dev_err(csi->dev, "Failed to stop DMA\n");
> > + }
> > +
> > + return ret;
> > +}
> > +
> > +static int ti_csi2rx_resume(struct device *dev)
> > +{
> > + struct ti_csi2rx_dev *csi = dev_get_drvdata(dev);
> > + struct ti_csi2rx_ctx *ctx;
> > + struct ti_csi2rx_dma *dma;
> > + struct ti_csi2rx_buffer *buf;
> > + unsigned long flags = 0;
> > + unsigned int reg;
> > + int i, ret = 0;
> > +
> > + /* If device was not in use, we can simply wakeup */
> > + if (pm_runtime_status_suspended(dev))
> > + return 0;
>
> Don't we have a streaming count that would be more intuitive to use as an
> "are we streaming" check?
Indeed.
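
Something along these lines would be cleaner (rough, untested sketch;
"enabled_streams" is a hypothetical per-device counter that the
start/stop streaming paths would have to maintain, it does not exist in
the driver today):

static bool ti_csi2rx_is_streaming(struct ti_csi2rx_dev *csi)
{
        bool streaming;

        /* Assuming csi->mutex is what serializes stream start/stop */
        mutex_lock(&csi->mutex);
        streaming = csi->enabled_streams > 0;
        mutex_unlock(&csi->mutex);

        return streaming;
}

ti_csi2rx_suspend()/ti_csi2rx_resume() could then return early when this
is false instead of checking pm_runtime_status_suspended().
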
>
> And the previous patch said that we lose the DMA channel pairings when
> suspending. Doesn't that happen here?
In the case of system suspend, the UDMA driver has (late) hooks that do the
book-keeping of the channel configuration and restore it (early) on system
resume.
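
For context, the general shape of such hooks looks roughly like the sketch
below (placeholder foo_* names and register offset, not the actual k3-udma
code):

#include <linux/device.h>
#include <linux/io.h>
#include <linux/pm.h>

/*
 * A DMA provider book-keeps its channel configuration in a late suspend
 * hook, which runs after the consumers' regular .suspend callbacks, and
 * restores it in an early resume hook, which runs before their .resume
 * callbacks.
 */
struct foo_dma_dev {
        void __iomem *base;
        u32 saved_chan_cfg;     /* saved channel configuration */
};

static int foo_dma_suspend_late(struct device *dev)
{
        struct foo_dma_dev *dd = dev_get_drvdata(dev);

        dd->saved_chan_cfg = readl(dd->base + 0x0); /* placeholder offset */

        return 0;
}

static int foo_dma_resume_early(struct device *dev)
{
        struct foo_dma_dev *dd = dev_get_drvdata(dev);

        writel(dd->saved_chan_cfg, dd->base + 0x0); /* placeholder offset */

        return 0;
}

static const struct dev_pm_ops foo_dma_pm_ops = {
        LATE_SYSTEM_SLEEP_PM_OPS(foo_dma_suspend_late, foo_dma_resume_early)
};

So the ordering problem described in the commit message does not bite here:
by the time this driver's resume notifier re-enables the streams, the DMA
provider has already restored its channel state.
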
Thanks,
Jai
>
> Tomi
>
> > +
> > + /* If device was in use before, restore all the running streams */
> > + reg = SHIM_CNTL_PIX_RST;
> > + writel(reg, csi->shim + SHIM_CNTL);
> > +
> > + for (i = 0; i < csi->num_ctx; i++) {
> > + ctx = &csi->ctx[i];
> > + dma = &ctx->dma;
> > + spin_lock_irqsave(&dma->lock, flags);
> > + if (dma->state != TI_CSI2RX_DMA_STOPPED) {
> > + /* Re-submit all previously submitted buffers to DMA */
> > + list_for_each_entry(buf, &ctx->dma.submitted, list) {
> > + ti_csi2rx_start_dma(ctx, buf);
> > + }
> > + spin_unlock_irqrestore(&dma->lock, flags);
> > +
> > + /* Restore stream config */
> > + ti_csi2rx_setup_shim(ctx);
> > +
> > + ret = v4l2_subdev_enable_streams(&csi->subdev,
> > + TI_CSI2RX_PAD_FIRST_SOURCE + ctx->idx,
> > + BIT(0));
> > + if (ret)
> > + dev_err(ctx->csi->dev, "Failed to start subdev\n");
> > + } else {
> > + spin_unlock_irqrestore(&dma->lock, flags);
> > + }
> > + }
> > +
> > + return ret;
> > +}
> > +
> > +static int ti_csi2rx_pm_notifier(struct notifier_block *nb,
> > + unsigned long action, void *data)
> > +{
> > + struct ti_csi2rx_dev *csi =
> > + container_of(nb, struct ti_csi2rx_dev, pm_notifier);
> > +
> > + switch (action) {
> > + case PM_HIBERNATION_PREPARE:
> > + case PM_SUSPEND_PREPARE:
> > + case PM_RESTORE_PREPARE:
> > + ti_csi2rx_suspend(csi->dev);
> > + break;
> > + case PM_POST_SUSPEND:
> > + case PM_POST_HIBERNATION:
> > + case PM_POST_RESTORE:
> > + ti_csi2rx_resume(csi->dev);
> > + break;
> > + }
> > +
> > + return NOTIFY_DONE;
> > +}
> > +
> > static const struct dev_pm_ops ti_csi2rx_pm_ops = {
> > RUNTIME_PM_OPS(ti_csi2rx_runtime_suspend, ti_csi2rx_runtime_resume,
> > NULL)
> > @@ -1622,6 +1741,13 @@ static int ti_csi2rx_probe(struct platform_device *pdev)
> > goto err_notifier;
> > }
> >
> > + csi->pm_notifier.notifier_call = ti_csi2rx_pm_notifier;
> > + ret = register_pm_notifier(&csi->pm_notifier);
> > + if (ret) {
> > + dev_err(csi->dev, "Failed to create PM notifier: %d\n", ret);
> > + goto err_notifier;
> > + }
> > +
> > pm_runtime_set_active(csi->dev);
> > pm_runtime_enable(csi->dev);
> > pm_request_idle(csi->dev);
> > @@ -1652,6 +1778,8 @@ static void ti_csi2rx_remove(struct platform_device *pdev)
> > ti_csi2rx_cleanup_ctx(&csi->ctx[i]);
> >
> > ti_csi2rx_cleanup_notifier(csi);
> > + unregister_pm_notifier(&csi->pm_notifier);
> > +
> > ti_csi2rx_cleanup_v4l2(csi);
> > mutex_destroy(&csi->mutex);
> > dma_free_coherent(csi->dev, csi->drain.len, csi->drain.vaddr,
>