Message-ID: <175081523471.8144.8244237988305732382@freya>
Date: Tue, 24 Jun 2025 18:33:54 -0700
From: Jai Luthra <jai.luthra@...asonboard.com>
To: Rishikesh Donadkar <r-donadkar@...com>, jai.luthra@...ux.dev, laurent.pinchart@...asonboard.com, mripard@...nel.org
Cc: y-abhilashchandra@...com, devarsht@...com, vaishnav.a@...com, s-jain1@...com, vigneshr@...com, mchehab@...nel.org, robh@...nel.org, krzk+dt@...nel.org, conor+dt@...nel.org, sakari.ailus@...ux.intel.com, hverkuil-cisco@...all.nl, tomi.valkeinen@...asonboard.com, changhuang.liang@...rfivetech.com, jack.zhu@...rfivetech.com, linux-kernel@...r.kernel.org, linux-media@...r.kernel.org, devicetree@...r.kernel.org
Subject: Re: [PATCH v4 12/12] media: ti: j721e-csi2rx: Change the drain architecture for multistream

Hi Rishikesh,

Thanks for the patch.

Quoting Rishikesh Donadkar (2025-05-14 04:25:27)
> On buffer starvation the DMA is marked IDLE, and the stale data in the
> internal FIFOs gets drained only on the next VIDIOC_QBUF call from
> userspace. This approach works fine for the single-stream case.
> 
> But in multistream scenarios, buffer starvation for one stream, i.e. one
> virtual channel, can block the shared HW FIFO of the CSI2RX IP. This can
> stall the pipeline for all other virtual channels, even if buffers are
> available for them.
> 
> This patch introduces a new architecture that continuously drains data
> from the shared HW FIFO into a small (32 KiB) buffer when no buffers are
> made available to the driver from userspace. This keeps the streams
> independent, so a slower downstream element for one camera does not
> block streaming for other cameras.
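
So IIUC, the flow with this patch is roughly the following (paraphrasing
the driver logic from this patch, not the actual code):

	/* In ti_csi2rx_drain_callback() (sketch): */
	if (dma->state == TI_CSI2RX_DMA_STOPPED)
		return;				/* stream is being stopped */
	if (list_empty(&dma->queue))
		/* No buffers from userspace: drain into the 32 KiB
		 * scratch buffer, which re-arms this callback. */
		ti_csi2rx_drain_dma(ctx);
	else
		/* Buffers are available again: resume normal capture. */
		ti_csi2rx_dma_submit_pending(ctx);

i.e. the drain keeps re-arming itself until either a buffer shows up or
the stream is stopped.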
> 
> Signed-off-by: Rishikesh Donadkar <r-donadkar@...com>
> ---
>  .../platform/ti/j721e-csi2rx/j721e-csi2rx.c   | 96 +++++++------------
>  1 file changed, 33 insertions(+), 63 deletions(-)
> 
> diff --git a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
> index ba2a30bfed37d..3b046d3cf7e5a 100644
> --- a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
> +++ b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
> @@ -57,7 +57,6 @@
>  #define TI_CSI2RX_MAX_SOURCE_PADS      TI_CSI2RX_MAX_CTX
>  #define TI_CSI2RX_MAX_PADS             (1 + TI_CSI2RX_MAX_SOURCE_PADS)
>  
> -#define DRAIN_TIMEOUT_MS               50
>  #define DRAIN_BUFFER_SIZE              SZ_32K
>  
>  struct ti_csi2rx_fmt {
> @@ -77,7 +76,6 @@ struct ti_csi2rx_buffer {
>  
>  enum ti_csi2rx_dma_state {
>         TI_CSI2RX_DMA_STOPPED,  /* Streaming not started yet. */
> -       TI_CSI2RX_DMA_IDLE,     /* Streaming but no pending DMA operation. */
>         TI_CSI2RX_DMA_ACTIVE,   /* Streaming and pending DMA operation. */
>  };
>  
> @@ -245,6 +243,10 @@ static const struct ti_csi2rx_fmt ti_csi2rx_formats[] = {
>  static int ti_csi2rx_start_dma(struct ti_csi2rx_ctx *ctx,
>                                struct ti_csi2rx_buffer *buf);
>  
> +/* Forward declarations needed by ti_csi2rx_drain_callback. */
> +static int ti_csi2rx_drain_dma(struct ti_csi2rx_ctx *ctx);
> +static int ti_csi2rx_dma_submit_pending(struct ti_csi2rx_ctx *ctx);
> +
>  static const struct ti_csi2rx_fmt *find_format_by_fourcc(u32 pixelformat)
>  {
>         unsigned int i;
> @@ -596,9 +598,28 @@ static void ti_csi2rx_setup_shim(struct ti_csi2rx_ctx *ctx)
>  
>  static void ti_csi2rx_drain_callback(void *param)
>  {
> -       struct completion *drain_complete = param;
> +       struct ti_csi2rx_ctx *ctx = param;
> +       struct ti_csi2rx_dma *dma = &ctx->dma;
> +       unsigned long flags;
> +
> +       spin_lock_irqsave(&dma->lock, flags);
>  
> -       complete(drain_complete);
> +       if (dma->state == TI_CSI2RX_DMA_STOPPED) {
> +               spin_unlock_irqrestore(&dma->lock, flags);
> +               return;
> +       }
> +
> +       /*
> +        * If dma->queue is empty, no buffer has arrived from user space,
> +        * so queue another transaction to drain the DMA.
> +        */
> +       if (list_empty(&dma->queue)) {
> +               if (ti_csi2rx_drain_dma(ctx))
> +                       dev_warn(ctx->csi->dev, "DMA drain failed\n");
> +       } else {
> +               ti_csi2rx_dma_submit_pending(ctx);
> +       }
> +       spin_unlock_irqrestore(&dma->lock, flags);
>  }
>  
>  /*
> @@ -616,12 +637,9 @@ static int ti_csi2rx_drain_dma(struct ti_csi2rx_ctx *ctx)
>  {
>         struct ti_csi2rx_dev *csi = ctx->csi;
>         struct dma_async_tx_descriptor *desc;
> -       struct completion drain_complete;
>         dma_cookie_t cookie;
>         int ret;
>  
> -       init_completion(&drain_complete);
> -
>         desc = dmaengine_prep_slave_single(ctx->dma.chan, csi->drain.paddr,
>                                            csi->drain.len, DMA_DEV_TO_MEM,
>                                            DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
> @@ -631,7 +649,7 @@ static int ti_csi2rx_drain_dma(struct ti_csi2rx_ctx *ctx)
>         }
>  
>         desc->callback = ti_csi2rx_drain_callback;
> -       desc->callback_param = &drain_complete;
> +       desc->callback_param = ctx;
>  
>         cookie = dmaengine_submit(desc);
>         ret = dma_submit_error(cookie);
> @@ -640,13 +658,6 @@ static int ti_csi2rx_drain_dma(struct ti_csi2rx_ctx *ctx)
>  
>         dma_async_issue_pending(ctx->dma.chan);
>  
> -       if (!wait_for_completion_timeout(&drain_complete,
> -                                        msecs_to_jiffies(DRAIN_TIMEOUT_MS))) {
> -               dmaengine_terminate_sync(ctx->dma.chan);
> -               dev_dbg(csi->dev, "DMA transfer timed out for drain buffer\n");
> -               ret = -ETIMEDOUT;
> -               goto out;
> -       }
>  out:
>         return ret;
>  }
> @@ -694,9 +705,11 @@ static void ti_csi2rx_dma_callback(void *param)
>  
>         ti_csi2rx_dma_submit_pending(ctx);
>  
> -       if (list_empty(&dma->submitted))
> -               dma->state = TI_CSI2RX_DMA_IDLE;
> -
> +       if (list_empty(&dma->submitted)) {
> +               if (ti_csi2rx_drain_dma(ctx))
> +                       dev_warn(ctx->csi->dev,
> +                                "DMA drain failed on one of the transactions\n");
> +       }
>         spin_unlock_irqrestore(&dma->lock, flags);
>  }
>  
> @@ -749,7 +762,7 @@ static void ti_csi2rx_stop_dma(struct ti_csi2rx_ctx *ctx)
>                  * enforced before terminating DMA.
>                  */
>                 ret = ti_csi2rx_drain_dma(ctx);
> -               if (ret && ret != -ETIMEDOUT)
> +               if (ret)
>                         dev_warn(ctx->csi->dev,
>                                  "Failed to drain DMA. Next frame might be bogus\n");
>         }
> @@ -816,57 +829,14 @@ static void ti_csi2rx_buffer_queue(struct vb2_buffer *vb)
>         struct ti_csi2rx_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
>         struct ti_csi2rx_buffer *buf;
>         struct ti_csi2rx_dma *dma = &ctx->dma;
> -       bool restart_dma = false;
>         unsigned long flags = 0;
> -       int ret;
>  
>         buf = container_of(vb, struct ti_csi2rx_buffer, vb.vb2_buf);
>         buf->ctx = ctx;
>  
>         spin_lock_irqsave(&dma->lock, flags);
> -       /*
> -        * Usually the DMA callback takes care of queueing the pending buffers.
> -        * But if DMA has stalled due to lack of buffers, restart it now.
> -        */
> -       if (dma->state == TI_CSI2RX_DMA_IDLE) {
> -               /*
> -                * Do not restart DMA with the lock held because
> -                * ti_csi2rx_drain_dma() might block for completion.
> -                * There won't be a race on queueing DMA anyway since the
> -                * callback is not being fired.
> -                */
> -               restart_dma = true;
> -               dma->state = TI_CSI2RX_DMA_ACTIVE;
> -       } else {
> -               list_add_tail(&buf->list, &dma->queue);
> -       }
> +       list_add_tail(&buf->list, &dma->queue);
>         spin_unlock_irqrestore(&dma->lock, flags);
> -
> -       if (restart_dma) {
> -               /*
> -                * Once frames start dropping, some data gets stuck in the DMA
> -                * pipeline somewhere. So the first DMA transfer after frame
> -                * drops gives a partial frame. This is obviously not useful to

I think it would be good to retain a similar comment in the code, and also
mention this in the commit message, since there is still a possibility of
returning a partial frame to the user after the drain.
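
Something along these lines, for example (the wording is just a sketch,
adjust as you see fit):

	/*
	 * Once frames start dropping, some data can get stuck in the DMA
	 * pipeline, so the first frame captured after a drain may still
	 * be partial and not useful to the application.
	 */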

With that change,

Reviewed-by: Jai Luthra <jai.luthra@...asonboard.com>

> -                * the application and will only confuse it. Issue a DMA
> -                * transaction to drain that up.
> -                */
> -               ret = ti_csi2rx_drain_dma(ctx);
> -               if (ret && ret != -ETIMEDOUT)
> -                       dev_warn(ctx->csi->dev,
> -                                "Failed to drain DMA. Next frame might be bogus\n");
> -
> -               spin_lock_irqsave(&dma->lock, flags);
> -               ret = ti_csi2rx_start_dma(ctx, buf);
> -               if (ret) {
> -                       vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
> -                       dma->state = TI_CSI2RX_DMA_IDLE;
> -                       spin_unlock_irqrestore(&dma->lock, flags);
> -                       dev_err(ctx->csi->dev, "Failed to start DMA: %d\n", ret);
> -               } else {
> -                       list_add_tail(&buf->list, &dma->submitted);
> -                       spin_unlock_irqrestore(&dma->lock, flags);
> -               }
> -       }
>  }
>  
>  static int ti_csi2rx_get_route(struct ti_csi2rx_ctx *ctx)
> -- 
> 2.34.1
>