lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <18ead528-5e24-419b-b7b3-9b1018124c93@ideasonboard.com>
Date: Fri, 23 Jan 2026 11:29:32 +0200
From: Tomi Valkeinen <tomi.valkeinen@...asonboard.com>
To: Rishikesh Donadkar <r-donadkar@...com>, jai.luthra@...ux.dev,
 laurent.pinchart@...asonboard.com, mripard@...nel.org
Cc: y-abhilashchandra@...com, devarsht@...com, s-jain1@...com,
 vigneshr@...com, mchehab@...nel.org, robh@...nel.org, krzk+dt@...nel.org,
 p.zabel@...gutronix.de, conor+dt@...nel.org, sakari.ailus@...ux.intel.com,
 hverkuil-cisco@...all.nl, jai.luthra@...asonboard.com,
 changhuang.liang@...rfivetech.com, jack.zhu@...rfivetech.com,
 sjoerd@...labora.com, dan.carpenter@...aro.org, hverkuil+cisco@...nel.org,
 linux-kernel@...r.kernel.org, linux-media@...r.kernel.org,
 devicetree@...r.kernel.org
Subject: Re: [PATCH v10 15/18] media: ti: j721e-csi2rx: Change the drain
 architecture for multistream

Hi,

On 21/01/2026 15:54, Rishikesh Donadkar wrote:
> On buffer starvation the DMA is marked IDLE, and the stale data in the
> internal FIFOs gets drained only on the next VIDIOC_QBUF call from the
> userspace. This approach works fine for a single stream case.
> 
> But in multistream scenarios, buffer starvation for one stream can
> block the shared HW FIFO of the CSI2RX IP. This can stall the pipeline
> for all other streams, even if buffers are available for them.
> 
> This patch introduces a new architecture that continuously drains data
> from the shared HW FIFO into a small (32KiB) buffer if no buffers are made
> available to the driver from the userspace. This ensures independence
> between different streams, where a slower downstream element for one
> camera does not block streaming for other cameras.
> 
> Additionally, after we drain for a stream, the next frame will be a
> partial frame, as a portion of its data will have already been drained
> before a valid buffer is queued by user space to the driver.
> Return the partial frame to user space with VB2_BUF_STATE_ERROR.
> 
> Use wait for completion barrier to make sure the shared hardware FIFO
> is cleared of the data at the end of stream after the source has stopped
> sending data.
> 
> Reviewed-by: Jai Luthra <jai.luthra@...asonboard.com>
> Reviewed-by: Yemike Abhilash Chandra <y-abhilashchandra@...com>
> Signed-off-by: Rishikesh Donadkar <r-donadkar@...com>
> ---

Reviewed-by: Tomi Valkeinen <tomi.valkeinen@...asonboard.com>

 Tomi

>  .../platform/ti/j721e-csi2rx/j721e-csi2rx.c   | 123 +++++++++---------
>  1 file changed, 60 insertions(+), 63 deletions(-)
> 
> diff --git a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
> index 5bb726f7d4f44..42ef8c553883f 100644
> --- a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
> +++ b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
> @@ -82,8 +82,8 @@ struct ti_csi2rx_buffer {
>  
>  enum ti_csi2rx_dma_state {
>  	TI_CSI2RX_DMA_STOPPED,	/* Streaming not started yet. */
> -	TI_CSI2RX_DMA_IDLE,	/* Streaming but no pending DMA operation. */
>  	TI_CSI2RX_DMA_ACTIVE,	/* Streaming and pending DMA operation. */
> +	TI_CSI2RX_DMA_DRAINING, /* Dumping all the data in drain buffer */
>  };
>  
>  struct ti_csi2rx_dma {
> @@ -109,6 +109,7 @@ struct ti_csi2rx_ctx {
>  	struct v4l2_format		v_fmt;
>  	struct ti_csi2rx_dma		dma;
>  	struct media_pad		pad;
> +	struct completion		drain_complete;
>  	u32				sequence;
>  	u32				idx;
>  	u32				vc;
> @@ -249,6 +250,10 @@ static const struct ti_csi2rx_fmt ti_csi2rx_formats[] = {
>  static int ti_csi2rx_start_dma(struct ti_csi2rx_ctx *ctx,
>  			       struct ti_csi2rx_buffer *buf);
>  
> +/* Forward declarations needed by ti_csi2rx_drain_callback. */
> +static int ti_csi2rx_drain_dma(struct ti_csi2rx_ctx *ctx);
> +static int ti_csi2rx_dma_submit_pending(struct ti_csi2rx_ctx *ctx);
> +
>  static const struct ti_csi2rx_fmt *find_format_by_fourcc(u32 pixelformat)
>  {
>  	unsigned int i;
> @@ -609,9 +614,32 @@ static void ti_csi2rx_setup_shim(struct ti_csi2rx_ctx *ctx)
>  
>  static void ti_csi2rx_drain_callback(void *param)
>  {
> -	struct completion *drain_complete = param;
> +	struct ti_csi2rx_ctx *ctx = param;
> +	struct ti_csi2rx_dma *dma = &ctx->dma;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&dma->lock, flags);
> +
> +	if (dma->state == TI_CSI2RX_DMA_STOPPED) {
> +		complete(&ctx->drain_complete);
> +		spin_unlock_irqrestore(&dma->lock, flags);
> +		return;
> +	}
>  
> -	complete(drain_complete);
> +	/*
> +	 * If dma->queue is empty, it indicates that no buffer has been
> +	 * provided by user space. In this case, initiate a transaction
> +	 * to drain the DMA. Since one drain of size DRAIN_BUFFER_SIZE
> +	 * will be done here, the subsequent frame will be a
> +	 * partial frame, with a size of frame_size - DRAIN_BUFFER_SIZE
> +	 */
> +	if (list_empty(&dma->queue)) {
> +		if (ti_csi2rx_drain_dma(ctx))
> +			dev_warn(ctx->csi->dev, "DMA drain failed\n");
> +	} else {
> +		ti_csi2rx_dma_submit_pending(ctx);
> +	}
> +	spin_unlock_irqrestore(&dma->lock, flags);
>  }
>  
>  /*
> @@ -629,12 +657,9 @@ static int ti_csi2rx_drain_dma(struct ti_csi2rx_ctx *ctx)
>  {
>  	struct ti_csi2rx_dev *csi = ctx->csi;
>  	struct dma_async_tx_descriptor *desc;
> -	struct completion drain_complete;
>  	dma_cookie_t cookie;
>  	int ret;
>  
> -	init_completion(&drain_complete);
> -
>  	desc = dmaengine_prep_slave_single(ctx->dma.chan, csi->drain.paddr,
>  					   csi->drain.len, DMA_DEV_TO_MEM,
>  					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
> @@ -644,7 +669,7 @@ static int ti_csi2rx_drain_dma(struct ti_csi2rx_ctx *ctx)
>  	}
>  
>  	desc->callback = ti_csi2rx_drain_callback;
> -	desc->callback_param = &drain_complete;
> +	desc->callback_param = ctx;
>  
>  	cookie = dmaengine_submit(desc);
>  	ret = dma_submit_error(cookie);
> @@ -653,13 +678,6 @@ static int ti_csi2rx_drain_dma(struct ti_csi2rx_ctx *ctx)
>  
>  	dma_async_issue_pending(ctx->dma.chan);
>  
> -	if (!wait_for_completion_timeout(&drain_complete,
> -					 msecs_to_jiffies(DRAIN_TIMEOUT_MS))) {
> -		dmaengine_terminate_sync(ctx->dma.chan);
> -		dev_dbg(csi->dev, "DMA transfer timed out for drain buffer\n");
> -		ret = -ETIMEDOUT;
> -		goto out;
> -	}
>  out:
>  	return ret;
>  }
> @@ -703,14 +721,24 @@ static void ti_csi2rx_dma_callback(void *param)
>  	spin_lock_irqsave(&dma->lock, flags);
>  
>  	WARN_ON(!list_is_first(&buf->list, &dma->submitted));
> -	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
> +
> +	if (dma->state == TI_CSI2RX_DMA_DRAINING) {
> +		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
> +		dma->state = TI_CSI2RX_DMA_ACTIVE;
> +	} else {
> +		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
> +	}
> +
>  	list_del(&buf->list);
>  
>  	ti_csi2rx_dma_submit_pending(ctx);
>  
> -	if (list_empty(&dma->submitted))
> -		dma->state = TI_CSI2RX_DMA_IDLE;
> -
> +	if (list_empty(&dma->submitted)) {
> +		dma->state = TI_CSI2RX_DMA_DRAINING;
> +		if (ti_csi2rx_drain_dma(ctx))
> +			dev_warn(ctx->csi->dev,
> +				 "DMA drain failed on one of the transactions\n");
> +	}
>  	spin_unlock_irqrestore(&dma->lock, flags);
>  }
>  
> @@ -746,6 +774,7 @@ static int ti_csi2rx_start_dma(struct ti_csi2rx_ctx *ctx,
>  static void ti_csi2rx_stop_dma(struct ti_csi2rx_ctx *ctx)
>  {
>  	struct ti_csi2rx_dma *dma = &ctx->dma;
> +	struct ti_csi2rx_dev *csi = ctx->csi;
>  	enum ti_csi2rx_dma_state state;
>  	unsigned long flags;
>  	int ret;
> @@ -755,6 +784,8 @@ static void ti_csi2rx_stop_dma(struct ti_csi2rx_ctx *ctx)
>  	dma->state = TI_CSI2RX_DMA_STOPPED;
>  	spin_unlock_irqrestore(&dma->lock, flags);
>  
> +	init_completion(&ctx->drain_complete);
> +
>  	if (state != TI_CSI2RX_DMA_STOPPED) {
>  		/*
>  		 * Normal DMA termination does not clean up pending data on
> @@ -763,11 +794,20 @@ static void ti_csi2rx_stop_dma(struct ti_csi2rx_ctx *ctx)
>  		 * enforced before terminating DMA.
>  		 */
>  		ret = ti_csi2rx_drain_dma(ctx);
> -		if (ret && ret != -ETIMEDOUT)
> +		if (ret)
>  			dev_warn(ctx->csi->dev,
>  				 "Failed to drain DMA. Next frame might be bogus\n");
>  	}
>  
> +	/* We wait for the drain to complete so that the stream stops
> +	 * cleanly, making sure the shared hardware FIFO is cleared of
> +	 * data from the current stream. No more data will be coming from
> +	 * the source after this.
> +	 */
> +	if (!wait_for_completion_timeout(&ctx->drain_complete,
> +					 msecs_to_jiffies(DRAIN_TIMEOUT_MS)))
> +		dev_dbg(csi->dev, "DMA transfer timed out for drain buffer\n");
> +
>  	ret = dmaengine_terminate_sync(ctx->dma.chan);
>  	if (ret)
>  		dev_err(ctx->csi->dev, "Failed to stop DMA: %d\n", ret);
> @@ -830,57 +870,14 @@ static void ti_csi2rx_buffer_queue(struct vb2_buffer *vb)
>  	struct ti_csi2rx_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
>  	struct ti_csi2rx_buffer *buf;
>  	struct ti_csi2rx_dma *dma = &ctx->dma;
> -	bool restart_dma = false;
>  	unsigned long flags = 0;
> -	int ret;
>  
>  	buf = container_of(vb, struct ti_csi2rx_buffer, vb.vb2_buf);
>  	buf->ctx = ctx;
>  
>  	spin_lock_irqsave(&dma->lock, flags);
> -	/*
> -	 * Usually the DMA callback takes care of queueing the pending buffers.
> -	 * But if DMA has stalled due to lack of buffers, restart it now.
> -	 */
> -	if (dma->state == TI_CSI2RX_DMA_IDLE) {
> -		/*
> -		 * Do not restart DMA with the lock held because
> -		 * ti_csi2rx_drain_dma() might block for completion.
> -		 * There won't be a race on queueing DMA anyway since the
> -		 * callback is not being fired.
> -		 */
> -		restart_dma = true;
> -		dma->state = TI_CSI2RX_DMA_ACTIVE;
> -	} else {
> -		list_add_tail(&buf->list, &dma->queue);
> -	}
> +	list_add_tail(&buf->list, &dma->queue);
>  	spin_unlock_irqrestore(&dma->lock, flags);
> -
> -	if (restart_dma) {
> -		/*
> -		 * Once frames start dropping, some data gets stuck in the DMA
> -		 * pipeline somewhere. So the first DMA transfer after frame
> -		 * drops gives a partial frame. This is obviously not useful to
> -		 * the application and will only confuse it. Issue a DMA
> -		 * transaction to drain that up.
> -		 */
> -		ret = ti_csi2rx_drain_dma(ctx);
> -		if (ret && ret != -ETIMEDOUT)
> -			dev_warn(ctx->csi->dev,
> -				 "Failed to drain DMA. Next frame might be bogus\n");
> -
> -		spin_lock_irqsave(&dma->lock, flags);
> -		ret = ti_csi2rx_start_dma(ctx, buf);
> -		if (ret) {
> -			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
> -			dma->state = TI_CSI2RX_DMA_IDLE;
> -			spin_unlock_irqrestore(&dma->lock, flags);
> -			dev_err(ctx->csi->dev, "Failed to start DMA: %d\n", ret);
> -		} else {
> -			list_add_tail(&buf->list, &dma->submitted);
> -			spin_unlock_irqrestore(&dma->lock, flags);
> -		}
> -	}
>  }
>  
>  static int ti_csi2rx_get_stream(struct ti_csi2rx_ctx *ctx)


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ