Message-ID: <20251218-stm32-dcmi-dma-chaining-v1-6-39948ca6cbf6@foss.st.com>
Date: Thu, 18 Dec 2025 19:44:46 +0100
From: Alain Volmat <alain.volmat@...s.st.com>
To: Hugues Fruchet <hugues.fruchet@...s.st.com>,
        Mauro Carvalho Chehab <mchehab@...nel.org>,
        Maxime Coquelin <mcoquelin.stm32@...il.com>,
        Alexandre Torgue <alexandre.torgue@...s.st.com>,
        Rob Herring <robh@...nel.org>,
        Krzysztof Kozlowski <krzk+dt@...nel.org>,
        Conor Dooley <conor+dt@...nel.org>,
        Sumit Semwal <sumit.semwal@...aro.org>,
        Christian König <christian.koenig@....com>
CC: <linux-media@...r.kernel.org>, <linux-stm32@...md-mailman.stormreply.com>,
        <linux-arm-kernel@...ts.infradead.org>, <linux-kernel@...r.kernel.org>,
        <devicetree@...r.kernel.org>, <dri-devel@...ts.freedesktop.org>,
        <linaro-mm-sig@...ts.linaro.org>,
        Alain Volmat <alain.volmat@...s.st.com>
Subject: [PATCH 06/12] media: stm32: dcmi: perform all dma handling within irq_thread

Move the handling of all frame types into the dcmi_irq_thread
handler and stop relying on the dma_callback as was done previously.

This simplifies the code by using a single path for both compressed
and uncompressed data, and also makes the system more responsive,
since the irq_thread is likely to run sooner than the DMA completion
callback. Indeed, the DMA completion callback runs in a tasklet
scheduled by the DMA framework when the DMA interrupt fires, and
therefore runs at a lower priority than other interrupt handlers.

Signed-off-by: Alain Volmat <alain.volmat@...s.st.com>
---
 drivers/media/platform/st/stm32/stm32-dcmi.c | 97 ++++++----------------------
 1 file changed, 18 insertions(+), 79 deletions(-)
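
For reference, a minimal sketch of the single frame-handling path this patch
converges on. This is a simplified illustration, not the driver code itself:
the irqlock handling and register accesses are omitted, and everything apart
from the dmaengine calls and the driver helpers visible in the diff below
should be read as an assumption.

    /*
     * Simplified sketch of frame handling in a threaded IRQ handler
     * (registered elsewhere with request_threaded_irq()).
     */
    static irqreturn_t sketch_irq_thread(int irq, void *arg)
    {
    	struct stm32_dcmi *dcmi = arg;
    	struct dcmi_buf *buf = dcmi->active;
    	struct dma_tx_state state;
    	enum dma_status status;

    	if (!(dcmi->misr & IT_FRAME) || !buf)
    		return IRQ_HANDLED;

    	/* All DMA requests have been issued; wait for in-flight transfers. */
    	dmaengine_synchronize(dcmi->dma_chan);

    	/* The residue tells how much of the buffer was actually filled. */
    	status = dmaengine_tx_status(dcmi->dma_chan, dcmi->dma_cookie, &state);
    	if (status != DMA_ERROR && state.residue < buf->size)
    		dcmi_buffer_done(dcmi, buf, buf->size - state.residue, 0);
    	else
    		dcmi_buffer_done(dcmi, buf, 0, -EIO);

    	dmaengine_terminate_sync(dcmi->dma_chan);

    	/* Queue the next buffer, if any; a no-op when not RUNNING. */
    	if (dcmi_restart_capture(dcmi))
    		dev_err(dcmi->dev, "cannot restart capture\n");

    	return IRQ_HANDLED;
    }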

diff --git a/drivers/media/platform/st/stm32/stm32-dcmi.c b/drivers/media/platform/st/stm32/stm32-dcmi.c
index 1015f2d88f54..06b66095844b 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmi.c
@@ -227,8 +227,9 @@ static int dcmi_restart_capture(struct stm32_dcmi *dcmi)
 {
 	struct dcmi_buf *buf;
 
+	/* Nothing to do if we are not running */
 	if (dcmi->state != RUNNING)
-		return -EINVAL;
+		return 0;
 
 	/* Restart a new DMA transfer with next buffer */
 	if (list_empty(&dcmi->buffers)) {
@@ -242,52 +243,6 @@ static int dcmi_restart_capture(struct stm32_dcmi *dcmi)
 	return dcmi_start_capture(dcmi, buf);
 }
 
-static void dcmi_dma_callback(void *param)
-{
-	struct stm32_dcmi *dcmi = (struct stm32_dcmi *)param;
-	struct dma_tx_state state;
-	enum dma_status status;
-	struct dcmi_buf *buf = dcmi->active;
-
-	spin_lock_irq(&dcmi->irqlock);
-
-	/* Check DMA status */
-	status = dmaengine_tx_status(dcmi->dma_chan, dcmi->dma_cookie, &state);
-
-	switch (status) {
-	case DMA_IN_PROGRESS:
-		dev_dbg(dcmi->dev, "%s: Received DMA_IN_PROGRESS\n", __func__);
-		break;
-	case DMA_PAUSED:
-		dev_err(dcmi->dev, "%s: Received DMA_PAUSED\n", __func__);
-		break;
-	case DMA_ERROR:
-		dev_err(dcmi->dev, "%s: Received DMA_ERROR\n", __func__);
-
-		/* Return buffer to V4L2 in error state */
-		dcmi_buffer_done(dcmi, buf, 0, -EIO);
-		break;
-	case DMA_COMPLETE:
-		dev_dbg(dcmi->dev, "%s: Received DMA_COMPLETE\n", __func__);
-
-		/* Return buffer to V4L2 */
-		dcmi_buffer_done(dcmi, buf, buf->size, 0);
-
-		spin_unlock_irq(&dcmi->irqlock);
-
-		/* Restart capture */
-		if (dcmi_restart_capture(dcmi))
-			dev_err(dcmi->dev, "%s: Cannot restart capture on DMA complete\n",
-				__func__);
-		return;
-	default:
-		dev_err(dcmi->dev, "%s: Received unknown status\n", __func__);
-		break;
-	}
-
-	spin_unlock_irq(&dcmi->irqlock);
-}
-
 static int dcmi_start_dma(struct stm32_dcmi *dcmi,
 			  struct dcmi_buf *buf)
 {
@@ -344,7 +299,7 @@ static void dcmi_set_crop(struct stm32_dcmi *dcmi)
 	reg_set(dcmi->regs, DCMI_CR, CR_CROP);
 }
 
-static void dcmi_process_jpeg(struct stm32_dcmi *dcmi)
+static void dcmi_process_frame(struct stm32_dcmi *dcmi)
 {
 	struct dma_tx_state state;
 	enum dma_status status;
@@ -354,13 +309,11 @@ static void dcmi_process_jpeg(struct stm32_dcmi *dcmi)
 		return;
 
 	/*
-	 * Because of variable JPEG buffer size sent by sensor,
-	 * DMA transfer never completes due to transfer size never reached.
-	 * In order to ensure that all the JPEG data are transferred
-	 * in active buffer memory, DMA is drained.
-	 * Then DMA tx status gives the amount of data transferred
-	 * to memory, which is then returned to V4L2 through the active
-	 * buffer payload.
+	 * By the time the FRAME interrupt is received, all DMA requests have been issued,
+	 * but the DMA may still be transferring data, so synchronize first before
+	 * querying the status of the DMA transfer.
+	 * The DMA tx status then gives the amount of data transferred to memory, which is
+	 * returned to V4L2 through the active buffer payload.
 	 */
 
 	spin_unlock_irq(&dcmi->irqlock);
@@ -368,16 +321,16 @@ static void dcmi_process_jpeg(struct stm32_dcmi *dcmi)
 	dmaengine_synchronize(dcmi->dma_chan);
 	spin_lock_irq(&dcmi->irqlock);
 
-	/* Get DMA residue to get JPEG size */
+	/* Get DMA status and residue size */
 	status = dmaengine_tx_status(dcmi->dma_chan, dcmi->dma_cookie, &state);
 	if (status != DMA_ERROR && state.residue < buf->size) {
-		/* Return JPEG buffer to V4L2 with received JPEG buffer size */
+		/* Return buffer to V4L2 with received data size */
 		dcmi_buffer_done(dcmi, buf, buf->size - state.residue, 0);
 	} else {
 		dcmi->errors_count++;
-		dev_err(dcmi->dev, "%s: Cannot get JPEG size from DMA\n",
-			__func__);
-		/* Return JPEG buffer to V4L2 in ERROR state */
+		dev_err(dcmi->dev, "%s: DMA error. status: 0x%x, residue: %d\n",
+			__func__, status, state.residue);
+		/* Return buffer to V4L2 in ERROR state */
 		dcmi_buffer_done(dcmi, buf, 0, -EIO);
 	}
 
@@ -385,11 +338,6 @@ static void dcmi_process_jpeg(struct stm32_dcmi *dcmi)
 	/* Abort DMA operation */
 	dmaengine_terminate_sync(dcmi->dma_chan);
 	spin_lock_irq(&dcmi->irqlock);
-
-	/* Restart capture */
-	if (dcmi_restart_capture(dcmi))
-		dev_err(dcmi->dev, "%s: Cannot restart capture on JPEG received\n",
-			__func__);
 }
 
 static irqreturn_t dcmi_irq_thread(int irq, void *arg)
@@ -420,12 +368,10 @@ static irqreturn_t dcmi_irq_thread(int irq, void *arg)
 	if (dcmi->misr & IT_ERR)
 		dcmi->errors_count++;
 
-	if (dcmi->sd_format->fourcc == V4L2_PIX_FMT_JPEG &&
-	    dcmi->misr & IT_FRAME) {
-		/* JPEG received */
-		dcmi_process_jpeg(dcmi);
-		spin_unlock_irq(&dcmi->irqlock);
-		return IRQ_HANDLED;
+	if (dcmi->misr & IT_FRAME) {
+		dcmi_process_frame(dcmi);
+		if (dcmi_restart_capture(dcmi))
+			dev_err(dcmi->dev, "%s: Cannot restart capture\n", __func__);
 	}
 
 	spin_unlock_irq(&dcmi->irqlock);
@@ -542,10 +488,6 @@ static int dcmi_buf_prepare(struct vb2_buffer *vb)
 			return -EIO;
 		}
 
-		/* Set completion callback routine for notification */
-		buf->dma_desc->callback = dcmi_dma_callback;
-		buf->dma_desc->callback_param = dcmi;
-
 		/* Mark the descriptor as reusable to avoid having to prepare it */
 		ret = dmaengine_desc_set_reuse(buf->dma_desc);
 		if (ret) {
@@ -818,10 +760,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
 	}
 
 	/* Enable interruptions */
-	if (dcmi->sd_format->fourcc == V4L2_PIX_FMT_JPEG)
-		reg_set(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);
-	else
-		reg_set(dcmi->regs, DCMI_IER, IT_OVR | IT_ERR);
+	reg_set(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);
 
 	spin_unlock_irq(&dcmi->irqlock);
 

-- 
2.34.1

