Message-ID: <20211125090028.786832-14-tudor.ambarus@microchip.com>
Date: Thu, 25 Nov 2021 11:00:28 +0200
From: Tudor Ambarus <tudor.ambarus@...rochip.com>
To: <ludovic.desroches@...rochip.com>, <vkoul@...nel.org>,
<richard.genoud@...il.com>, <gregkh@...uxfoundation.org>,
<jirislaby@...nel.org>
CC: <nicolas.ferre@...rochip.com>, <alexandre.belloni@...tlin.com>,
<mripard@...nel.org>, <linux-arm-kernel@...ts.infradead.org>,
<dmaengine@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<linux-serial@...r.kernel.org>,
Tudor Ambarus <tudor.ambarus@...rochip.com>
Subject: [PATCH v2 13/13] dmaengine: at_xdmac: Fix race over irq_status

Tasklets run with interrupts enabled, so we need to protect
atchan->irq_status with spin_lock_irq(), otherwise the tasklet can be
interrupted by the IRQ that modifies irq_status. While at it, rewrite
at_xdmac_tasklet() to get rid of a level of indentation.
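
To illustrate the pattern (a minimal sketch with placeholder sketch_*
names, reduced from the real interrupt handler and tasklet, not the
exact driver code):

	/* Hard IRQ: may fire at any point while the tasklet runs and
	 * rewrite atchan->irq_status behind its back.
	 */
	static irqreturn_t sketch_interrupt(int irq, void *dev_id)
	{
		struct at_xdmac_chan *atchan = dev_id;

		atchan->irq_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
		tasklet_schedule(&atchan->tasklet);

		return IRQ_HANDLED;
	}

	/* Tasklet: runs with interrupts enabled, so take the channel
	 * lock with spin_lock_irq(). That masks the IRQ above on this
	 * CPU for the whole critical section, so every check of
	 * atchan->irq_status sees one consistent snapshot.
	 */
	static void sketch_tasklet(struct tasklet_struct *t)
	{
		struct at_xdmac_chan *atchan = from_tasklet(atchan, t, tasklet);

		spin_lock_irq(&atchan->lock);
		/* ... all atchan->irq_status checks happen here ... */
		spin_unlock_irq(&atchan->lock);
	}
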
Fixes: e1f7c9eee707 ("dmaengine: at_xdmac: creation of the atmel eXtended DMA Controller driver")
Signed-off-by: Tudor Ambarus <tudor.ambarus@...rochip.com>
---
drivers/dma/at_xdmac.c | 80 +++++++++++++++++++-----------------------
1 file changed, 37 insertions(+), 43 deletions(-)

diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index ccd6ddb12b83..082c18d45188 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1623,6 +1623,7 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
dmaengine_desc_get_callback_invoke(txd, NULL);
}

+/* Called with atchan->lock held. */
static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
{
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
@@ -1641,8 +1642,6 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
dev_err(chan2dev(&atchan->chan), "request overflow error!!!");

- spin_lock_irq(&atchan->lock);
-
/* Channel must be disabled first as it's not done automatically */
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
@@ -1652,10 +1651,8 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
struct at_xdmac_desc,
xfer_node);

- spin_unlock_irq(&atchan->lock);
-
/* Print bad descriptor's details if needed */
- dev_dbg(chan2dev(&atchan->chan),
+ dev_err(chan2dev(&atchan->chan),
"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
__func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
bad_desc->lld.mbr_ubc);
@@ -1665,55 +1662,52 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
static void at_xdmac_tasklet(struct tasklet_struct *t)
{
+ struct dma_async_tx_descriptor *txd;
struct at_xdmac_chan *atchan = from_tasklet(atchan, t, tasklet);
struct at_xdmac_desc *desc;
u32 error_mask;

+ if (at_xdmac_chan_is_cyclic(atchan))
+ return at_xdmac_handle_cyclic(atchan);
+
+ error_mask = AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS |
+ AT_XDMAC_CIS_ROIS;
+
+ spin_lock_irq(&atchan->lock);
dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
__func__, atchan->irq_status);
+ if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
+ !(atchan->irq_status & error_mask)) {
+ return spin_unlock_irq(&atchan->lock);
+ }

- error_mask = AT_XDMAC_CIS_RBEIS
- | AT_XDMAC_CIS_WBEIS
- | AT_XDMAC_CIS_ROIS;
-
- if (at_xdmac_chan_is_cyclic(atchan)) {
- at_xdmac_handle_cyclic(atchan);
- } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
- || (atchan->irq_status & error_mask)) {
- struct dma_async_tx_descriptor *txd;
-
- if (atchan->irq_status & error_mask)
- at_xdmac_handle_error(atchan);
-
- spin_lock_irq(&atchan->lock);
- desc = list_first_entry(&atchan->xfers_list,
- struct at_xdmac_desc,
- xfer_node);
- dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
- if (!desc->active_xfer) {
- dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
- spin_unlock_irq(&atchan->lock);
- return;
- }
+ if (atchan->irq_status & error_mask)
+ at_xdmac_handle_error(atchan);
- txd = &desc->tx_dma_desc;
- dma_cookie_complete(txd);
- /* Remove the transfer from the transfer list. */
- list_del(&desc->xfer_node);
- spin_unlock_irq(&atchan->lock);
+ desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
+ xfer_node);
+ dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
+ if (!desc->active_xfer) {
+ dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
+ return spin_unlock_irq(&atchan->lock);
+ }
- if (txd->flags & DMA_PREP_INTERRUPT)
- dmaengine_desc_get_callback_invoke(txd, NULL);
+ txd = &desc->tx_dma_desc;
+ dma_cookie_complete(txd);
+ /* Remove the transfer from the transfer list. */
+ list_del(&desc->xfer_node);
+ spin_unlock_irq(&atchan->lock);
- dma_run_dependencies(txd);
+ if (txd->flags & DMA_PREP_INTERRUPT)
+ dmaengine_desc_get_callback_invoke(txd, NULL);
- spin_lock_irq(&atchan->lock);
- /* Move the xfer descriptors into the free descriptors list. */
- list_splice_tail_init(&desc->descs_list,
- &atchan->free_descs_list);
- at_xdmac_advance_work(atchan);
- spin_unlock_irq(&atchan->lock);
- }
+ dma_run_dependencies(txd);
+
+ spin_lock_irq(&atchan->lock);
+ /* Move the xfer descriptors into the free descriptors list. */
+ list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list);
+ at_xdmac_advance_work(atchan);
+ spin_unlock_irq(&atchan->lock);
}

static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
--
2.25.1