Message-ID: <20200123140237.125799-10-tudor.ambarus@microchip.com>
Date:   Thu, 23 Jan 2020 14:03:17 +0000
From:   <Tudor.Ambarus@...rochip.com>
To:     <Ludovic.Desroches@...rochip.com>, <dan.j.williams@...el.com>,
        <vkoul@...nel.org>
CC:     <dmaengine@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
        <Tudor.Ambarus@...rochip.com>
Subject: [PATCH 10/10] dmaengine: at_xdmac: Fix locking in tasklet

From: Tudor Ambarus <tudor.ambarus@...rochip.com>

Tasklets run with interrupts enabled. This means that the
spin_lock_irqsave() calls already present in the tasklet can be replaced
with spin_lock_irq(), which still protects against being interrupted by
an IRQ that tries to take the same lock (via calls to device_prep_dma_*,
for example).

spin_lock() and spin_lock_bh() in the tasklet are not enough to protect
against IRQs; update these to spin_lock_irq() as well.
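
For illustration only (not part of the patch), a minimal sketch of the
deadlock that spin_lock()/spin_lock_bh() leaves open in a tasklet;
example_lock, example_irq_handler() and example_tasklet() are
hypothetical names:

	/* Hypothetical sketch, not driver code. */
	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);

	static irqreturn_t example_irq_handler(int irq, void *dev_id)
	{
		spin_lock(&example_lock);	/* spins forever if this CPU already holds the lock */
		/* ... */
		spin_unlock(&example_lock);
		return IRQ_HANDLED;
	}

	static void example_tasklet(unsigned long data)
	{
		/*
		 * spin_lock()/spin_lock_bh() here would leave hardware IRQs
		 * enabled, so example_irq_handler() could fire on this CPU
		 * while the lock is held and deadlock on it.
		 */
		spin_lock_irq(&example_lock);	/* IRQs off: the handler cannot preempt us */
		/* ... */
		spin_unlock_irq(&example_lock);
	}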

at_xdmac_advance_work() can be called with interrupts enabled (when
called from the tasklet) or with interrupts disabled (when called from
at_xdmac_issue_pending()). Move the locking to the callers so that
spin_lock_irq() and spin_lock_irqsave() can be used for these two cases,
respectively.
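
Condensed from the diff below (error handling and the rest of the
tasklet loop omitted), the resulting call pattern is:

	/* Simplified sketch of the change, not the complete functions. */
	static void at_xdmac_tasklet(unsigned long data)
	{
		/* ... */
		spin_lock_irq(&atchan->lock);		/* tasklet: IRQs known to be enabled */
		at_xdmac_advance_work(atchan);		/* caller now holds the lock */
		spin_unlock_irq(&atchan->lock);
	}

	static void at_xdmac_issue_pending(struct dma_chan *chan)
	{
		struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
		unsigned long flags;

		if (!at_xdmac_chan_is_cyclic(atchan)) {
			spin_lock_irqsave(&atchan->lock, flags);	/* IRQ state unknown here */
			at_xdmac_advance_work(atchan);
			spin_unlock_irqrestore(&atchan->lock, flags);
		}
	}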

Signed-off-by: Tudor Ambarus <tudor.ambarus@...rochip.com>
---
 drivers/dma/at_xdmac.c | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 31321da69ae6..bb0eaf38b594 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1543,9 +1543,6 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
 static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 {
 	struct at_xdmac_desc	*desc;
-	unsigned long		flags;
-
-	spin_lock_irqsave(&atchan->lock, flags);
 
 	/*
 	 * If channel is enabled, do nothing, advance_work will be triggered
@@ -1559,8 +1556,6 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 		if (!desc->active_xfer)
 			at_xdmac_start_xfer(atchan, desc);
 	}
-
-	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
@@ -1596,7 +1591,7 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
 	if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
 		dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irq(&atchan->lock);
 
 	/* Channel must be disabled first as it's not done automatically */
 	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
@@ -1607,7 +1602,7 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
 				    struct at_xdmac_desc,
 				    xfer_node);
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irq(&atchan->lock);
 
 	/* Print bad descriptor's details if needed */
 	dev_dbg(chan2dev(&atchan->chan),
@@ -1640,21 +1635,21 @@ static void at_xdmac_tasklet(unsigned long data)
 		if (atchan->irq_status & error_mask)
 			at_xdmac_handle_error(atchan);
 
-		spin_lock(&atchan->lock);
+		spin_lock_irq(&atchan->lock);
 		desc = list_first_entry(&atchan->xfers_list,
 					struct at_xdmac_desc,
 					xfer_node);
 		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
 		if (!desc->active_xfer) {
 			dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
-			spin_unlock(&atchan->lock);
+			spin_unlock_irq(&atchan->lock);
 			return;
 		}
 
 		txd = &desc->tx_dma_desc;
 
 		at_xdmac_remove_xfer(atchan, desc);
-		spin_unlock(&atchan->lock);
+		spin_unlock_irq(&atchan->lock);
 
 		dma_cookie_complete(txd);
 		if (txd->flags & DMA_PREP_INTERRUPT)
@@ -1662,7 +1657,9 @@ static void at_xdmac_tasklet(unsigned long data)
 
 		dma_run_dependencies(txd);
 
+		spin_lock_irq(&atchan->lock);
 		at_xdmac_advance_work(atchan);
+		spin_unlock_irq(&atchan->lock);
 	}
 }
 
@@ -1723,11 +1720,15 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
 static void at_xdmac_issue_pending(struct dma_chan *chan)
 {
 	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+	unsigned long flags;
 
 	dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
 
-	if (!at_xdmac_chan_is_cyclic(atchan))
+	if (!at_xdmac_chan_is_cyclic(atchan)) {
+		spin_lock_irqsave(&atchan->lock, flags);
 		at_xdmac_advance_work(atchan);
+		spin_unlock_irqrestore(&atchan->lock, flags);
+	}
 
 	return;
 }
-- 
2.23.0
