Message-Id: <20231207133452.14726-1-dg573847474@gmail.com>
Date: Thu, 7 Dec 2023 13:34:52 +0000
From: Chengfeng Ye <dg573847474@...il.com>
To: vkoul@...nel.org, allen.lkml@...il.com, arnd@...db.de,
christophe.jaillet@...adoo.fr
Cc: dmaengine@...r.kernel.org, linux-kernel@...r.kernel.org,
Chengfeng Ye <dg573847474@...il.com>
Subject: [PATCH v2] dmaengine: pch_dma: Fix potential deadlock on &pd_chan->lock

As &pd_chan->lock is acquired by pdc_tasklet() in softirq context, any
other acquisition of the same lock from process context should at least
disable bottom halves; otherwise a deadlock can occur if the tasklet
preempts execution while the lock is held in process context on
the same CPU.
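
To make the rule concrete, here is a minimal sketch of the pattern, with
hypothetical names (struct my_dev, my_tasklet_fn() and my_submit() are
invented, not pch_dma code): the process-context path takes the lock with
spin_lock_bh() so the tasklet cannot preempt it on the same CPU while the
lock is held.

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct my_dev {
	spinlock_t lock;		/* shared with the tasklet below */
	struct tasklet_struct tasklet;
	int pending;
};

/* Tasklet handler: runs in softirq context. */
static void my_tasklet_fn(struct tasklet_struct *t)
{
	struct my_dev *dev = from_tasklet(dev, t, tasklet);

	/*
	 * Softirqs do not nest on a CPU, so a plain spin_lock() is enough
	 * here as long as no hardirq handler takes this lock.
	 */
	spin_lock(&dev->lock);
	dev->pending = 0;
	spin_unlock(&dev->lock);
}

/* Process-context path, e.g. reached from a driver entry point. */
static void my_submit(struct my_dev *dev)
{
	/*
	 * spin_lock_bh() keeps the tasklet from running on this CPU while
	 * the lock is held; a plain spin_lock() here could deadlock exactly
	 * as described above.
	 */
	spin_lock_bh(&dev->lock);
	dev->pending++;
	spin_unlock_bh(&dev->lock);
}

static void my_dev_init(struct my_dev *dev)
{
	spin_lock_init(&dev->lock);
	tasklet_setup(&dev->tasklet, my_tasklet_fn);
	dev->pending = 0;
}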

pd_issue_pending(), pd_tx_submit(), pdc_desc_put() and pdc_desc_get()
appear to be reachable from process context without bottom halves
disabled.

One possible deadlock scenario:

pd_prep_slave_sg()
    -> pdc_desc_put()
    -> spin_lock(&pd_chan->lock)
        <tasklet softirq interruption>
        -> pdc_tasklet()
        -> spin_lock_irqsave(&pd_chan->lock, flags); (deadlock here)

This flaw was found by an experimental static analysis tool I am
developing for irq-related deadlocks.

This patch fixes the potential deadlock by using spin_lock_bh() to
disable bottom halves while the lock is held.

Signed-off-by: Chengfeng Ye <dg573847474@...il.com>
---
V2: change to spin_lock_bh() instead of spin_lock_irqsave()
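
For context on the v2 change: spin_lock_irqsave() is only required when the
lock is also taken from hardirq context; if the only atomic user is a
tasklet (softirq), as this patch assumes for &pd_chan->lock, disabling
bottom halves is sufficient and avoids masking interrupts. A hedged
illustration with a hypothetical demo_lock, not pch_dma code:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Lock shared only with softirq users (e.g. a tasklet): disabling BHs suffices. */
static void take_lock_vs_softirq(void)
{
	spin_lock_bh(&demo_lock);
	/* ... critical section ... */
	spin_unlock_bh(&demo_lock);
}

/* Lock also taken from a hardirq handler: interrupts must be disabled as well. */
static void take_lock_vs_hardirq(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}
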
drivers/dma/pch_dma.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index c359decc07a3..2c4e479ca124 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -410,7 +410,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
struct pch_dma_desc *desc = to_pd_desc(txd);
struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
- spin_lock(&pd_chan->lock);
+ spin_lock_bh(&pd_chan->lock);
if (list_empty(&pd_chan->active_list)) {
list_add_tail(&desc->desc_node, &pd_chan->active_list);
@@ -419,7 +419,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
list_add_tail(&desc->desc_node, &pd_chan->queue);
}
- spin_unlock(&pd_chan->lock);
+ spin_unlock_bh(&pd_chan->lock);
return 0;
}
@@ -447,7 +447,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
struct pch_dma_desc *ret = NULL;
int i = 0;
- spin_lock(&pd_chan->lock);
+ spin_lock_bh(&pd_chan->lock);
list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
i++;
if (async_tx_test_ack(&desc->txd)) {
@@ -457,15 +457,15 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
}
dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
}
- spin_unlock(&pd_chan->lock);
+ spin_unlock_bh(&pd_chan->lock);
dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
if (!ret) {
ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
if (ret) {
- spin_lock(&pd_chan->lock);
+ spin_lock_bh(&pd_chan->lock);
pd_chan->descs_allocated++;
- spin_unlock(&pd_chan->lock);
+ spin_unlock_bh(&pd_chan->lock);
} else {
dev_err(chan2dev(&pd_chan->chan),
"failed to alloc desc\n");
@@ -478,11 +478,11 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
static void pdc_desc_put(struct pch_dma_chan *pd_chan,
struct pch_dma_desc *desc)
{
if (desc) {
- spin_lock(&pd_chan->lock);
+ spin_lock_bh(&pd_chan->lock);
list_splice_init(&desc->tx_list, &pd_chan->free_list);
list_add(&desc->desc_node, &pd_chan->free_list);
- spin_unlock(&pd_chan->lock);
+ spin_unlock_bh(&pd_chan->lock);
}
}
@@ -557,9 +558,9 @@ static void pd_issue_pending(struct dma_chan *chan)
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
if (pdc_is_idle(pd_chan)) {
- spin_lock(&pd_chan->lock);
+ spin_lock_bh(&pd_chan->lock);
pdc_advance_work(pd_chan);
- spin_unlock(&pd_chan->lock);
+ spin_unlock_bh(&pd_chan->lock);
}
}
--
2.17.1