Message-Id: <20230725155530.63315-1-dg573847474@gmail.com>
Date: Tue, 25 Jul 2023 15:55:30 +0000
From: Chengfeng Ye <dg573847474@...il.com>
To: mchehab@...nel.org, hverkuil-cisco@...all.nl, tiwai@...e.de
Cc: linux-media@...r.kernel.org, linux-kernel@...r.kernel.org,
Chengfeng Ye <dg573847474@...il.com>
Subject: [PATCH] media: dvb-core: Fix potential deadlock on &dmxdevfilter->dev->lock
As &dmxdevfilter->dev->lock is acquired by the timer dvb_dmxdev_filter_timeout()
in softirq context, any other acquisition of the same lock from process context
should disable softirqs; otherwise a deadlock can occur if the softirq preempts
the execution while the lock is held in process context on the same CPU.
The dvb_dmxdev_section_callback() and dvb_dmxdev_ts_callback() callbacks
acquire the lock without disabling softirqs.
[Possible deadlock scenario]
dvb_dmxdev_section_callback()
-> spin_lock(&dmxdevfilter->dev->lock);
<timer interrupt>
-> dvb_dmxdev_filter_timeout()
-> spin_lock_irq(&dmxdevfilter->dev->lock);
This flaw was found by an experimental static analysis tool I am developing
for irq-related deadlocks.
This tentative patch fixes the potential deadlock by converting the plain
spin_lock()/spin_unlock() calls to spin_lock_bh()/spin_unlock_bh(), which
disable softirqs while the lock is held.
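For reference, a minimal, self-contained sketch of the locking pattern (not
part of the patch itself; demo_lock, demo_timer_fn and demo_process_path are
hypothetical names used only for illustration):

#include <linux/spinlock.h>
#include <linux/timer.h>

static DEFINE_SPINLOCK(demo_lock);

/* Timer callback, runs in softirq context like dvb_dmxdev_filter_timeout(). */
static void demo_timer_fn(struct timer_list *t)
{
	/* Spins forever if process context on this CPU already holds demo_lock. */
	spin_lock_irq(&demo_lock);
	/* ... update shared state ... */
	spin_unlock_irq(&demo_lock);
}

/* Process-context path, like dvb_dmxdev_section_callback() after this patch. */
static void demo_process_path(void)
{
	/* Disables softirqs, so demo_timer_fn() cannot preempt us here. */
	spin_lock_bh(&demo_lock);
	/* ... access the same shared state ... */
	spin_unlock_bh(&demo_lock);
}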
Signed-off-by: Chengfeng Ye <dg573847474@...il.com>
---
drivers/media/dvb-core/dmxdev.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 9ce5f010de3f..e4b6427230ed 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -386,9 +386,9 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
wake_up(&dmxdevfilter->buffer.queue);
return 0;
}
- spin_lock(&dmxdevfilter->dev->lock);
+ spin_lock_bh(&dmxdevfilter->dev->lock);
if (dmxdevfilter->state != DMXDEV_STATE_GO) {
- spin_unlock(&dmxdevfilter->dev->lock);
+ spin_unlock_bh(&dmxdevfilter->dev->lock);
return 0;
}
del_timer(&dmxdevfilter->timer);
@@ -413,7 +413,7 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
dmxdevfilter->buffer.error = ret;
if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
dmxdevfilter->state = DMXDEV_STATE_DONE;
- spin_unlock(&dmxdevfilter->dev->lock);
+ spin_unlock_bh(&dmxdevfilter->dev->lock);
wake_up(&dmxdevfilter->buffer.queue);
return 0;
}
@@ -430,9 +430,9 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
#endif
int ret;
- spin_lock(&dmxdevfilter->dev->lock);
+ spin_lock_bh(&dmxdevfilter->dev->lock);
if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
- spin_unlock(&dmxdevfilter->dev->lock);
+ spin_unlock_bh(&dmxdevfilter->dev->lock);
return 0;
}
@@ -457,7 +457,7 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
buffer_flags);
} else {
if (buffer->error) {
- spin_unlock(&dmxdevfilter->dev->lock);
+ spin_unlock_bh(&dmxdevfilter->dev->lock);
wake_up(&buffer->queue);
return 0;
}
@@ -468,7 +468,7 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
}
if (ret < 0)
buffer->error = ret;
- spin_unlock(&dmxdevfilter->dev->lock);
+ spin_unlock_bh(&dmxdevfilter->dev->lock);
wake_up(&buffer->queue);
return 0;
}
--
2.17.1