Message-Id: <20230729092634.78336-1-dg573847474@gmail.com>
Date: Sat, 29 Jul 2023 09:26:34 +0000
From: Chengfeng Ye <dg573847474@...il.com>
To: gregkh@...uxfoundation.org, gustavoars@...nel.org,
u.kleine-koenig@...gutronix.de
Cc: giometti@...ux.it, linux-usb@...r.kernel.org,
linux-kernel@...r.kernel.org, Chengfeng Ye <dg573847474@...il.com>
Subject: [PATCH v2] usb: host: oxu210hp-hcd: Fix potential deadlock on &oxu->mem_lock
&oxu->mem_lock is acquired by the ISR oxu_irq() in hardirq context,
along the call chain below.
<hard interrupt>
-> oxu_irq()
-> oxu210_hcd_irq()
-> ehci_work()
-> scan_async()
-> qh_completions()
-> oxu_murb_free()
-> spin_lock(&oxu->mem_lock)
Process-context acquisitions of the lock must therefore disable local
interrupts; otherwise, a deadlock occurs if the interrupt fires on the
same CPU while the lock is held in process context.
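For example (an illustrative interleaving, not a trace from a real run;
oxu_qtd_free() stands in for any of the process-context lock holders
patched below):

  oxu_qtd_free()                      /* process context */
    spin_lock(&oxu->mem_lock);
    <hard interrupt on the same CPU>
      oxu_irq()
        -> ...
        -> oxu_murb_free()
             spin_lock(&oxu->mem_lock);  /* deadlock: already held */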
This flaw was found by an experimental static analysis tool I am
developing for IRQ-related deadlocks. An x86_64 allmodconfig build with
gcc shows no new warnings.
Fix the potential deadlocks by using spin_lock_irqsave() and
spin_unlock_irqrestore() on &oxu->mem_lock.
Signed-off-by: Chengfeng Ye <dg573847474@...il.com>
---
Changes in v2:
 - Use spin_lock_irqsave() on more potential deadlock sites of
   &oxu->mem_lock.
 drivers/usb/host/oxu210hp-hcd.c | 42 ++++++++++++++++++++-------------
 1 file changed, 25 insertions(+), 17 deletions(-)

diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 50c1ccabb0f5..83061204b033 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -908,6 +908,7 @@ static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
 {
 	int n_blocks;	/* minium blocks needed to hold len */
 	int a_blocks;	/* blocks allocated */
+	unsigned long flags;
 	int i, j;
 
 	/* Don't allocte bigger than supported */
@@ -916,7 +917,7 @@ static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
 		return -ENOMEM;
 	}
 
-	spin_lock(&oxu->mem_lock);
+	spin_lock_irqsave(&oxu->mem_lock, flags);
 
 	/* Number of blocks needed to hold len */
 	n_blocks = (len + BUFFER_SIZE - 1) / BUFFER_SIZE;
@@ -944,14 +945,14 @@ static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
 		qtd->qtd_buffer_len = BUFFER_SIZE * a_blocks;
 		oxu->db_used[i] = a_blocks;
 
-		spin_unlock(&oxu->mem_lock);
+		spin_unlock_irqrestore(&oxu->mem_lock, flags);
 
 		return 0;
 	}
 
 	/* Failed */
 
-	spin_unlock(&oxu->mem_lock);
+	spin_unlock_irqrestore(&oxu->mem_lock, flags);
 
 	return -ENOMEM;
 }
@@ -959,8 +960,9 @@ static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
 static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
 {
 	int index;
+	unsigned long flags;
 
-	spin_lock(&oxu->mem_lock);
+	spin_lock_irqsave(&oxu->mem_lock, flags);
 
 	index = (qtd->buffer - (void *) &oxu->mem->db_pool[0])
						 / BUFFER_SIZE;
@@ -969,7 +971,7 @@ static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
 	qtd->buffer_dma = 0;
 	qtd->buffer = NULL;
 
-	spin_unlock(&oxu->mem_lock);
+	spin_unlock_irqrestore(&oxu->mem_lock, flags);
 }
 
 static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma)
@@ -985,24 +987,26 @@ static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma)
 static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
 {
 	int index;
+	unsigned long flags;
 
 	if (qtd->buffer)
 		oxu_buf_free(oxu, qtd);
 
-	spin_lock(&oxu->mem_lock);
+	spin_lock_irqsave(&oxu->mem_lock, flags);
 
 	index = qtd - &oxu->mem->qtd_pool[0];
 	oxu->qtd_used[index] = 0;
 
-	spin_unlock(&oxu->mem_lock);
+	spin_unlock_irqrestore(&oxu->mem_lock, flags);
 }
 
 static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu)
 {
 	int i;
 	struct ehci_qtd *qtd = NULL;
+	unsigned long flags;
 
-	spin_lock(&oxu->mem_lock);
+	spin_lock_irqsave(&oxu->mem_lock, flags);
 
 	for (i = 0; i < QTD_NUM; i++)
 		if (!oxu->qtd_used[i])
@@ -1022,7 +1026,7 @@ static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu)
 		oxu->qtd_used[i] = 1;
 	}
 
-	spin_unlock(&oxu->mem_lock);
+	spin_unlock_irqrestore(&oxu->mem_lock, flags);
 
 	return qtd;
 }
@@ -1030,13 +1034,14 @@ static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu)
 static void oxu_qh_free(struct oxu_hcd *oxu, struct ehci_qh *qh)
 {
 	int index;
+	unsigned long flags;
 
-	spin_lock(&oxu->mem_lock);
+	spin_lock_irqsave(&oxu->mem_lock, flags);
 
 	index = qh - &oxu->mem->qh_pool[0];
 	oxu->qh_used[index] = 0;
 
-	spin_unlock(&oxu->mem_lock);
+	spin_unlock_irqrestore(&oxu->mem_lock, flags);
 }
 
 static void qh_destroy(struct kref *kref)
@@ -1058,8 +1063,9 @@ static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu)
 {
 	int i;
 	struct ehci_qh *qh = NULL;
+	unsigned long flags;
 
-	spin_lock(&oxu->mem_lock);
+	spin_lock_irqsave(&oxu->mem_lock, flags);
 
 	for (i = 0; i < QHEAD_NUM; i++)
 		if (!oxu->qh_used[i])
@@ -1086,7 +1092,7 @@ static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu)
 		oxu->qh_used[i] = 1;
 	}
 unlock:
-	spin_unlock(&oxu->mem_lock);
+	spin_unlock_irqrestore(&oxu->mem_lock, flags);
 
 	return qh;
 }
@@ -1106,13 +1112,14 @@ static inline void qh_put(struct ehci_qh *qh)
 static void oxu_murb_free(struct oxu_hcd *oxu, struct oxu_murb *murb)
 {
 	int index;
+	unsigned long flags;
 
-	spin_lock(&oxu->mem_lock);
+	spin_lock_irqsave(&oxu->mem_lock, flags);
 
 	index = murb - &oxu->murb_pool[0];
 	oxu->murb_used[index] = 0;
 
-	spin_unlock(&oxu->mem_lock);
+	spin_unlock_irqrestore(&oxu->mem_lock, flags);
 }
 
 static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu)
@@ -1120,8 +1127,9 @@ static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu)
 {
 	int i;
 	struct oxu_murb *murb = NULL;
+	unsigned long flags;
 
-	spin_lock(&oxu->mem_lock);
+	spin_lock_irqsave(&oxu->mem_lock, flags);
 
 	for (i = 0; i < MURB_NUM; i++)
 		if (!oxu->murb_used[i])
@@ -1133,7 +1141,7 @@ static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu)
 		oxu->murb_used[i] = 1;
 	}
 
-	spin_unlock(&oxu->mem_lock);
+	spin_unlock_irqrestore(&oxu->mem_lock, flags);
 
 	return murb;
 }
--
2.17.1