Message-ID: <20240906093444.3571619-6-huangjunxian6@hisilicon.com>
Date: Fri, 6 Sep 2024 17:34:40 +0800
From: Junxian Huang <huangjunxian6@...ilicon.com>
To: <jgg@...pe.ca>, <leon@...nel.org>
CC: <linux-rdma@...r.kernel.org>, <linuxarm@...wei.com>,
<linux-kernel@...r.kernel.org>, <huangjunxian6@...ilicon.com>
Subject: [PATCH for-next 5/9] RDMA/hns: Fix spin_unlock_irqrestore() called with IRQs enabled
From: Chengchang Tang <tangchengchang@...wei.com>
Fix misuse of spin_lock_irq()/spin_unlock_irq() while a
spin_lock_irqsave()/spin_unlock_irqrestore() section is already held.
This was discovered through lock debugging, and the corresponding
log is as follows (a simplified sketch of the calling pattern follows
the trace below):
raw_local_irq_restore() called with IRQs enabled
WARNING: CPU: 96 PID: 2074 at kernel/locking/irqflag-debug.c:10 warn_bogus_irq_restore+0x30/0x40
...
Call trace:
warn_bogus_irq_restore+0x30/0x40
_raw_spin_unlock_irqrestore+0x84/0xc8
add_qp_to_list+0x11c/0x148 [hns_roce_hw_v2]
hns_roce_create_qp_common.constprop.0+0x240/0x780 [hns_roce_hw_v2]
hns_roce_create_qp+0x98/0x160 [hns_roce_hw_v2]
create_qp+0x138/0x258
ib_create_qp_kernel+0x50/0xe8
create_mad_qp+0xa8/0x128
ib_mad_port_open+0x218/0x448
ib_mad_init_device+0x70/0x1f8
add_client_context+0xfc/0x220
enable_device_and_get+0xd0/0x140
ib_register_device.part.0+0xf4/0x1c8
ib_register_device+0x34/0x50
hns_roce_register_device+0x174/0x3d0 [hns_roce_hw_v2]
hns_roce_init+0xfc/0x2c0 [hns_roce_hw_v2]
__hns_roce_hw_v2_init_instance+0x7c/0x1d0 [hns_roce_hw_v2]
hns_roce_hw_v2_init_instance+0x9c/0x180 [hns_roce_hw_v2]
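
For reference, a minimal sketch of the calling pattern this fix assumes
(simplified from add_qp_to_list(); the function and field names below are
illustrative, not a verbatim copy of the driver):

	#include <linux/spinlock.h>
	#include "hns_roce_device.h"

	static void add_qp_sketch(struct hns_roce_dev *hr_dev,
				  struct hns_roce_cq *send_cq,
				  struct hns_roce_cq *recv_cq)
	{
		unsigned long flags;

		/* IRQ state is saved and restored only at this outer level. */
		spin_lock_irqsave(&hr_dev->qp_list_lock, flags);

		/*
		 * The inner helpers must use plain spin_lock()/spin_unlock():
		 * IRQs are already disabled here. If they used the _irq
		 * variants, spin_unlock_irq() would re-enable IRQs before the
		 * outer spin_unlock_irqrestore() runs, which is exactly what
		 * warn_bogus_irq_restore() reports above.
		 */
		hns_roce_lock_cqs(send_cq, recv_cq);
		/* ... QP list manipulation ... */
		hns_roce_unlock_cqs(send_cq, recv_cq);

		spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
	}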
Fixes: 9a4435375cd1 ("IB/hns: Add driver files for hns RoCE driver")
Signed-off-by: Chengchang Tang <tangchengchang@...wei.com>
Signed-off-by: Junxian Huang <huangjunxian6@...ilicon.com>
---
drivers/infiniband/hw/hns/hns_roce_qp.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 1de384ce4d0e..6b03ba671ff8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -1460,19 +1460,19 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
__acquire(&send_cq->lock);
__acquire(&recv_cq->lock);
} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
__acquire(&recv_cq->lock);
} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
- spin_lock_irq(&recv_cq->lock);
+ spin_lock(&recv_cq->lock);
__acquire(&send_cq->lock);
} else if (send_cq == recv_cq) {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
__acquire(&recv_cq->lock);
} else if (send_cq->cqn < recv_cq->cqn) {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
} else {
- spin_lock_irq(&recv_cq->lock);
+ spin_lock(&recv_cq->lock);
spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
}
}
@@ -1492,13 +1492,13 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
spin_unlock(&recv_cq->lock);
} else if (send_cq == recv_cq) {
__release(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ spin_unlock(&send_cq->lock);
} else if (send_cq->cqn < recv_cq->cqn) {
spin_unlock(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ spin_unlock(&send_cq->lock);
} else {
spin_unlock(&send_cq->lock);
- spin_unlock_irq(&recv_cq->lock);
+ spin_unlock(&recv_cq->lock);
}
}
--
2.33.0