Message-Id: <20210609231507.3031904-1-bjorn.andersson@linaro.org>
Date: Wed, 9 Jun 2021 16:15:07 -0700
From: Bjorn Andersson <bjorn.andersson@...aro.org>
To: Rob Clark <robdclark@...il.com>, Sean Paul <sean@...rly.run>,
David Airlie <airlied@...ux.ie>,
Daniel Vetter <daniel@...ll.ch>,
Dmitry Baryshkov <dmitry.baryshkov@...aro.org>,
Abhinav Kumar <abhinavk@...eaurora.org>
Cc: linux-arm-msm@...r.kernel.org, dri-devel@...ts.freedesktop.org,
freedreno@...ts.freedesktop.org, linux-kernel@...r.kernel.org
Subject: [PATCH] drm/msm/dpu: Avoid ABBA deadlock between IRQ modules
Handling of the interrupt callback lists is done in dpu_core_irq.c,
under the "cb_lock" spinlock. When these operations result in the need
for enabling or disabling the IRQ in the hardware, the code jumps to
dpu_hw_interrupts.c, which protects its operations with the "irq_lock"
spinlock.
When an interrupt fires, dpu_hw_intr_dispatch_irq() inspects the
hardware state while holding the "irq_lock" spinlock and jumps to
dpu_core_irq_callback_handler() to invoke the registered handlers, which
traverses the callback list under the "cb_lock" spinlock.
As such, in the event that these two paths run concurrently we will end
up with an ABBA deadlock.
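A condensed sketch of the two paths; the names follow the driver, the
bodies are abbreviated for illustration:

  /* register path (dpu_core_irq.c), before this patch */
  spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);   /* takes cb_lock  */
  list_add_tail(&register_irq_cb->list, ...);
  dpu_kms->hw_intr->ops.enable_irq(...);                     /* takes irq_lock */
  spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);

  /* dispatch path (dpu_hw_interrupts.c) */
  spin_lock_irqsave(&intr->irq_lock, irq_flags);             /* takes irq_lock */
  dpu_core_irq_callback_handler(...);                        /* takes cb_lock  */
  spin_unlock_irqrestore(&intr->irq_lock, irq_flags);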
Prior to '1c1e7763a6d4 ("drm/msm/dpu: simplify IRQ enabling/disabling")'
the enable/disable of the hardware interrupt was done outside the
"cb_lock" region, optimistically, by using an atomic enable-counter for
each interrupt and a warning print if someone changed the list between
the atomic_read and the time the operation concluded.
Rather than re-introducing the large array of atomics, serialize the
register/unregister operations under a single mutex.
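With the change below, the register/unregister paths take the new mutex
first and only hold "cb_lock" around the list manipulation, so
"irq_lock" is never acquired while "cb_lock" is held; roughly:

  mutex_lock(&dpu_kms->irq_obj.hw_enable_lock);
  spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
  /* list add/del only */
  spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
  dpu_kms->hw_intr->ops.enable_irq(...);   /* irq_lock no longer nested in cb_lock */
  mutex_unlock(&dpu_kms->irq_obj.hw_enable_lock);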
Fixes: 1c1e7763a6d4 ("drm/msm/dpu: simplify IRQ enabling/disabling")
Signed-off-by: Bjorn Andersson <bjorn.andersson@...aro.org>
---
drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c | 10 +++++++---
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 2 ++
2 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
index 4f110c428b60..62bbe35eff7b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -82,11 +82,13 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+ mutex_lock(&dpu_kms->irq_obj.hw_enable_lock);
spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
list_del_init(&register_irq_cb->list);
list_add_tail(&register_irq_cb->list,
&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
if (list_is_first(&register_irq_cb->list,
&dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) {
int ret = dpu_kms->hw_intr->ops.enable_irq(
@@ -96,8 +98,7 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
irq_idx);
}
-
- spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ mutex_unlock(&dpu_kms->irq_obj.hw_enable_lock);
return 0;
}
@@ -127,9 +128,11 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+ mutex_lock(&dpu_kms->irq_obj.hw_enable_lock);
spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
list_del_init(&register_irq_cb->list);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
/* empty callback list but interrupt is still enabled */
if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) {
int ret = dpu_kms->hw_intr->ops.disable_irq(
@@ -140,7 +143,7 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
irq_idx);
DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
}
- spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ mutex_unlock(&dpu_kms->irq_obj.hw_enable_lock);
return 0;
}
@@ -207,6 +210,7 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
dpu_disable_all_irqs(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
+ mutex_init(&dpu_kms->irq_obj.hw_enable_lock);
spin_lock_init(&dpu_kms->irq_obj.cb_lock);
/* Create irq callbacks for all possible irq_idx */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index f6840b1af6e4..5a162caea29d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -83,6 +83,7 @@ struct dpu_irq_callback {
* @total_irq: total number of irq_idx obtained from HW interrupts mapping
* @irq_cb_tbl: array of IRQ callbacks setting
* @cb_lock: callback lock
+ * @hw_enable_lock: lock to synchronize callback register and unregister
* @debugfs_file: debugfs file for irq statistics
*/
struct dpu_irq {
@@ -90,6 +91,7 @@ struct dpu_irq {
struct list_head *irq_cb_tbl;
atomic_t *irq_counts;
spinlock_t cb_lock;
+ struct mutex hw_enable_lock;
};
struct dpu_kms {
--
2.29.2