Message-Id: <20230810154114.405742-1-longman@redhat.com>
Date:   Thu, 10 Aug 2023 11:41:14 -0400
From:   Waiman Long <longman@...hat.com>
To:     Will Deacon <will@...nel.org>, Mark Rutland <mark.rutland@....com>
Cc:     linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
        Robin Murphy <robin.murphy@....com>,
        Waiman Long <longman@...hat.com>
Subject: [PATCH v5] perf/arm-dmc620: Fix dmc620_pmu_irqs_lock/cpu_hotplug_lock circular lock dependency

The following circular locking dependency was reported when running a
CPU online/offline test on an arm64 system.

[   84.195923] Chain exists of:
                 dmc620_pmu_irqs_lock --> cpu_hotplug_lock --> cpuhp_state-down

[   84.207305]  Possible unsafe locking scenario:

[   84.213212]        CPU0                    CPU1
[   84.217729]        ----                    ----
[   84.222247]   lock(cpuhp_state-down);
[   84.225899]                                lock(cpu_hotplug_lock);
[   84.232068]                                lock(cpuhp_state-down);
[   84.238237]   lock(dmc620_pmu_irqs_lock);
[   84.242236]
                *** DEADLOCK ***

The problematic locking order seems to be

	lock(dmc620_pmu_irqs_lock) --> lock(cpu_hotplug_lock)

This locking order arises because dmc620_pmu_get_irq() calls
cpuhp_state_add_instance_nocalls() while holding dmc620_pmu_irqs_lock.
Since dmc620_pmu_irqs_lock only protects the dmc620_pmu_irqs list, we
don't actually need to hold the lock while adding a new instance to the
CPU hotplug subsystem.
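
For reference, the ordering is established roughly like this (a
simplified sketch of the pre-patch driver code and of the hotplug core
in kernel/cpu.c, with details trimmed, not the exact code):

	/* drivers/perf/arm_dmc620_pmu.c, before this patch */
	mutex_lock(&dmc620_pmu_irqs_lock);
	irq = __dmc620_pmu_get_irq(irq_num);	/* may call cpuhp_state_add_instance_nocalls() */
	mutex_unlock(&dmc620_pmu_irqs_lock);

	/* kernel/cpu.c: cpuhp_state_add_instance_nocalls() ends up here */
	int __cpuhp_state_add_instance(enum cpuhp_state state,
				       struct hlist_node *node, bool invoke)
	{
		int ret;

		cpus_read_lock();	/* acquires cpu_hotplug_lock */
		ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
		cpus_read_unlock();
		return ret;
	}

so lockdep records dmc620_pmu_irqs_lock --> cpu_hotplug_lock.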

Fix this possible deadlock scenario by adding a new mutex,
dmc620_pmu_get_lock, to serialize the call to __dmc620_pmu_get_irq().
While at it, rename dmc620_pmu_irqs_lock to dmc620_pmu_list_lock, as it
now only protects the iteration and modification of the pmus_node and
irqs_node lists.

As a result, cpuhp_state_add_instance_nocalls() is no longer called
with the renamed dmc620_pmu_list_lock held, and cpu_hotplug_lock is no
longer acquired after dmc620_pmu_list_lock.
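
With the patch applied, the lock orderings seen by lockdep become,
roughly:

	get path:      dmc620_pmu_get_lock --> cpu_hotplug_lock
	               dmc620_pmu_get_lock --> dmc620_pmu_list_lock
	teardown path: cpu_hotplug_lock --> cpuhp_state-down --> dmc620_pmu_list_lock

dmc620_pmu_list_lock is now a leaf lock (no other lock in these chains
is acquired while it is held), so the previous cycle through
cpu_hotplug_lock can no longer form.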

Suggested-by: Robin Murphy <robin.murphy@....com>
Signed-off-by: Waiman Long <longman@...hat.com>
---
 drivers/perf/arm_dmc620_pmu.c | 34 ++++++++++++++++++++++------------
 1 file changed, 22 insertions(+), 12 deletions(-)

diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
index 9d0f01c4455a..a5bfc8f2e6ab 100644
--- a/drivers/perf/arm_dmc620_pmu.c
+++ b/drivers/perf/arm_dmc620_pmu.c
@@ -66,8 +66,14 @@
 #define DMC620_PMU_COUNTERn_OFFSET(n) \
 	(DMC620_PMU_COUNTERS_BASE + 0x28 * (n))
 
+/*
+ * The allowable lock ordering is:
+ * - dmc620_pmu_get_lock (protects call to __dmc620_pmu_get_irq())
+ * - dmc620_pmu_list_lock (protects pmus_node & irqs_node lists)
+ */
+static DEFINE_MUTEX(dmc620_pmu_get_lock);
+static DEFINE_MUTEX(dmc620_pmu_list_lock);
 static LIST_HEAD(dmc620_pmu_irqs);
-static DEFINE_MUTEX(dmc620_pmu_irqs_lock);
 
 struct dmc620_pmu_irq {
 	struct hlist_node node;
@@ -423,9 +429,11 @@ static struct dmc620_pmu_irq *__dmc620_pmu_get_irq(int irq_num)
 	struct dmc620_pmu_irq *irq;
 	int ret;
 
+	mutex_lock(&dmc620_pmu_list_lock);
 	list_for_each_entry(irq, &dmc620_pmu_irqs, irqs_node)
 		if (irq->irq_num == irq_num && refcount_inc_not_zero(&irq->refcount))
-			return irq;
+			goto unlock_out;
+	mutex_unlock(&dmc620_pmu_list_lock);
 
 	irq = kzalloc(sizeof(*irq), GFP_KERNEL);
 	if (!irq)
@@ -452,8 +460,10 @@ static struct dmc620_pmu_irq *__dmc620_pmu_get_irq(int irq_num)
 		goto out_free_irq;
 
 	irq->irq_num = irq_num;
+	mutex_lock(&dmc620_pmu_list_lock);
 	list_add(&irq->irqs_node, &dmc620_pmu_irqs);
-
+unlock_out:
+	mutex_unlock(&dmc620_pmu_list_lock);
 	return irq;
 
 out_free_irq:
@@ -467,17 +477,17 @@ static int dmc620_pmu_get_irq(struct dmc620_pmu *dmc620_pmu, int irq_num)
 {
 	struct dmc620_pmu_irq *irq;
 
-	mutex_lock(&dmc620_pmu_irqs_lock);
+	mutex_lock(&dmc620_pmu_get_lock);
 	irq = __dmc620_pmu_get_irq(irq_num);
-	mutex_unlock(&dmc620_pmu_irqs_lock);
+	mutex_unlock(&dmc620_pmu_get_lock);
 
 	if (IS_ERR(irq))
 		return PTR_ERR(irq);
 
 	dmc620_pmu->irq = irq;
-	mutex_lock(&dmc620_pmu_irqs_lock);
+	mutex_lock(&dmc620_pmu_list_lock);
 	list_add_rcu(&dmc620_pmu->pmus_node, &irq->pmus_node);
-	mutex_unlock(&dmc620_pmu_irqs_lock);
+	mutex_unlock(&dmc620_pmu_list_lock);
 
 	return 0;
 }
@@ -486,16 +496,16 @@ static void dmc620_pmu_put_irq(struct dmc620_pmu *dmc620_pmu)
 {
 	struct dmc620_pmu_irq *irq = dmc620_pmu->irq;
 
-	mutex_lock(&dmc620_pmu_irqs_lock);
+	mutex_lock(&dmc620_pmu_list_lock);
 	list_del_rcu(&dmc620_pmu->pmus_node);
 
 	if (!refcount_dec_and_test(&irq->refcount)) {
-		mutex_unlock(&dmc620_pmu_irqs_lock);
+		mutex_unlock(&dmc620_pmu_list_lock);
 		return;
 	}
 
 	list_del(&irq->irqs_node);
-	mutex_unlock(&dmc620_pmu_irqs_lock);
+	mutex_unlock(&dmc620_pmu_list_lock);
 
 	free_irq(irq->irq_num, irq);
 	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &irq->node);
@@ -638,10 +648,10 @@ static int dmc620_pmu_cpu_teardown(unsigned int cpu,
 		return 0;
 
 	/* We're only reading, but this isn't the place to be involving RCU */
-	mutex_lock(&dmc620_pmu_irqs_lock);
+	mutex_lock(&dmc620_pmu_list_lock);
 	list_for_each_entry(dmc620_pmu, &irq->pmus_node, pmus_node)
 		perf_pmu_migrate_context(&dmc620_pmu->pmu, irq->cpu, target);
-	mutex_unlock(&dmc620_pmu_irqs_lock);
+	mutex_unlock(&dmc620_pmu_list_lock);
 
 	WARN_ON(irq_set_affinity(irq->irq_num, cpumask_of(target)));
 	irq->cpu = target;
-- 
2.31.1
