Message-Id: <20180411183627.044868504@linuxfoundation.org>
Date: Wed, 11 Apr 2018 20:34:12 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        stable@...r.kernel.org, Thomas Gleixner <tglx@...utronix.de>,
        "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>,
        Ingo Molnar <mingo@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Sebastian Siewior <bigeasy@...utronix.de>,
        Steven Rostedt <rostedt@...dmis.org>,
        Sasha Levin <alexander.levin@...rosoft.com>
Subject: [PATCH 4.9 113/310] cpuhotplug: Link lock stacks for hotplug callbacks

4.9-stable review patch. If anyone has any objections, please let me know.

------------------

From: Thomas Gleixner <tglx@...utronix.de>

[ Upstream commit 49dfe2a6779717d9c18395684ee31bdc98b22e53 ]

The CPU hotplug callbacks are not covered by lockdep with respect to the
CPU hotplug rwsem:

CPU0                            CPU1
cpuhp_setup_state(STATE, startup, teardown);
  cpus_read_lock();
  invoke_callback_on_ap();
    kick_hotplug_thread(ap);
    wait_for_completion();      hotplug_thread_fn()
                                  lock(m);
                                  do_stuff();
                                  unlock(m);

Lockdep does not know about this dependency and will not trigger on the
following code sequence:

  lock(m);
  cpus_read_lock();

Add a lockdep map and connect the initiator's lock chain with the hotplug
thread's lock chain, so potential deadlocks can be detected.
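
For reference, here is a minimal sketch of the lockdep pseudo-lock idiom
this patch applies, shown in isolation (it assumes a kernel built with
CONFIG_LOCKDEP; the names demo_key, demo_map, worker_fn and kick_worker are
made up for the illustration and are not part of the patch):

  #include <linux/lockdep.h>
  #include <linux/mutex.h>

  static struct lock_class_key demo_key;
  static struct lockdep_map demo_map =
          STATIC_LOCKDEP_MAP_INIT("demo_map", &demo_key);

  static DEFINE_MUTEX(m);

  /* Worker side: wrap the real work in the pseudo-lock, so lockdep
   * records the dependency demo_map -> m. */
  static void worker_fn(void)
  {
          lock_map_acquire(&demo_map);
          mutex_lock(&m);
          /* do_stuff(); */
          mutex_unlock(&m);
          lock_map_release(&demo_map);
  }

  /* Initiator side: an empty acquire/release pair records that every
   * lock the caller currently holds is taken before demo_map. If a
   * caller holds m here, lockdep sees m -> demo_map, which conflicts
   * with the worker's demo_map -> m and triggers a report. */
  static void kick_worker(void)
  {
          lock_map_acquire(&demo_map);
          lock_map_release(&demo_map);
          /* kick the worker thread and wait for it here */
  }
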
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Tested-by: Paul E. McKenney <paulmck@...ux.vnet.ibm.com>
Acked-by: Ingo Molnar <mingo@...nel.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Sebastian Siewior <bigeasy@...utronix.de>
Cc: Steven Rostedt <rostedt@...dmis.org>
Link: http://lkml.kernel.org/r/20170524081549.709375845@linutronix.de
Signed-off-by: Sasha Levin <alexander.levin@...rosoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
 kernel/cpu.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -63,6 +63,12 @@ struct cpuhp_cpu_state {
 
 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
 
+#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
+static struct lock_class_key cpuhp_state_key;
+static struct lockdep_map cpuhp_state_lock_map =
+        STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
+#endif
+
 /**
  * cpuhp_step - Hotplug state machine step
  * @name:      Name of the step
@@ -563,6 +569,7 @@ static void cpuhp_thread_fun(unsigned in
 
         st->should_run = false;
 
+        lock_map_acquire(&cpuhp_state_lock_map);
         /* Single callback invocation for [un]install ? */
         if (st->single) {
                 if (st->cb_state < CPUHP_AP_ONLINE) {
@@ -594,6 +601,7 @@ static void cpuhp_thread_fun(unsigned in
                 else if (st->state > st->target)
                         ret = cpuhp_ap_offline(cpu, st);
         }
+        lock_map_release(&cpuhp_state_lock_map);
         st->result = ret;
         complete(&st->done);
 }
@@ -608,6 +616,9 @@ cpuhp_invoke_ap_callback(int cpu, enum c
         if (!cpu_online(cpu))
                 return 0;
 
+        lock_map_acquire(&cpuhp_state_lock_map);
+        lock_map_release(&cpuhp_state_lock_map);
+
         /*
          * If we are up and running, use the hotplug thread. For early calls
          * we invoke the thread function directly.
@@ -651,6 +662,8 @@ static int cpuhp_kick_ap_work(unsigned i
         enum cpuhp_state state = st->state;
 
         trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
+        lock_map_acquire(&cpuhp_state_lock_map);
+        lock_map_release(&cpuhp_state_lock_map);
         __cpuhp_kick_ap_work(st);
         wait_for_completion(&st->done);
         trace_cpuhp_exit(cpu, st->state, state, st->result);
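
Note that in cpuhp_invoke_ap_callback() and cpuhp_kick_ap_work() the map is
acquired and immediately released with nothing in between. That empty pair
is deliberate: it records, in the initiator's context, that any lock the
caller currently holds is taken before "cpuhp_state", while
cpuhp_thread_fun() records that "cpuhp_state" is held around the locks taken
by the hotplug callbacks. These two records are what let lockdep link the
initiator's and the hotplug thread's lock stacks and report an inversion
such as lock(m); cpus_read_lock();.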