Message-ID: <tip-e31d6883f21c1cdfe5bc64e28411f8a92b783fde@git.kernel.org>
Date:   Wed, 4 Oct 2017 01:58:22 -0700
From:   tip-bot for Thomas Gleixner <tipbot@...or.com>
To:     linux-tip-commits@...r.kernel.org
Cc:     dzickus@...hat.com, npiggin@...il.com,
        torvalds@...uxfoundation.org, peterz@...radead.org,
        linux-kernel@...r.kernel.org, benh@...nel.crashing.org,
        tglx@...utronix.de, mingo@...nel.org, mpe@...erman.id.au,
        hpa@...or.com
Subject: [tip:core/watchdog] watchdog/core, powerpc: Lock cpus across
 reconfiguration

Commit-ID:  e31d6883f21c1cdfe5bc64e28411f8a92b783fde
Gitweb:     https://git.kernel.org/tip/e31d6883f21c1cdfe5bc64e28411f8a92b783fde
Author:     Thomas Gleixner <tglx@...utronix.de>
AuthorDate: Tue, 3 Oct 2017 16:37:53 +0200
Committer:  Thomas Gleixner <tglx@...utronix.de>
CommitDate: Wed, 4 Oct 2017 10:53:54 +0200

watchdog/core, powerpc: Lock cpus across reconfiguration

Instead of dropping the cpu hotplug lock after stopping the NMI watchdog
and the threads and reacquiring it for the restart, the code and the
protection rules become more obvious when the cpu hotplug lock is held
across the full reconfiguration.

Suggested-by: Linus Torvalds <torvalds@...uxfoundation.org>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Acked-by: Michael Ellerman <mpe@...erman.id.au>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Don Zickus <dzickus@...hat.com>
Cc: Benjamin Herrenschmidt <benh@...nel.crashing.org>
Cc: Nicholas Piggin <npiggin@...il.com>
Cc: linuxppc-dev@...ts.ozlabs.org
Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1710022105570.2114@nanos
---
 arch/powerpc/kernel/watchdog.c |  4 ----
 kernel/smpboot.c               |  3 +--
 kernel/watchdog.c              | 10 +++++++++-
 3 files changed, 10 insertions(+), 7 deletions(-)
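
For orientation, a sketch of how softlockup_reconfigure_threads() reads
with this patch applied, assembled from the watchdog.c hunks below; the
unchanged lines between the two hunks are elided, and the surrounding
kernel context is assumed:

static void softlockup_reconfigure_threads(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();		/* arch hook (e.g. powerpc) no longer locks itself */
	softlockup_park_all_threads();
	set_sample_period();
	/* ... unchanged lines between the two hunks ... */
	if (watchdog_enabled && watchdog_thresh)
		softlockup_unpark_threads();
	watchdog_nmi_start();		/* still under the cpu hotplug lock */
	cpus_read_unlock();
	/*
	 * Must be called outside the cpus locked section to prevent
	 * recursive locking in the perf code.
	 */
	__lockup_detector_cleanup();
}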

diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index 2673ec8..f9b4c63 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -359,21 +359,17 @@ void watchdog_nmi_stop(void)
 {
 	int cpu;
 
-	cpus_read_lock();
 	for_each_cpu(cpu, &wd_cpus_enabled)
 		stop_wd_on_cpu(cpu);
-	cpus_read_unlock();
 }
 
 void watchdog_nmi_start(void)
 {
 	int cpu;
 
-	cpus_read_lock();
 	watchdog_calc_timeouts();
 	for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
 		start_wd_on_cpu(cpu);
-	cpus_read_unlock();
 }
 
 /*
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index ed7507b..5043e74 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -351,7 +351,7 @@ void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread
 	static struct cpumask tmp;
 	unsigned int cpu;
 
-	get_online_cpus();
+	lockdep_assert_cpus_held();
 	mutex_lock(&smpboot_threads_lock);
 
 	/* Park threads that were exclusively enabled on the old mask. */
@@ -367,7 +367,6 @@ void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread
 	cpumask_copy(old, new);
 
 	mutex_unlock(&smpboot_threads_lock);
-	put_online_cpus();
 }
 
 static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 6ad6226..fff90fe 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -535,7 +535,6 @@ static void softlockup_update_smpboot_threads(void)
 
 	smpboot_update_cpumask_percpu_thread(&watchdog_threads,
 					     &watchdog_allowed_mask);
-	__lockup_detector_cleanup();
 }
 
 /* Temporarily park all watchdog threads */
@@ -554,6 +553,7 @@ static void softlockup_unpark_threads(void)
 
 static void softlockup_reconfigure_threads(void)
 {
+	cpus_read_lock();
 	watchdog_nmi_stop();
 	softlockup_park_all_threads();
 	set_sample_period();
@@ -561,6 +561,12 @@ static void softlockup_reconfigure_threads(void)
 	if (watchdog_enabled && watchdog_thresh)
 		softlockup_unpark_threads();
 	watchdog_nmi_start();
+	cpus_read_unlock();
+	/*
+	 * Must be called outside the cpus locked section to prevent
+	 * recursive locking in the perf code.
+	 */
+	__lockup_detector_cleanup();
 }
 
 /*
@@ -605,9 +611,11 @@ static inline void watchdog_disable_all_cpus(void) { }
 static inline void softlockup_init_threads(void) { }
 static void softlockup_reconfigure_threads(void)
 {
+	cpus_read_lock();
 	watchdog_nmi_stop();
 	lockup_detector_update_enable();
 	watchdog_nmi_start();
+	cpus_read_unlock();
 }
 #endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
 
