lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Fri, 17 Apr 2015 12:17:18 -0400
From:	Chris Metcalf <cmetcalf@...hip.com>
To:	Frederic Weisbecker <fweisbec@...il.com>
CC:	Don Zickus <dzickus@...hat.com>, Ingo Molnar <mingo@...nel.org>,
	Andrew Morton <akpm@...ux-foundation.org>,
	Andrew Jones <drjones@...hat.com>,
	chai wen <chaiw.fnst@...fujitsu.com>,
	Ulrich Obergfell <uobergfe@...hat.com>,
	Fabian Frederick <fabf@...net.be>,
	Aaron Tomlin <atomlin@...hat.com>,
	Ben Zhang <benzh@...omium.org>,
	Christoph Lameter <cl@...ux.com>,
	Gilad Ben-Yossef <gilad@...yossef.com>,
	Steven Rostedt <rostedt@...dmis.org>,
	<linux-kernel@...r.kernel.org>, Jonathan Corbet <corbet@....net>,
	<linux-doc@...r.kernel.org>, Thomas Gleixner <tglx@...utronix.de>,
	Peter Zijlstra <peterz@...radead.org>
Subject: Re: [PATCH v8 1/3] smpboot: allow excluding cpus from the smpboot
 threads

On 04/16/2015 11:28 AM, Frederic Weisbecker wrote:
> So unfortunately I had to see the result to realize my mistake on one detail.
> With this scheme, it's not clear who allocates and who releases the cpumasks.
> If the caller of smpboot_register_percpu_thread() allocates the cpumask, then he
> should release it itself after calling smpboot_unregister_percpu_thread(). But
> if the cpumask is NULL and we call smpboot_update_cpumask_percpu_thread(), it's
> not clear to the caller if we make a copy, if he can release it after calling
> the function, etc...
>
> So the client should not touch the cpumask field of struct smp_hotplug_thread at all
> and it should pass the cpumask to smpboot_register_percpu_thread() and smpboot_update_cpumask_percpu_thread().
>
> smpboot subsystem then does its own copy to the struct smp_hotplug_thread which it releases from
> smpboot_unregister_percpu_thread().
>
> This way we prevent from any nasty side effect or headscratch about who is responsible
> for allocations and releases.

So here's a diff to change things in line with your suggestions.
Other than the unfortunate awkwardness that we now have to
test for error return from smpboot_update_cpumask_percpu_thread(),
I think it's a net win.

Note that I assume it's reasonable to register the smpboot thread
and create and unpark all the threads, then later call back in to
park the ones that we want to park.  This avoids having to create
another API just for the case of the watchdog.  It has some runtime
cost but I think relatively minor.

This is just the delta diff for your convenience; I'll roll out a v9 with
the various suggestions I've received shortly.

diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index 63271b19333e..7c42153edfac 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -27,9 +27,8 @@ struct smpboot_thread_data;
   * @pre_unpark:		Optional unpark function, called before the thread is
   *			unparked (cpu online). This is not guaranteed to be
   *			called on the target cpu of the thread. Careful!
- * @cpumask:		Optional pointer to a set of possible cores to
- *			allow threads to come unparked on.  To change it later
- *			you must call smpboot_update_cpumask_percpu_thread().
+ * @cpumask:		Internal state.  To update which threads are unparked,
+ *			call smpboot_update_cpumask_percpu_thread().
   * @selfparking:	Thread is not parked by the park function.
   * @thread_comm:	The base name of the thread
   */
@@ -44,14 +43,14 @@ struct smp_hotplug_thread {
  	void				(*park)(unsigned int cpu);
  	void				(*unpark)(unsigned int cpu);
  	void				(*pre_unpark)(unsigned int cpu);
-	struct cpumask			*cpumask;
+	struct cpumask			cpumask;
  	bool				selfparking;
  	const char			*thread_comm;
  };
  
  int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread);
  void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
-void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
-					  const struct cpumask *);
+int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+					 const struct cpumask *);
  
  #endif
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index c5d53a335387..0d131daf3e7f 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -92,9 +92,6 @@ enum {
  	HP_THREAD_PARKED,
  };
  
-/* Statically allocated and used under smpboot_threads_lock. */
-static struct cpumask tmp_mask;
-
  /**
   * smpboot_thread_fn - percpu hotplug thread loop function
   * @data:	thread data pointer
@@ -235,7 +232,7 @@ void smpboot_unpark_threads(unsigned int cpu)
  
  	mutex_lock(&smpboot_threads_lock);
  	list_for_each_entry(cur, &hotplug_threads, list)
-		if (cur->cpumask == NULL || cpumask_test_cpu(cpu, cur->cpumask))
+		if (cpumask_test_cpu(cpu, &cur->cpumask))
  			smpboot_unpark_thread(cur, cpu);
  	mutex_unlock(&smpboot_threads_lock);
  }
@@ -263,9 +260,8 @@ static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
  	unsigned int cpu;
  
  	/* Unpark any threads that were voluntarily parked. */
-	if (ht->cpumask) {
-		cpumask_andnot(&tmp_mask, cpu_online_mask, ht->cpumask);
-		for_each_cpu(cpu, &tmp_mask) {
+	for_each_cpu_not(cpu, &ht->cpumask) {
+		if (cpu_online(cpu)) {
  			struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
  			if (tsk)
  				kthread_unpark(tsk);
@@ -295,6 +291,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
  	unsigned int cpu;
  	int ret = 0;
  
+	cpumask_copy(&plug_thread->cpumask, cpu_possible_mask);
  	get_online_cpus();
  	mutex_lock(&smpboot_threads_lock);
  	for_each_online_cpu(cpu) {
@@ -303,9 +300,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
  			smpboot_destroy_threads(plug_thread);
  			goto out;
  		}
-		if (plug_thread->cpumask == NULL ||
-		    cpumask_test_cpu(cpu, plug_thread->cpumask))
-			smpboot_unpark_thread(plug_thread, cpu);
+		smpboot_unpark_thread(plug_thread, cpu);
  	}
  	list_add(&plug_thread->list, &hotplug_threads);
  out:
@@ -338,34 +333,39 @@ EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
   * @new:		Revised mask to use
   *
   * The cpumask field in the smp_hotplug_thread must not be updated directly
- * by the client, but only by calling this function.  A non-NULL cpumask must
- * have been provided at registration time to be able to use this function.
+ * by the client, but only by calling this function.
   */
-void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
-					  const struct cpumask *new)
+int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+					 const struct cpumask *new)
  {
+	struct cpumask *old = &plug_thread->cpumask;
+	cpumask_var_t tmp;
  	unsigned int cpu;
-	struct cpumask *old = plug_thread->cpumask;
  
-	BUG_ON(old == NULL);
+	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
+		return -ENOMEM;
  
  	get_online_cpus();
  	mutex_lock(&smpboot_threads_lock);
  
  	/* Park threads that were exclusively enabled on the old mask. */
-	cpumask_andnot(&tmp_mask, old, new);
-	for_each_cpu_and(cpu, &tmp_mask, cpu_online_mask)
+	cpumask_andnot(tmp, old, new);
+	for_each_cpu_and(cpu, tmp, cpu_online_mask)
  		smpboot_park_thread(plug_thread, cpu);
  
  	/* Unpark threads that are exclusively enabled on the new mask. */
-	cpumask_andnot(&tmp_mask, new, old);
-	for_each_cpu_and(cpu, &tmp_mask, cpu_online_mask)
+	cpumask_andnot(tmp, new, old);
+	for_each_cpu_and(cpu, tmp, cpu_online_mask)
  		smpboot_unpark_thread(plug_thread, cpu);
  
  	cpumask_copy(old, new);
  
  	mutex_unlock(&smpboot_threads_lock);
  	put_online_cpus();
+
+	free_cpumask_var(tmp);
+
+	return 0;
  }
  EXPORT_SYMBOL_GPL(smpboot_update_cpumask_percpu_thread);
  
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index a4e1c9a2e769..f3702cf7582b 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -57,7 +57,6 @@ int __read_mostly sysctl_softlockup_all_cpu_backtrace;
  #else
  #define sysctl_softlockup_all_cpu_backtrace 0
  #endif
-static cpumask_var_t watchdog_cpumask_for_smpboot;
  static cpumask_var_t watchdog_cpumask;
  unsigned long *watchdog_cpumask_bits;
  
@@ -706,8 +705,12 @@ static int watchdog_enable_all_cpus(void)
  		err = smpboot_register_percpu_thread(&watchdog_threads);
  		if (err)
  			pr_err("Failed to create watchdog threads, disabled\n");
-		else
+		else {
+			if (smpboot_update_cpumask_percpu_thread(
+				    &watchdog_threads, watchdog_cpumask))
+				pr_err("Failed to set cpumask for watchdog threads\n");
  			watchdog_running = 1;
+		}
  	} else {
  		/*
  		 * Enable/disable the lockup detectors or
@@ -911,12 +914,10 @@ void __init lockup_detector_init(void)
  {
  	set_sample_period();
  
-	/* One cpumask is allocated for smpboot to own. */
-	alloc_cpumask_var(&watchdog_cpumask_for_smpboot, GFP_KERNEL);
-	watchdog_threads.cpumask = watchdog_cpumask_for_smpboot;
-
-	/* Another cpumask is allocated for /proc to use. */
-	alloc_cpumask_var(&watchdog_cpumask, GFP_KERNEL);
+	if (!alloc_cpumask_var(&watchdog_cpumask, GFP_KERNEL)) {
+		pr_err("Failed to allocate cpumask for watchdog\n");
+		return;
+	}
  	watchdog_cpumask_bits = cpumask_bits(watchdog_cpumask);
  
  #ifdef CONFIG_NO_HZ_FULL

-- 
Chris Metcalf, EZChip Semiconductor
http://www.ezchip.com

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ