Message-Id: <1327572121-13673-5-git-send-email-gilad@benyossef.com>
Date: Thu, 26 Jan 2012 12:01:57 +0200
From: Gilad Ben-Yossef <gilad@...yossef.com>
To: linux-kernel@...r.kernel.org
Cc: Gilad Ben-Yossef <gilad@...yossef.com>,
Chris Metcalf <cmetcalf@...era.com>,
Christoph Lameter <cl@...ux-foundation.org>,
Peter Zijlstra <a.p.zijlstra@...llo.nl>,
Frederic Weisbecker <fweisbec@...il.com>,
Russell King <linux@....linux.org.uk>, linux-mm@...ck.org,
Pekka Enberg <penberg@...nel.org>,
Matt Mackall <mpm@...enic.com>,
Sasha Levin <levinsasha928@...il.com>,
Rik van Riel <riel@...hat.com>,
Andi Kleen <andi@...stfloor.org>,
Alexander Viro <viro@...iv.linux.org.uk>,
linux-fsdevel@...r.kernel.org, Avi Kivity <avi@...hat.com>,
Michal Nazarewicz <mina86@...a86.com>,
Kosaki Motohiro <kosaki.motohiro@...il.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Milton Miller <miltonm@....com>
Subject: [v7 4/8] smp: add func to IPI cpus based on parameter func

Add the on_each_cpu_cond() function that wraps on_each_cpu_mask() and
calculates the cpumask of cpus to IPI by calling a function, supplied
as a parameter, to determine whether each specific cpu should be IPIed.

The function works around a possible allocation failure of the cpumask
variable when CONFIG_CPUMASK_OFFSTACK=y by iterating over the cpus and
sending an IPI to each relevant cpu, one at a time, via
smp_call_function_single().

The function is useful since it separates the case-specific code that
decides whether to IPI a particular cpu for a particular request from
the common boilerplate of creating the mask, handling allocation
failures, etc.
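
For illustration only, a caller that wants to IPI just the cpus that
have pending per-cpu work could look roughly like the sketch below.
The per-cpu counter and both helpers are made up for this example
(assuming <linux/smp.h>, <linux/percpu.h> and <linux/gfp.h>); they are
not part of this patch:

	static DEFINE_PER_CPU(unsigned int, pending_work_count);

	/* cond_func: called with preemption disabled, must not sleep. */
	static bool cpu_has_pending_work(int cpu, void *info)
	{
		return per_cpu(pending_work_count, cpu) != 0;
	}

	/* func: runs in IPI context on each cpu picked by cond_func. */
	static void drain_pending_work(void *info)
	{
		this_cpu_write(pending_work_count, 0);
	}

	/* IPI only the cpus that report pending work and wait for them. */
	on_each_cpu_cond(cpu_has_pending_work, drain_pending_work, NULL,
			 true, GFP_KERNEL);
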
Signed-off-by: Gilad Ben-Yossef <gilad@...yossef.com>
CC: Chris Metcalf <cmetcalf@...era.com>
CC: Christoph Lameter <cl@...ux-foundation.org>
CC: Peter Zijlstra <a.p.zijlstra@...llo.nl>
CC: Frederic Weisbecker <fweisbec@...il.com>
CC: Russell King <linux@....linux.org.uk>
CC: linux-mm@...ck.org
CC: Pekka Enberg <penberg@...nel.org>
CC: Matt Mackall <mpm@...enic.com>
CC: Sasha Levin <levinsasha928@...il.com>
CC: Rik van Riel <riel@...hat.com>
CC: Andi Kleen <andi@...stfloor.org>
CC: Alexander Viro <viro@...iv.linux.org.uk>
CC: linux-fsdevel@...r.kernel.org
CC: Avi Kivity <avi@...hat.com>
CC: Michal Nazarewicz <mina86@...a86.com>
CC: Kosaki Motohiro <kosaki.motohiro@...il.com>
CC: Andrew Morton <akpm@...ux-foundation.org>
CC: Milton Miller <miltonm@....com>
---
include/linux/smp.h | 19 ++++++++++++++++
kernel/smp.c | 58 +++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 77 insertions(+), 0 deletions(-)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index d0adb78..e1ea702 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -109,6 +109,15 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
void *info, bool wait);
/*
+ * Call a function on each processor for which the supplied function
+ * cond_func returns true, optionally waiting for all the required
+ * CPUs to finish. This may include the local processor.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+ smp_call_func_t func, void *info, bool wait,
+ gfp_t gfpflags);
+
+/*
* Mark the boot cpu "online" so that it can call console drivers in
* printk() and can access its per-cpu storage.
*/
@@ -153,6 +162,16 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info)
local_irq_enable(); \
} \
} while (0)
+#define on_each_cpu_cond(cond_func, func, info, wait, gfpflags) \
+ do { \
+ preempt_disable(); \
+ if (cond_func(0, info)) { \
+ local_irq_disable(); \
+ (func)(info); \
+ local_irq_enable(); \
+ } \
+ preempt_enable(); \
+ } while (0)
static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus() 1
diff --git a/kernel/smp.c b/kernel/smp.c
index a081e6c..fa0912a 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -730,3 +730,61 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
+
+/*
+ * on_each_cpu_cond(): Call a function on each processor for which
+ * the supplied function cond_func returns true, optionally waiting
+ * for all the required CPUs to finish. This may include the local
+ * processor.
+ * @cond_func: A callback function that is passed a cpu id and
+ * the info parameter. The function is called
+ * with preemption disabled. The function should
+ * return a boolean value indicating whether to IPI
+ * the specified CPU.
+ * @func: The function to run on all applicable CPUs.
+ * This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to both functions.
+ * @wait: If true, wait (atomically) until function has
+ * completed on other CPUs.
+ * @gfpflags: GFP flags to use when allocating the cpumask
+ * used internally by the function.
+ *
+ * The function might sleep if the GFP flags indicate a
+ * non-atomic allocation is allowed.
+ *
+ * You must not call this function with disabled interrupts or
+ * from a hardware interrupt handler or from a bottom half handler.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+ smp_call_func_t func, void *info, bool wait,
+ gfp_t gfpflags)
+{
+ cpumask_var_t cpus;
+ int cpu, ret;
+
+ might_sleep_if(gfpflags & __GFP_WAIT);
+
+ if (likely(zalloc_cpumask_var(&cpus, (gfpflags|__GFP_NOWARN)))) {
+ preempt_disable();
+ for_each_online_cpu(cpu)
+ if (cond_func(cpu, info))
+ cpumask_set_cpu(cpu, cpus);
+ on_each_cpu_mask(cpus, func, info, wait);
+ preempt_enable();
+ free_cpumask_var(cpus);
+ } else {
+ /*
+ * No free cpumask, bother. No matter, we'll
+ * just have to IPI them one by one.
+ */
+ preempt_disable();
+ for_each_online_cpu(cpu)
+ if (cond_func(cpu, info)) {
+ ret = smp_call_function_single(cpu, func,
+ info, wait);
+ WARN_ON_ONCE(ret);
+ }
+ preempt_enable();
+ }
+}
+EXPORT_SYMBOL(on_each_cpu_cond);
--
1.7.0.4
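
Postscript on the @gfpflags parameter (illustrative only, reusing the
hypothetical helpers from the sketch in the changelog above): a caller
that is not allowed to sleep would pass an atomic allocation flag so
the internal cpumask allocation does not sleep; if that allocation
fails, the function simply falls back to IPIing the matching cpus one
at a time via smp_call_function_single():

	/* Non-sleeping context: atomic allocation, don't wait for cpus. */
	on_each_cpu_cond(cpu_has_pending_work, drain_pending_work, NULL,
			 false, GFP_ATOMIC);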