Message-Id: <4F413CE0-A857-4350-AE04-176AA8ADC44E@gmail.com>
Date: Mon, 3 Aug 2015 17:15:53 +0800
From: yalin wang <yalin.wang2010@...il.com>
To: Minfei Huang <mnfhuang@...il.com>
Cc: tj@...nel.org, rostedt@...dmis.org, mingo@...hat.com,
linux-kernel@...r.kernel.org, mhuang@...hat.com
Subject: Re: [PATCH] workqueue: Add the allocation flags to function schedule_on_each_cpu_gfp
> On Aug 3, 2015, at 16:27, Minfei Huang <mnfhuang@...il.com> wrote:
>
> Rename the function schedule_on_each_cpu() to schedule_on_each_cpu_gfp()
> and add the allocation flags as a parameter.
>
> In several situations in ftrace there is no way to recover once
> schedule_on_each_cpu() fails to allocate the percpu work. Pass the
> allocation flag __GFP_NOFAIL there to guarantee the allocation succeeds.
>
> Signed-off-by: Minfei Huang <mnfhuang@...il.com>
> ---
> arch/x86/platform/uv/uv_time.c | 2 +-
> include/linux/ftrace.h | 2 +-
> include/linux/workqueue.h | 2 +-
> kernel/trace/ftrace.c | 5 +++--
> kernel/trace/trace_events.c | 2 +-
> kernel/workqueue.c | 11 ++++++-----
> 6 files changed, 13 insertions(+), 11 deletions(-)
>
> diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
> index a244237..a87a16a 100644
> --- a/arch/x86/platform/uv/uv_time.c
> +++ b/arch/x86/platform/uv/uv_time.c
> @@ -405,7 +405,7 @@ static __init int uv_rtc_setup_clock(void)
> clock_event_device_uv.max_delta_ns = clocksource_uv.mask *
> (NSEC_PER_SEC / sn_rtc_cycles_per_second);
>
> - rc = schedule_on_each_cpu(uv_rtc_register_clockevents);
> + rc = schedule_on_each_cpu_gfp(uv_rtc_register_clockevents, GFP_KERNEL);
> if (rc) {
> x86_platform_ipi_callback = NULL;
> uv_rtc_deallocate_timers();
> diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
> index 6cd8c0e..d6d3cf5 100644
> --- a/include/linux/ftrace.h
> +++ b/include/linux/ftrace.h
> @@ -154,7 +154,7 @@ struct ftrace_ops_hash {
> *
> * Any private data added must also take care not to be freed and if private
> * data is added to a ftrace_ops that is in core code, the user of the
> - * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
> + * ftrace_ops must perform a schedule_on_each_cpu_gfp() before freeing it.
> */
> struct ftrace_ops {
> ftrace_func_t func;
> diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
> index 738b30b..2de50fe 100644
> --- a/include/linux/workqueue.h
> +++ b/include/linux/workqueue.h
> @@ -436,7 +436,7 @@ extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
> extern void flush_workqueue(struct workqueue_struct *wq);
> extern void drain_workqueue(struct workqueue_struct *wq);
>
> -extern int schedule_on_each_cpu(work_func_t func);
> +extern int schedule_on_each_cpu_gfp(work_func_t func, gfp_t gfp);
>
> int execute_in_process_context(work_func_t fn, struct execute_work *);
>
> diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
> index eb11011..f8d3111 100644
> --- a/kernel/trace/ftrace.c
> +++ b/kernel/trace/ftrace.c
> @@ -324,7 +324,7 @@ static void update_ftrace_function(void)
> * Make sure all CPUs see this. Yes this is slow, but static
> * tracing is slow and nasty to have enabled.
> */
> - schedule_on_each_cpu(ftrace_sync);
> + schedule_on_each_cpu_gfp(ftrace_sync, GFP_KERNEL | __GFP_NOFAIL);
> /* Now all cpus are using the list ops. */
> function_trace_op = set_function_trace_op;
> /* Make sure the function_trace_op is visible on all CPUs */
> @@ -2716,7 +2716,8 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
> * ourselves.
> */
> if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
> - schedule_on_each_cpu(ftrace_sync);
> + schedule_on_each_cpu_gfp(ftrace_sync,
> + GFP_KERNEL | __GFP_NOFAIL);
>
> arch_ftrace_trampoline_free(ops);
>
> diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
> index 404a372..6cf0dba 100644
> --- a/kernel/trace/trace_events.c
> +++ b/kernel/trace/trace_events.c
> @@ -2722,7 +2722,7 @@ static __init int event_test_thread(void *unused)
> if (!test_malloc)
> pr_info("failed to kmalloc\n");
>
> - schedule_on_each_cpu(test_work);
> + schedule_on_each_cpu_gfp(test_work, GFP_KERNEL);
>
> kfree(test_malloc);
>
> diff --git a/kernel/workqueue.c b/kernel/workqueue.c
> index 4c4f061..f7ef6bb 100644
> --- a/kernel/workqueue.c
> +++ b/kernel/workqueue.c
> @@ -2917,22 +2917,23 @@ bool cancel_delayed_work_sync(struct delayed_work *dwork)
> EXPORT_SYMBOL(cancel_delayed_work_sync);
>
> /**
> - * schedule_on_each_cpu - execute a function synchronously on each online CPU
> + * schedule_on_each_cpu_gfp - execute function synchronously on each online CPU
> * @func: the function to call
> + * @gfp: allocation flags
> *
> - * schedule_on_each_cpu() executes @func on each online CPU using the
> + * schedule_on_each_cpu_gfp() executes @func on each online CPU using the
> * system workqueue and blocks until all CPUs have completed.
> - * schedule_on_each_cpu() is very slow.
> + * schedule_on_each_cpu_gfp() is very slow.
> *
> * Return:
> * 0 on success, -errno on failure.
> */
> -int schedule_on_each_cpu(work_func_t func)
> +int schedule_on_each_cpu_gfp(work_func_t func, gfp_t gfp)
> {
> int cpu;
> struct work_struct __percpu *works;
>
> - works = alloc_percpu(struct work_struct);
> + works = alloc_percpu_gfp(struct work_struct, gfp);
> if (!works)
> return -ENOMEM;
>
> --
> 2.4.0
>
It would be better to also provide a wrapper named schedule_on_each_cpu(),
since that function is used frequently:

#define schedule_on_each_cpu(f) schedule_on_each_cpu_gfp(f, GFP_KERNEL)
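
As an untested alternative sketch, a static inline wrapper would keep
type checking on the callback argument, using the signature this patch
adds to include/linux/workqueue.h:

static inline int schedule_on_each_cpu(work_func_t func)
{
	/* Preserve the old default: a sleeping GFP_KERNEL allocation. */
	return schedule_on_each_cpu_gfp(func, GFP_KERNEL);
}

Either way, existing callers keep compiling unchanged, while ftrace can
still pass GFP_KERNEL | __GFP_NOFAIL explicitly.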
Thanks