From: "Steven Rostedt (VMware)" Pass the trace_array associated to a ftrace_probe_ops into the probe_ops func(), init() and free() functions. The trace_array is the descriptor that describes a tracing instance. This will help create the infrastructure that will allow having function probes unique to tracing instances. Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/ftrace.c | 13 +++++++++---- kernel/trace/trace.c | 14 ++++++++------ kernel/trace/trace.h | 3 +++ kernel/trace/trace_events.c | 16 +++++++++------- kernel/trace/trace_functions.c | 35 ++++++++++++++++++++++------------- 5 files changed, 51 insertions(+), 30 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index ea208e93f000..e51cd6b51253 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -3791,6 +3791,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct pt_regs *pt_regs) { struct ftrace_probe_ops *probe_ops; + struct trace_array *tr = op->private; probe_ops = container_of(op, struct ftrace_probe_ops, ops); @@ -3800,7 +3801,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, * on the hash. rcu_read_lock is too dangerous here. */ preempt_disable_notrace(); - probe_ops->func(ip, parent_ip, probe_ops, NULL); + probe_ops->func(ip, parent_ip, tr, probe_ops, NULL); preempt_enable_notrace(); } @@ -3969,6 +3970,7 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr, ops->ops.func = function_trace_probe_call; ftrace_ops_init(&ops->ops); INIT_LIST_HEAD(&ops->list); + ops->ops.private = tr; } mutex_lock(&ops->ops.func_hash->regex_lock); @@ -3997,7 +3999,7 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr, * to give the caller an opportunity to do so. 
*/ if (ops->init) { - ret = ops->init(ops, entry->ip, data); + ret = ops->init(ops, tr, entry->ip, data); if (ret < 0) goto out; } @@ -4038,7 +4040,7 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr, hlist_for_each_entry(entry, &hash->buckets[i], hlist) { if (ftrace_lookup_ip(old_hash, entry->ip)) continue; - ops->free(ops, entry->ip, NULL); + ops->free(ops, tr, entry->ip, NULL); } } goto out_unlock; @@ -4055,6 +4057,7 @@ unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops) struct ftrace_hash *hash = NULL; struct hlist_node *tmp; struct hlist_head hhd; + struct trace_array *tr; char str[KSYM_SYMBOL_LEN]; int i, ret; int size; @@ -4062,6 +4065,8 @@ unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops) if (!(ops->ops.flags & FTRACE_OPS_FL_INITIALIZED)) return -EINVAL; + tr = ops->ops.private; + if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) func_g.search = NULL; else if (glob) { @@ -4139,7 +4144,7 @@ unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops) hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { hlist_del(&entry->hlist); if (ops->free) - ops->free(ops, entry->ip, NULL); + ops->free(ops, tr, entry->ip, NULL); kfree(entry); } mutex_unlock(&ftrace_lock); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 86598293787a..368310e78d45 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -6736,14 +6736,16 @@ static const struct file_operations tracing_dyn_info_fops = { #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) static void ftrace_snapshot(unsigned long ip, unsigned long parent_ip, - struct ftrace_probe_ops *ops, void **data) + struct trace_array *tr, struct ftrace_probe_ops *ops, + void **data) { tracing_snapshot(); } static void ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, - struct ftrace_probe_ops *ops, void **data) + struct trace_array *tr, struct ftrace_probe_ops *ops, + void **data) { struct ftrace_func_mapper *mapper = ops->private_data; long *count = NULL; @@ -6785,8 +6787,8 @@ ftrace_snapshot_print(struct seq_file *m, unsigned long ip, } static int -ftrace_snapshot_init(struct ftrace_probe_ops *ops, unsigned long ip, - void *data) +ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr, + unsigned long ip, void *data) { struct ftrace_func_mapper *mapper = ops->private_data; @@ -6794,8 +6796,8 @@ ftrace_snapshot_init(struct ftrace_probe_ops *ops, unsigned long ip, } static void -ftrace_snapshot_free(struct ftrace_probe_ops *ops, unsigned long ip, - void **_data) +ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr, + unsigned long ip, void **data) { struct ftrace_func_mapper *mapper = ops->private_data; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 68ff25e4cb19..390761804886 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -943,11 +943,14 @@ struct ftrace_probe_ops { struct list_head list; void (*func)(unsigned long ip, unsigned long parent_ip, + struct trace_array *tr, struct ftrace_probe_ops *ops, void **data); int (*init)(struct ftrace_probe_ops *ops, + struct trace_array *tr, unsigned long ip, void *data); void (*free)(struct ftrace_probe_ops *ops, + struct trace_array *tr, unsigned long ip, void **data); int (*print)(struct seq_file *m, unsigned long ip, diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index f0d6e5aef53e..713bec614312 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ 
-2470,7 +2470,8 @@ static void update_event_probe(struct event_probe_data *data) static void event_enable_probe(unsigned long ip, unsigned long parent_ip, - struct ftrace_probe_ops *ops, void **_data) + struct trace_array *tr, struct ftrace_probe_ops *ops, + void **_data) { struct ftrace_func_mapper *mapper = ops->private_data; struct event_probe_data *data; @@ -2486,7 +2487,8 @@ event_enable_probe(unsigned long ip, unsigned long parent_ip, static void event_enable_count_probe(unsigned long ip, unsigned long parent_ip, - struct ftrace_probe_ops *ops, void **_data) + struct trace_array *tr, struct ftrace_probe_ops *ops, + void **_data) { struct ftrace_func_mapper *mapper = ops->private_data; struct event_probe_data *data; @@ -2513,7 +2515,7 @@ event_enable_count_probe(unsigned long ip, unsigned long parent_ip, static int event_enable_print(struct seq_file *m, unsigned long ip, - struct ftrace_probe_ops *ops, void *_data) + struct ftrace_probe_ops *ops, void *_data) { struct ftrace_func_mapper *mapper = ops->private_data; struct event_probe_data *data; @@ -2542,8 +2544,8 @@ event_enable_print(struct seq_file *m, unsigned long ip, } static int -event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip, - void *_data) +event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr, + unsigned long ip, void *_data) { struct ftrace_func_mapper *mapper = ops->private_data; struct event_probe_data *data = _data; @@ -2559,8 +2561,8 @@ event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip, } static void -event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip, - void **_data) +event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr, + unsigned long ip, void **_data) { struct ftrace_func_mapper *mapper = ops->private_data; struct event_probe_data *data; diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 2c8961b35401..797f087183c5 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -328,21 +328,24 @@ static void update_traceon_count(struct ftrace_probe_ops *ops, static void ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, - struct ftrace_probe_ops *ops, void **data) + struct trace_array *tr, struct ftrace_probe_ops *ops, + void **data) { update_traceon_count(ops, ip, 1); } static void ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, - struct ftrace_probe_ops *ops, void **data) + struct trace_array *tr, struct ftrace_probe_ops *ops, + void **data) { update_traceon_count(ops, ip, 0); } static void ftrace_traceon(unsigned long ip, unsigned long parent_ip, - struct ftrace_probe_ops *ops, void **data) + struct trace_array *tr, struct ftrace_probe_ops *ops, + void **data) { if (tracing_is_on()) return; @@ -352,7 +355,8 @@ ftrace_traceon(unsigned long ip, unsigned long parent_ip, static void ftrace_traceoff(unsigned long ip, unsigned long parent_ip, - struct ftrace_probe_ops *ops, void **data) + struct trace_array *tr, struct ftrace_probe_ops *ops, + void **data) { if (!tracing_is_on()) return; @@ -371,14 +375,16 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip, static void ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, - struct ftrace_probe_ops *ops, void **data) + struct trace_array *tr, struct ftrace_probe_ops *ops, + void **data) { trace_dump_stack(STACK_SKIP); } static void ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, - struct ftrace_probe_ops *ops, void **data) + struct trace_array *tr, struct ftrace_probe_ops 
*ops, + void **data) { struct ftrace_func_mapper *mapper = ops->private_data; long *count; @@ -436,7 +442,8 @@ static int update_count(struct ftrace_probe_ops *ops, unsigned long ip) static void ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, - struct ftrace_probe_ops *ops, void **data) + struct trace_array *tr, struct ftrace_probe_ops *ops, + void **data) { if (update_count(ops, ip)) ftrace_dump(DUMP_ALL); @@ -445,7 +452,8 @@ ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, /* Only dump the current CPU buffer. */ static void ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, - struct ftrace_probe_ops *ops, void **data) + struct trace_array *tr, struct ftrace_probe_ops *ops, + void **data) { if (update_count(ops, ip)) ftrace_dump(DUMP_ORIG); @@ -473,7 +481,8 @@ ftrace_probe_print(const char *name, struct seq_file *m, static int ftrace_traceon_print(struct seq_file *m, unsigned long ip, - struct ftrace_probe_ops *ops, void *data) + struct ftrace_probe_ops *ops, + void *data) { return ftrace_probe_print("traceon", m, ip, ops); } @@ -508,8 +517,8 @@ ftrace_cpudump_print(struct seq_file *m, unsigned long ip, static int -ftrace_count_init(struct ftrace_probe_ops *ops, unsigned long ip, - void *data) +ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr, + unsigned long ip, void *data) { struct ftrace_func_mapper *mapper = ops->private_data; @@ -517,8 +526,8 @@ ftrace_count_init(struct ftrace_probe_ops *ops, unsigned long ip, } static void -ftrace_count_free(struct ftrace_probe_ops *ops, unsigned long ip, - void **_data) +ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr, + unsigned long ip, void **_data) { struct ftrace_func_mapper *mapper = ops->private_data; -- 2.10.2
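
For context, a minimal sketch of a probe written against the updated callback
prototypes is below. It is illustrative only and not part of the patch: the
"my_probe" names are hypothetical, and the func() body just takes a global
snapshot the way the existing ftrace_snapshot() probe does. The point is that
each callback now receives the trace_array of the instance that owns the probe.

/* Illustrative sketch only -- "my_probe" is a hypothetical example probe. */
static void
my_probe_func(unsigned long ip, unsigned long parent_ip,
	      struct trace_array *tr, struct ftrace_probe_ops *ops,
	      void **data)
{
	/* tr is the tracing instance this probe was registered against. */
	tracing_snapshot();
}

static int
my_probe_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
	      unsigned long ip, void *data)
{
	/* Per-ip setup; tr identifies the owning instance. */
	return 0;
}

static void
my_probe_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
	      unsigned long ip, void **data)
{
	/* Per-ip teardown for the same instance. */
}

static struct ftrace_probe_ops my_probe_ops = {
	.func	= my_probe_func,
	.init	= my_probe_init,
	.free	= my_probe_free,
};

Registration goes through register_ftrace_function_probe(), which takes the
glob, the trace_array, the probe_ops and the probe data. With this patch the
trace_array is stashed in ops->ops.private so that function_trace_probe_call()
can hand it back to func(), and it is passed directly to init() and free() at
register and unregister time.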