From: "Steven Rostedt (VMware)" Adding a hook into free_reserve_area() that informs ftrace that boot up init text is being free, lets ftrace safely remove those init functions from its records, which keeps ftrace from trying to modify text that no longer exists. Note, this still does not allow for tracing .init text of modules, as modules require different work for freeing its init code. Link: http://lkml.kernel.org/r/1488502497.7212.24.camel@linux.intel.com Cc: linux-mm@kvack.org Cc: Vlastimil Babka Cc: Mel Gorman Cc: Peter Zijlstra Requested-by: Todd Brandt Signed-off-by: Steven Rostedt (VMware) --- include/linux/ftrace.h | 3 +++ include/linux/init.h | 4 +++- kernel/trace/ftrace.c | 44 ++++++++++++++++++++++++++++++++++++++++++++ mm/page_alloc.c | 4 ++++ scripts/recordmcount.c | 1 + scripts/recordmcount.pl | 1 + 6 files changed, 56 insertions(+), 1 deletion(-) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 569db5589851..25407b5553c3 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -249,6 +249,8 @@ static inline int ftrace_function_local_disabled(struct ftrace_ops *ops) extern void ftrace_stub(unsigned long a0, unsigned long a1, struct ftrace_ops *op, struct pt_regs *regs); +void ftrace_free_mem(void *start, void *end); + #else /* !CONFIG_FUNCTION_TRACER */ /* * (un)register_ftrace_function must be a macro since the ops parameter @@ -262,6 +264,7 @@ static inline int ftrace_nr_registered_ops(void) } static inline void clear_ftrace_function(void) { } static inline void ftrace_kill(void) { } +static inline void ftrace_free_mem(void *start, void *end) { } #endif /* CONFIG_FUNCTION_TRACER */ #ifdef CONFIG_STACK_TRACER diff --git a/include/linux/init.h b/include/linux/init.h index 885c3e6d0f9d..c119e76f6d6e 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -39,7 +39,7 @@ /* These are for everybody (although not all archs will actually discard it in modules) */ -#define __init __section(.init.text) __cold notrace __latent_entropy +#define __init __section(.init.text) __cold __inittrace __latent_entropy #define __initdata __section(.init.data) #define __initconst __section(.init.rodata) #define __exitdata __section(.exit.data) @@ -68,8 +68,10 @@ #ifdef MODULE #define __exitused +#define __inittrace notrace #else #define __exitused __used +#define __inittrace #endif #define __exit __section(.exit.text) __exitused __cold notrace diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index d129ae51329a..4c2d751eb886 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -5251,6 +5251,50 @@ void ftrace_module_enable(struct module *mod) mutex_unlock(&ftrace_lock); } +void ftrace_free_mem(void *start_ptr, void *end_ptr) +{ + unsigned long start = (unsigned long)start_ptr; + unsigned long end = (unsigned long)end_ptr; + struct ftrace_page **last_pg = &ftrace_pages_start; + struct ftrace_page *pg; + struct dyn_ftrace *rec; + struct dyn_ftrace key; + int order; + + key.ip = start; + key.flags = end; /* overload flags, as it is unsigned long */ + + mutex_lock(&ftrace_lock); + + for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) { + if (end < pg->records[0].ip || + start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) + continue; + again: + rec = bsearch(&key, pg->records, pg->index, + sizeof(struct dyn_ftrace), + ftrace_cmp_recs); + if (!rec) + continue; + pg->index--; + if (!pg->index) { + *last_pg = pg->next; + order = get_count_order(pg->size / ENTRIES_PER_PAGE); + free_pages((unsigned long)pg->records, 
order); + kfree(pg); + pg = container_of(last_pg, struct ftrace_page, next); + if (!(*last_pg)) + ftrace_pages = pg; + continue; + } + memmove(rec, rec + 1, + (pg->index - (rec - pg->records)) * sizeof(*rec)); + /* More than one function may be in this block */ + goto again; + } + mutex_unlock(&ftrace_lock); +} + void ftrace_module_init(struct module *mod) { if (ftrace_disabled || !mod->num_ftrace_callsites) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2c6d5f64feca..95ac03de4cda 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -64,6 +64,7 @@ #include #include #include +#include #include #include @@ -6441,6 +6442,9 @@ unsigned long free_reserved_area(void *start, void *end, int poison, char *s) void *pos; unsigned long pages = 0; + /* This may be .init text, inform ftrace to remove it */ + ftrace_free_mem(start, end); + start = (void *)PAGE_ALIGN((unsigned long)start); end = (void *)((unsigned long)end & PAGE_MASK); for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index aeb34223167c..16e086dcc567 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c @@ -412,6 +412,7 @@ static int is_mcounted_section_name(char const *const txtname) { return strcmp(".text", txtname) == 0 || + strcmp(".init.text", txtname) == 0 || strcmp(".ref.text", txtname) == 0 || strcmp(".sched.text", txtname) == 0 || strcmp(".spinlock.text", txtname) == 0 || diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index faac4b10d8ea..328590d58eee 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl @@ -130,6 +130,7 @@ if ($inputfile =~ m,kernel/trace/ftrace\.o$,) { # Acceptable sections to record. my %text_sections = ( ".text" => 1, + ".init.text" => 1, ".ref.text" => 1, ".sched.text" => 1, ".spinlock.text" => 1, -- 2.10.2
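
[Illustrative note, not part of the patch] The core of ftrace_free_mem() above is removal-by-range from a sorted record array: bsearch() locates any record whose ip falls inside [start, end) (the key's flags field is overloaded to carry the range end), the tail of the array is memmove()d down over it, and the search repeats until no record in the range remains. The stand-alone user-space sketch below shows just that technique; struct rec, struct range_key, cmp_rec_range() and remove_range() are simplified stand-ins invented for the example, not the kernel's dyn_ftrace, ftrace_cmp_recs() or ftrace_free_mem() themselves.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for a dyn_ftrace record: just the call-site address */
struct rec {
	unsigned long ip;
};

/* The search key carries the whole freed range, like the overloaded kernel key */
struct range_key {
	unsigned long start;
	unsigned long end;	/* exclusive */
};

/* Match any record whose ip lies inside [start, end), as ftrace_cmp_recs() does */
static int cmp_rec_range(const void *k, const void *r)
{
	const struct range_key *key = k;
	const struct rec *rec = r;

	if (key->end <= rec->ip)
		return -1;
	if (key->start > rec->ip)
		return 1;
	return 0;
}

/* Remove every record whose ip falls in [start, end); returns the new count */
static int remove_range(struct rec *recs, int count,
			unsigned long start, unsigned long end)
{
	struct range_key key = { .start = start, .end = end };
	struct rec *hit;

	/* bsearch() may land on any matching record, so loop until none match */
	while ((hit = bsearch(&key, recs, count, sizeof(*recs), cmp_rec_range))) {
		count--;
		memmove(hit, hit + 1, (count - (hit - recs)) * sizeof(*recs));
	}
	return count;
}

int main(void)
{
	struct rec recs[] = { {0x100}, {0x110}, {0x200}, {0x210}, {0x300} };
	int n = sizeof(recs) / sizeof(recs[0]);

	/* Drop the records that fall in the "freed" range [0x200, 0x300) */
	n = remove_range(recs, n, 0x200, 0x300);

	for (int i = 0; i < n; i++)
		printf("0x%lx\n", recs[i].ip);
	return 0;
}

Built with any C99 compiler, this prints 0x100, 0x110 and 0x300: the two records inside the freed range are gone, mirroring how ftrace drops the records of freed .init functions.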