Index: linux-trace.git/arch/x86/kernel/ftrace.c
===================================================================
--- linux-trace.git.orig/arch/x86/kernel/ftrace.c
+++ linux-trace.git/arch/x86/kernel/ftrace.c
@@ -687,12 +687,18 @@ static unsigned char *ftrace_jmp_replace
 /* Module allocation simplifies allocating memory for code */
 static inline void *alloc_tramp(unsigned long size)
 {
-	return module_alloc(size);
+	void *ret;
+
+	ret = module_alloc(size);
+	printk("alloc tramp %p %ld\n", ret, size);
+	return ret;
+
 }
 static inline void tramp_free(void *tramp, int size)
 {
 	int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
+	printk("free tramp %p %d\n", tramp, size);
 	set_memory_nx((unsigned long)tramp, npages);
 	set_memory_rw((unsigned long)tramp, npages);
 	module_memfree(tramp);
@@ -947,6 +953,7 @@ void arch_ftrace_trampoline_free(struct
 	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
 		return;
 
+	printk("free %pS at %p size=%d\n", ops, (void *)ops->trampoline, ops->trampoline_size);
 	tramp_free((void *)ops->trampoline, ops->trampoline_size);
 	ops->trampoline = 0;
 }
Index: linux-trace.git/kernel/trace/ftrace.c
===================================================================
--- linux-trace.git.orig/kernel/trace/ftrace.c
+++ linux-trace.git/kernel/trace/ftrace.c
@@ -2801,10 +2801,12 @@ static int ftrace_shutdown(struct ftrace
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
 
+	printk("unreg %pS %d\n", ops, __LINE__);
 	ret = __unregister_ftrace_function(ops);
 	if (ret)
 		return ret;
 
+	printk("unreg %pS %d\n", ops, __LINE__);
 	ftrace_start_up--;
 	/*
 	 * Just warn in case of unbalance, no need to kill ftrace, it's not
@@ -2828,17 +2830,20 @@ static int ftrace_shutdown(struct ftrace
 
 	if (!command || !ftrace_enabled) {
 		/*
-		 * If these are dynamic or per_cpu ops, they still
-		 * need their data freed. Since, function tracing is
+		 * If these are per_cpu ops, they still need their
+		 * per_cpu field freed. Since, function tracing is
 		 * not currently active, we can just free them
 		 * without synchronizing all CPUs.
 		 */
-		if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU))
-			goto free_ops;
-
+		printk("%pS ops->flags=%x tramp=%pS (leaving)\n", ops, ops->flags,
+		       (void *)ops->trampoline);
+		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
+			per_cpu_ops_free(ops);
+		printk("unreg %pS %d leaving\n", ops, __LINE__);
 		return 0;
 	}
 
+	printk("unreg %pS %d\n", ops, __LINE__);
 	/*
 	 * If the ops uses a trampoline, then it needs to be
 	 * tested first on update.
@@ -2880,6 +2885,7 @@ static int ftrace_shutdown(struct ftrace
 	 * The same goes for freeing the per_cpu data of the per_cpu
 	 * ops.
 	 */
+	printk("%pS ops->flags=%x\n", ops, ops->flags);
 	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) {
 		/*
 		 * We need to do a hard force of sched synchronization.
@@ -2901,7 +2907,6 @@ static int ftrace_shutdown(struct ftrace
 		if (IS_ENABLED(CONFIG_PREEMPT))
 			synchronize_rcu_tasks();
 
- free_ops:
 		arch_ftrace_trampoline_free(ops);
 
 		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
@@ -5535,6 +5540,7 @@ void ftrace_create_filter_files(struct f
 void ftrace_destroy_filter_files(struct ftrace_ops *ops)
 {
 	mutex_lock(&ftrace_lock);
+	printk("destroy %pS %x (%x)\n", ops, ops->flags, ops->flags & FTRACE_OPS_FL_ENABLED);
 	if (ops->flags & FTRACE_OPS_FL_ENABLED)
 		ftrace_shutdown(ops, 0);
 	ops->flags |= FTRACE_OPS_FL_DELETED;
Index: linux-trace.git/kernel/trace/trace_selftest.c
===================================================================
--- linux-trace.git.orig/kernel/trace/trace_selftest.c
+++ linux-trace.git/kernel/trace/trace_selftest.c
@@ -257,6 +257,7 @@ static int trace_selftest_ops(struct tra
 
 	dyn_ops->func = trace_selftest_test_dyn_func;
 
+	printk("reg dynamic ops\n");
 	register_ftrace_function(dyn_ops);
 
 	trace_selftest_test_global_cnt = 0;
@@ -291,6 +292,7 @@ static int trace_selftest_ops(struct tra
 	ret = 0;
  out_free:
+	printk("free dyn_ops\n");
 	unregister_ftrace_function(dyn_ops);
 	kfree(dyn_ops);
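
For reference, the paths instrumented above are the ones a dynamic ftrace_ops walks over its lifetime: registration can allocate a per-ops trampoline (alloc_tramp), and unregistration goes through ftrace_shutdown() and arch_ftrace_trampoline_free() (tramp_free). Below is a minimal, hypothetical test module, not part of the patch, that exercises that register/unregister cycle the same way the selftest's dyn_ops does; the module and function names are made up, and the pt_regs-based ftrace_func_t callback signature of this kernel era is assumed.

/* tramp_test.c: hypothetical module to exercise trampoline alloc/free. */
#include <linux/module.h>
#include <linux/ftrace.h>

static void tramp_test_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	/* Intentionally empty; only the ops life cycle matters here. */
}

static struct ftrace_ops tramp_test_ops = {
	.func = tramp_test_func,
};

static int __init tramp_test_init(void)
{
	/* Registering a module ops is expected to hit the "alloc tramp" path on x86. */
	return register_ftrace_function(&tramp_test_ops);
}

static void __exit tramp_test_exit(void)
{
	/* Unregistering takes ftrace_shutdown() and should end in tramp_free(). */
	unregister_ftrace_function(&tramp_test_ops);
}

module_init(tramp_test_init);
module_exit(tramp_test_exit);
MODULE_LICENSE("GPL");

Loading and unloading such a module should produce the same kind of printk output as the instrumented selftest sequence above (register, then unregister and free).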