ftrace recorded the preempt_count after the tracer itself had already
disabled preemption, rather than the preemption count that was in effect
when the traced event occurred. Sample preempt_count() before disabling
preemption and hand the value down to trace_function() and
tracing_generic_entry_update(), so trace entries report the original
preemption count.

Signed-off-by: Carsten Emde
Signed-off-by: Thomas Gleixner

---

A brief illustrative sketch of the sampling pattern is appended below the
patch.

 kernel/trace/trace.c              |   54 +++++++++++++++++++-------------------
 kernel/trace/trace.h              |    3 +-
 kernel/trace/trace_hist.c         |    2 -
 kernel/trace/trace_irqsoff.c      |   13 +++++----
 kernel/trace/trace_sched_wakeup.c |    9 ++++--
 5 files changed, 45 insertions(+), 36 deletions(-)

Index: linux-2.6.26/kernel/trace/trace.c
===================================================================
--- linux-2.6.26.orig/kernel/trace/trace.c
+++ linux-2.6.26/kernel/trace/trace.c
@@ -833,12 +833,10 @@ tracing_get_trace_entry(struct trace_arr
 }
 
 static inline void
-tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
+tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
+			     unsigned long pc)
 {
 	struct task_struct *tsk = current;
-	unsigned long pc;
-
-	pc = preempt_count();
 
 	entry->field.preempt_count = pc & 0xff;
 	entry->field.pid = (tsk) ? tsk->pid : 0;
@@ -852,7 +850,8 @@ tracing_generic_entry_update(struct trac
 
 void
 trace_function(struct trace_array *tr, struct trace_array_cpu *data,
-	       unsigned long ip, unsigned long parent_ip, unsigned long flags)
+	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
+	       unsigned long pc)
 {
 	struct trace_entry *entry;
 	unsigned long irq_flags;
@@ -860,7 +859,7 @@ trace_function(struct trace_array *tr, s
 	raw_local_irq_save(irq_flags);
 	__raw_spin_lock(&data->lock);
 	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
+	tracing_generic_entry_update(entry, flags, pc);
 	entry->type = TRACE_FN;
 	entry->field.fn.ip = ip;
 	entry->field.fn.parent_ip = parent_ip;
@@ -873,7 +872,8 @@ ftrace(struct trace_array *tr, struct tr
        unsigned long ip, unsigned long parent_ip, unsigned long flags)
 {
 	if (likely(!atomic_read(&data->disabled)))
-		trace_function(tr, data, ip, parent_ip, flags);
+		trace_function(tr, data, ip, parent_ip, flags,
+			       preempt_count());
 }
 
 #ifdef CONFIG_MMIOTRACE
@@ -887,7 +887,7 @@ void __trace_mmiotrace_rw(struct trace_a
 
 	__raw_spin_lock(&data->lock);
 	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, 0);
+	tracing_generic_entry_update(entry, 0, preempt_count());
 	entry->type = TRACE_MMIO_RW;
 	entry->field.mmiorw = *rw;
 
@@ -907,7 +907,7 @@ void __trace_mmiotrace_map(struct trace_
 
 	__raw_spin_lock(&data->lock);
 	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, 0);
+	tracing_generic_entry_update(entry, 0, preempt_count());
 	entry->type = TRACE_MMIO_MAP;
 	entry->field.mmiomap = *map;
 
@@ -930,7 +930,7 @@ void __trace_stack(struct trace_array *t
 		return;
 
 	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
+	tracing_generic_entry_update(entry, flags, preempt_count());
 	entry->type = TRACE_STACK;
 
 	memset(&entry->field.stack, 0, sizeof(entry->field.stack));
@@ -955,7 +955,7 @@ __trace_special(void *__tr, void *__data
 	raw_local_irq_save(irq_flags);
 	__raw_spin_lock(&data->lock);
 	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, 0);
+	tracing_generic_entry_update(entry, 0, preempt_count());
 	entry->type = TRACE_SPECIAL;
 	entry->field.special.arg1 = arg1;
 	entry->field.special.arg2 = arg2;
@@ -980,7 +980,7 @@ tracing_sched_switch_trace(struct trace_
 	raw_local_irq_save(irq_flags);
 	__raw_spin_lock(&data->lock);
 	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
+	tracing_generic_entry_update(entry, flags, preempt_count());
 	entry->type = TRACE_CTX;
 	entry->field.ctx.prev_pid = prev->pid;
 	entry->field.ctx.prev_prio = prev->prio;
@@ -1007,7 +1007,7 @@ tracing_sched_wakeup_trace(struct trace_
 	raw_local_irq_save(irq_flags);
 	__raw_spin_lock(&data->lock);
 	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
+	tracing_generic_entry_update(entry, flags, preempt_count());
 	entry->type = TRACE_WAKE;
 	entry->field.ctx.prev_pid = curr->pid;
 	entry->field.ctx.prev_prio = curr->prio;
@@ -1057,7 +1057,7 @@ void tracing_event_irq(struct trace_arra
 	struct trace_entry *entry;
 
 	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
+	tracing_generic_entry_update(entry, flags, preempt_count());
 	entry->type = TRACE_IRQ;
 	entry->field.irq.ip = ip;
 	entry->field.irq.irq = irq;
@@ -1076,7 +1076,7 @@ void tracing_event_fault(struct trace_ar
 	struct trace_entry *entry;
 
 	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
+	tracing_generic_entry_update(entry, flags, preempt_count());
 	entry->type = TRACE_FAULT;
 	entry->field.fault.ip = ip;
 	entry->field.fault.ret_ip = retip;
@@ -1093,7 +1093,7 @@ void tracing_event_timer_set(struct trac
 	struct trace_entry *entry;
 
 	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
+	tracing_generic_entry_update(entry, flags, preempt_count());
 	entry->type = TRACE_TIMER_SET;
 	entry->field.timer.ip = ip;
 	entry->field.timer.expire = *expires;
@@ -1109,7 +1109,7 @@ void tracing_event_program_event(struct 
 	struct trace_entry *entry;
 
 	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
+	tracing_generic_entry_update(entry, flags, preempt_count());
 	entry->type = TRACE_PROGRAM_EVENT;
 	entry->field.program.ip = ip;
 	entry->field.program.expire = *expires;
@@ -1126,7 +1126,7 @@ void tracing_event_resched_task(struct t
 	struct trace_entry *entry;
 
 	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
+	tracing_generic_entry_update(entry, flags, preempt_count());
 	entry->type = TRACE_RESCHED_TASK;
 	entry->field.task.ip = ip;
 	entry->field.task.prio = p->prio;
@@ -1143,7 +1143,7 @@ void tracing_event_timer_triggered(struc
 	struct trace_entry *entry;
 
 	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
+	tracing_generic_entry_update(entry, flags, preempt_count());
 	entry->type = TRACE_TIMER_TRIG;
 	entry->field.timer.ip = ip;
 	entry->field.timer.expire = *expired;
@@ -1159,7 +1159,7 @@ void tracing_event_timestamp(struct trac
 	struct trace_entry *entry;
 
 	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
+	tracing_generic_entry_update(entry, flags, preempt_count());
 	entry->type = TRACE_TIMESTAMP;
 	entry->field.timestamp.ip = ip;
 	entry->field.timestamp.now = *now;
@@ -1175,7 +1175,7 @@ void tracing_event_task_activate(struct 
 	struct trace_entry *entry;
 
 	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
+	tracing_generic_entry_update(entry, flags, preempt_count());
 	entry->type = TRACE_TASK_ACT;
 	entry->field.task.ip = ip;
 	entry->field.task.pid = p->pid;
@@ -1193,7 +1193,7 @@ void tracing_event_task_deactivate(struc
 	struct trace_entry *entry;
 
 	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
+	tracing_generic_entry_update(entry, flags, preempt_count());
 	entry->type = TRACE_TASK_DEACT;
 	entry->field.task.ip = ip;
 	entry->field.task.pid = p->pid;
@@ -1213,7 +1213,7 @@ void tracing_event_syscall(struct trace_
 	struct trace_entry *entry;
 
 	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
+	tracing_generic_entry_update(entry, flags, preempt_count());
 	entry->type = TRACE_SYSCALL;
 	entry->field.syscall.ip = ip;
 	entry->field.syscall.nr = nr;
@@ -1231,7 +1231,7 @@ void tracing_event_sysret(struct trace_a
 	struct trace_entry *entry;
 
 	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
+	tracing_generic_entry_update(entry, flags, preempt_count());
 	entry->type = TRACE_SYSRET;
 	entry->field.sysret.ip = ip;
 	entry->field.sysret.ret = ret;
@@ -1246,6 +1246,7 @@ function_trace_call(unsigned long ip, un
 	unsigned long flags;
 	long disabled;
 	int cpu, resched;
+	unsigned long pc;
 
 	if (unlikely(!ftrace_function_enabled))
 		return;
@@ -1253,6 +1254,7 @@ function_trace_call(unsigned long ip, un
 	if (skip_trace(ip))
 		return;
 
+	pc = preempt_count();
 	resched = ftrace_preempt_disable();
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
@@ -1260,7 +1262,7 @@ function_trace_call(unsigned long ip, un
 
 	if (likely(disabled == 1)) {
 		local_save_flags(flags);
-		trace_function(tr, data, ip, parent_ip, flags);
+		trace_function(tr, data, ip, parent_ip, flags, pc);
 	}
 
 	atomic_dec(&data->disabled);
@@ -3531,7 +3533,7 @@ int __ftrace_printk(unsigned long ip, co
 
 	__raw_spin_lock(&data->lock);
 	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
+	tracing_generic_entry_update(entry, flags, preempt_count());
 	entry->type = TRACE_PRINT;
 	entry->field.print.ip = ip;
 
Index: linux-2.6.26/kernel/trace/trace.h
===================================================================
--- linux-2.6.26.orig/kernel/trace/trace.h
+++ linux-2.6.26/kernel/trace/trace.h
@@ -322,7 +322,8 @@ void trace_function(struct trace_array *
 		    struct trace_array_cpu *data,
 		    unsigned long ip,
 		    unsigned long parent_ip,
-		    unsigned long flags);
+		    unsigned long flags,
+		    unsigned long pc);
 void tracing_event_irq(struct trace_array *tr,
 		       struct trace_array_cpu *data,
 		       unsigned long flags,
Index: linux-2.6.26/kernel/trace/trace_hist.c
===================================================================
--- linux-2.6.26.orig/kernel/trace/trace_hist.c
+++ linux-2.6.26/kernel/trace/trace_hist.c
@@ -354,7 +354,7 @@ notrace void tracing_hist_preempt_stop(i
 	cpu = raw_smp_processor_id();
 
 #ifdef CONFIG_INTERRUPT_OFF_HIST
-	if (irqs_on  &&
+	if (irqs_on &&
 	    per_cpu(hist_irqsoff_tracing, cpu)) {
 		stop = ftrace_now(cpu);
 		stop_set++;
Index: linux-2.6.26/kernel/trace/trace_irqsoff.c
===================================================================
--- linux-2.6.26.orig/kernel/trace/trace_irqsoff.c
+++ linux-2.6.26/kernel/trace/trace_irqsoff.c
@@ -99,7 +99,8 @@ irqsoff_tracer_call(unsigned long ip, un
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1))
-		trace_function(tr, data, ip, parent_ip, flags);
+		trace_function(tr, data, ip, parent_ip, flags,
+			       preempt_count());
 
 	atomic_dec(&data->disabled);
 }
@@ -154,7 +155,8 @@ check_critical_timing(struct trace_array
 	if (!report_latency(delta))
 		goto out_unlock;
 
-	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
+	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags,
+		       preempt_count());
 
 	latency = nsecs_to_usecs(delta);
 
@@ -178,7 +180,8 @@ out:
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = ftrace_now(cpu);
 	tracing_reset(data);
-	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
+	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags,
+		       preempt_count());
 }
 
 static inline void
@@ -211,7 +214,7 @@ start_critical_timing(unsigned long ip, 
 
 	local_save_flags(flags);
 
-	trace_function(tr, data, ip, parent_ip, flags);
+	trace_function(tr, data, ip, parent_ip, flags, preempt_count());
 
 	per_cpu(tracing_cpu, cpu) = 1;
 
@@ -245,7 +248,7 @@ stop_critical_timing(unsigned long ip, u
 	atomic_inc(&data->disabled);
 
 	local_save_flags(flags);
-	trace_function(tr, data, ip, parent_ip, flags);
+	trace_function(tr, data, ip, parent_ip, flags, preempt_count());
 	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
 	data->critical_start = 0;
 	atomic_dec(&data->disabled);
Index: linux-2.6.26/kernel/trace/trace_sched_wakeup.c
===================================================================
--- linux-2.6.26.orig/kernel/trace/trace_sched_wakeup.c
+++ linux-2.6.26/kernel/trace/trace_sched_wakeup.c
@@ -44,10 +44,12 @@ wakeup_tracer_call(unsigned long ip, uns
 	long disabled;
 	int resched;
 	int cpu;
+	unsigned long pc;
 
 	if (likely(!wakeup_task) || !ftrace_enabled)
 		return;
 
+	pc = preempt_count();
 	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
@@ -71,7 +73,7 @@ wakeup_tracer_call(unsigned long ip, uns
 	/* interrupts are disabled, no worry about scheduling */
 	preempt_enable_no_resched_notrace();
 
-	trace_function(tr, data, ip, parent_ip, flags);
+	trace_function(tr, data, ip, parent_ip, flags, pc);
 
 	preempt_disable_notrace();
 unlock:
@@ -149,7 +151,8 @@ probe_wakeup_sched_switch(struct rq *rq,
 	if (unlikely(!tracer_enabled || next != wakeup_task))
 		goto out_unlock;
 
-	trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags);
+	trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags,
+		       preempt_count());
 
 	/*
 	 * usecs conversion is slow so we try to delay the conversion
@@ -250,7 +253,7 @@ probe_wakeup(struct rq *rq, struct task_
 	wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
 
 	trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
-		       CALLER_ADDR1, CALLER_ADDR2, flags);
+		       CALLER_ADDR1, CALLER_ADDR2, flags, preempt_count());
 
 out_locked:
 	__raw_spin_unlock(&wakeup_lock);
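For readers who want to see the ordering issue in isolation, here is a
minimal standalone C model of the bug and the fix, runnable in userspace.
It is emphatically not kernel code: fake_preempt_count,
fake_preempt_disable()/fake_preempt_enable(), and the two trace_event_*
functions are hypothetical stand-ins for preempt_count(),
ftrace_preempt_disable(), and the trace_function() call paths touched by
this patch. The only point is the ordering: the count must be sampled
before the tracer itself disables preemption.

/*
 * pc_model.c - standalone model of the preempt_count sampling bug.
 * Build: cc -o pc_model pc_model.c
 *
 * Hypothetical stand-ins for the kernel APIs:
 *   fake_preempt_count            ~ preempt_count()
 *   fake_preempt_disable/enable() ~ ftrace_preempt_disable()/enable
 */
#include <stdio.h>

static unsigned long fake_preempt_count; /* models the preemption counter */

static void fake_preempt_disable(void) { fake_preempt_count++; }
static void fake_preempt_enable(void)  { fake_preempt_count--; }

/* Old behavior: sample the count after the tracer disabled preemption. */
static unsigned long trace_event_old(void)
{
	unsigned long pc;

	fake_preempt_disable();       /* tracer protects itself first ...  */
	pc = fake_preempt_count;      /* ... then samples: off by one      */
	fake_preempt_enable();
	return pc;
}

/* Fixed behavior: sample first, then disable preemption, then record. */
static unsigned long trace_event_new(void)
{
	unsigned long pc;

	pc = fake_preempt_count;      /* original count of the caller      */
	fake_preempt_disable();       /* record_entry(..., pc) would run   */
	fake_preempt_enable();        /* here, under preempt-off           */
	return pc;
}

int main(void)
{
	/* Traced code runs with preemption enabled, i.e. count == 0. */
	printf("old: recorded preempt_count = %lu\n", trace_event_old()); /* 1 */
	printf("new: recorded preempt_count = %lu\n", trace_event_new()); /* 0 */
	return 0;
}

This mirrors the structure of the patch: paths that disable preemption
themselves (function_trace_call(), wakeup_tracer_call()) capture pc in a
local before ftrace_preempt_disable() and pass it down, while paths that
already run in the caller's context simply pass preempt_count() directly.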