ftrace instrumentation for RT tasks

From: Gregory Haskins

Signed-off-by: Gregory Haskins
---

 arch/x86/kernel/smp.c |    2 ++
 include/linux/sched.h |    6 ++++++
 kernel/sched.c        |    3 +++
 kernel/sched_rt.c     |   10 ++++++++++
 4 files changed, 21 insertions(+), 0 deletions(-)

diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index e6faa33..468abeb 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -118,6 +118,7 @@ static void native_smp_send_reschedule(int cpu)
 		WARN_ON(1);
 		return;
 	}
+	ftrace_printk("cpu %d\n", cpu);
 	send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
 }
 
@@ -171,6 +172,7 @@ static void native_smp_send_stop(void)
  */
 void smp_reschedule_interrupt(struct pt_regs *regs)
 {
+	ftrace_printk("NEEDS_RESCHED\n");
 	ack_APIC_irq();
 	inc_irq_stat(irq_resched_count);
 }
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4cae9b8..a320692 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2094,8 +2094,14 @@ static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
 	return test_ti_thread_flag(task_thread_info(tsk), flag);
 }
 
+# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt)
+extern int
+__ftrace_printk(unsigned long ip, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+
 static inline void set_tsk_need_resched(struct task_struct *tsk)
 {
+	ftrace_printk("%s/%d\n", tsk->comm, tsk->pid);
 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
 }
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 52bbf1c..d55fcf1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1874,6 +1874,9 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		      *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
 	u64 clock_offset;
 
+	ftrace_printk("migrate %s/%d [%d] -> [%d]\n",
+		      p->comm, p->pid, task_cpu(p), new_cpu);
+
 	clock_offset = old_rq->clock - new_rq->clock;
 
 	trace_sched_migrate_task(p, task_cpu(p), new_cpu);
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 954e1a8..59cf64b 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1102,6 +1102,8 @@ static int push_rt_task(struct rq *rq)
 	if (!next_task)
 		return 0;
 
+	ftrace_printk("attempting push\n");
+
 retry:
 	if (unlikely(next_task == rq->curr)) {
 		WARN_ON(1);
@@ -1139,6 +1141,8 @@ static int push_rt_task(struct rq *rq)
 		goto out;
 	}
 
+	ftrace_printk("%s/%d\n", next_task->comm, next_task->pid);
+
 	deactivate_task(rq, next_task, 0);
 	set_task_cpu(next_task, lowest_rq->cpu);
 	activate_task(lowest_rq, next_task, 0);
@@ -1180,6 +1184,8 @@ static int pull_rt_task(struct rq *this_rq)
 	if (likely(!rt_overloaded(this_rq)))
 		return 0;
 
+	ftrace_printk("attempting pull\n");
+
 	next = pick_next_task_rt(this_rq);
 
 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
@@ -1234,6 +1240,10 @@ static int pull_rt_task(struct rq *this_rq)
 
 			ret = 1;
 
+			ftrace_printk("pull %s/%d [%d] -> [%d]\n",
+				      p->comm, p->pid,
+				      src_rq->cpu, this_rq->cpu);
+
 			deactivate_task(src_rq, p, 0);
 			set_task_cpu(p, this_cpu);
 			activate_task(this_rq, p, 0);
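
For reviewers unfamiliar with the ftrace_printk()/__ftrace_printk() pair that the
sched.h hunk introduces, the sketch below is a minimal userspace analogue of the
same idiom: a macro captures the call-site address and forwards the printf-style
arguments to a varargs helper, so each record can be attributed to the instruction
pointer it came from. This is illustrative only; the names DEMO_THIS_IP,
demo_trace_printk() and __demo_trace_printk() are hypothetical and not part of the
kernel API, and the real __ftrace_printk() records into the ftrace ring buffer
rather than printing to stdout.

	#include <stdarg.h>
	#include <stdio.h>

	/* Rough stand-in for the kernel's _THIS_IP_: the address of the current code. */
	#define DEMO_THIS_IP ({ __label__ __here; __here: (unsigned long)&&__here; })

	/* Mirrors the sched.h macro: prepend the call-site IP, forward the rest. */
	#define demo_trace_printk(fmt...) __demo_trace_printk(DEMO_THIS_IP, fmt)

	static int __demo_trace_printk(unsigned long ip, const char *fmt, ...)
		__attribute__ ((format (printf, 2, 3)));

	static int __demo_trace_printk(unsigned long ip, const char *fmt, ...)
	{
		va_list args;
		int ret;

		printf("[%#lx] ", ip);		/* tag the record with its call site */
		va_start(args, fmt);
		ret = vprintf(fmt, args);	/* emit the caller's printf-style message */
		va_end(args);

		return ret;
	}

	int main(void)
	{
		/* Same shape as the instrumentation added in set_task_cpu() above. */
		demo_trace_printk("migrate %s/%d [%d] -> [%d]\n", "bash", 1234, 0, 1);
		return 0;
	}

Built with gcc, each line of output is prefixed with the caller's address, which is
roughly the role _THIS_IP_ plays in the kernel version of the macro.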