Subject: sched: Add IRQ utilization tracking
From: Peter Zijlstra
Date: Mon Aug 7 17:22:46 CEST 2017

Track how much time we spend on IRQ...

Signed-off-by: Peter Zijlstra (Intel)
---
 kernel/sched/core.c  |    6 +++++-
 kernel/sched/fair.c  |   12 ++++++++++++
 kernel/sched/sched.h |    2 ++
 3 files changed, 19 insertions(+), 1 deletion(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -151,6 +151,8 @@ struct rq *task_rq_lock(struct task_stru
 	}
 }
 
+extern int update_irq_load_avg(u64 now, int cpu, struct rq *rq, int running);
+
 /*
  * RQ-clock updating methods:
  */
@@ -204,8 +206,10 @@ static void update_rq_clock_task(struct
 	rq->clock_task += delta;
 
 #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
+	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY)) {
 		sched_rt_avg_update(rq, irq_delta + steal);
+		update_irq_load_avg(rq->clock, cpu_of(rq), rq, 1);
+	}
 #endif
 }
 
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3012,6 +3012,11 @@ __update_load_avg_cfs_rq(u64 now, int cp
 				cfs_rq->curr != NULL, cfs_rq);
 }
 
+int update_irq_load_avg(u64 now, int cpu, struct rq *rq, int running)
+{
+	return ___update_load_avg(now, cpu, &rq->irq_avg, 0, running, NULL);
+}
+
 int update_rt_rq_load_avg(u64 now, int cpu, struct rt_rq *rt_rq, int running)
 {
 	return ___update_load_avg(now, cpu, &rt_rq->avg, 0, running, NULL);
@@ -3550,6 +3555,11 @@ update_cfs_rq_load_avg(u64 now, struct c
 	return 0;
 }
 
+int update_irq_load_avg(u64 now, int cpu, struct rq *rq, int running)
+{
+	return 0;
+}
+
 int update_rt_rq_load_avg(u64 now, int cpu, struct rt_rq *rt_rq, int running)
 {
 	return 0;
@@ -6926,6 +6936,7 @@ static void update_blocked_averages(int
 
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
+	update_irq_load_avg(rq_clock(rq), cpu, rq, 0);
 
 	/*
 	 * Iterates the task_group tree in a bottom up fashion, see
@@ -7014,6 +7025,7 @@ static inline void update_blocked_averag
 
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
+	update_irq_load_avg(rq_clock(rq), cpu, rq, 0);
 	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
 	update_rt_rq_load_avg(rq_clock_task(rq), cpu, &rq->rt, 0);
 	update_dl_rq_load_avg(rq_clock_task(rq), cpu, &rq->dl, 0);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -748,6 +748,8 @@ struct rq {
 
 	struct list_head	cfs_tasks;
 
+	struct sched_avg	irq_avg;
+
 	u64			rt_avg;
 	u64			age_stamp;
 	u64			idle_stamp;
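
For readers less familiar with PELT: the new rq->irq_avg is fed into the same
___update_load_avg() machinery as the rt_rq signal, so IRQ time is accumulated
as a geometrically decaying average (1024us periods, with y chosen so that
y^32 = 0.5, i.e. a half-life of about 32ms). The standalone sketch below is
not kernel code; the helper names (pelt_decay, irq_sample) and the user-space
framing are illustrative only, and it merely shows how such a decayed IRQ
signal converges when a CPU spends a steady fraction of its time in IRQ
context.

	/*
	 * Toy, user-space illustration of PELT-style accumulation.
	 * Not the kernel implementation; constants are PELT's documented
	 * values, everything else is a simplification.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define PELT_PERIOD_US	1024
	/* y such that y^32 == 0.5 (half-life of 32 periods, ~32ms). */
	#define PELT_Y		0.97857206

	/* Decay an accumulated signal across n elapsed periods. */
	static double pelt_decay(double sum, unsigned int n)
	{
		while (n--)
			sum *= PELT_Y;
		return sum;
	}

	/*
	 * Feed one sample: 'running_us' microseconds spent in IRQ context
	 * over 'elapsed_us' microseconds of wall clock.
	 */
	static double irq_sample(double sum, uint64_t running_us,
				 uint64_t elapsed_us)
	{
		unsigned int periods = elapsed_us / PELT_PERIOD_US;

		/* Age the old contributions, then add the new one undecayed. */
		sum = pelt_decay(sum, periods);
		sum += (double)running_us;
		return sum;
	}

	int main(void)
	{
		double sum = 0.0;
		int i;

		/* ~20% of every 1024us period spent in IRQ: sum converges. */
		for (i = 0; i < 200; i++)
			sum = irq_sample(sum, 200, 1024);

		printf("decayed IRQ sum after 200 periods: %.1f\n", sum);
		return 0;
	}

The sum converges to running/(1 - y), so a constant IRQ load settles at a
stable value rather than growing without bound. The patch itself only tracks
the signal; one plausible consumer (not shown here) would read the resulting
rq->irq_avg utilization and discount it from the CPU capacity seen by CFS,
which is what the existing NONTASK_CAPACITY / sched_rt_avg_update() path
already approximates.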