Subject: sched: Add DL utilization tracking
From: Peter Zijlstra
Date: Mon Aug 7 16:49:48 CEST 2017

Track how much time we spend running DL tasks on average.

Signed-off-by: Peter Zijlstra (Intel)
---
 kernel/sched/deadline.c |   11 ++++++++++-
 kernel/sched/fair.c     |   12 ++++++++++++
 kernel/sched/sched.h    |    2 ++
 3 files changed, 24 insertions(+), 1 deletion(-)

--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1655,6 +1655,8 @@ static struct sched_dl_entity *pick_next
 	return rb_entry(left, struct sched_dl_entity, rb_node);
 }
 
+extern int update_dl_rq_load_avg(u64 now, int cpu, struct dl_rq *dl_rq, int running);
+
 struct task_struct *
 pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
@@ -1702,19 +1704,25 @@ pick_next_task_dl(struct rq *rq, struct
 	p->se.exec_start = rq_clock_task(rq);
 
 	/* Running task will never be pushed. */
-	dequeue_pushable_dl_task(rq, p);
+	dequeue_pushable_dl_task(rq, p);
 
 	if (hrtick_enabled(rq))
 		start_hrtick_dl(rq, p);
 
 	queue_push_tasks(rq);
 
+	if (p) {
+		update_dl_rq_load_avg(rq_clock_task(rq), cpu_of(rq), dl_rq,
+				      rq->curr->sched_class == &dl_sched_class);
+	}
+
 	return p;
 }
 
 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
 {
 	update_curr_dl(rq);
+	update_dl_rq_load_avg(rq_clock_task(rq), cpu_of(rq), &rq->dl, 1);
 
 	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_dl_task(rq, p);
@@ -1723,6 +1731,7 @@ static void put_prev_task_dl(struct rq *
 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
 {
 	update_curr_dl(rq);
+	update_dl_rq_load_avg(rq_clock_task(rq), cpu_of(rq), &rq->dl, 1);
 
 	/*
 	 * Even when we have runtime, update_curr_dl() might have resulted in us
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3017,6 +3017,11 @@ int update_rt_rq_load_avg(u64 now, int c
 	return ___update_load_avg(now, cpu, &rt_rq->avg, 0, running, NULL);
 }
 
+int update_dl_rq_load_avg(u64 now, int cpu, struct dl_rq *dl_rq, int running)
+{
+	return ___update_load_avg(now, cpu, &dl_rq->avg, 0, running, NULL);
+}
+
 /*
  * Signed add and clamp on underflow.
@@ -3550,6 +3555,11 @@ int update_rt_rq_load_avg(u64 now, int c
 	return 0;
 }
 
+int update_dl_rq_load_avg(u64 now, int cpu, struct dl_rq *dl_rq, int running)
+{
+	return 0;
+}
+
 #define UPDATE_TG	0x0
 #define SKIP_AGE_LOAD	0x0
 
@@ -6945,6 +6955,7 @@ static void update_blocked_averages(int
 	}
 	update_rt_rq_load_avg(rq_clock_task(rq), cpu, &rq->rt, 0);
+	update_dl_rq_load_avg(rq_clock_task(rq), cpu, &rq->dl, 0);
 	rq_unlock_irqrestore(rq, &rf);
 }
@@ -7005,6 +7016,7 @@ static inline void update_blocked_averag
 	update_rq_clock(rq);
 	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
 	update_rt_rq_load_avg(rq_clock_task(rq), cpu, &rq->rt, 0);
+	update_dl_rq_load_avg(rq_clock_task(rq), cpu, &rq->dl, 0);
 	rq_unlock_irqrestore(rq, &rf);
 }
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -580,6 +580,8 @@ struct dl_rq {
 	 */
 	struct rb_root pushable_dl_tasks_root;
 	struct rb_node *pushable_dl_tasks_leftmost;
+
+	struct sched_avg avg;
#else
 	struct dl_bw dl_bw;
 #endif
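
For readers less familiar with PELT, the following is a minimal, self-contained
user-space sketch of the kind of geometrically decayed "running" average that
___update_load_avg() maintains for the new rq->dl.avg above. It is an
illustration only, not kernel code: the toy_* names, the use of floating point,
and the whole-period update loop are simplifying assumptions, and the kernel's
fixed-point accounting, partial-period handling and capacity scaling are all
omitted.

	/*
	 * Toy model of a PELT-style running average: every ~1ms period the
	 * old signal is decayed by y (with y^32 == 0.5) and, if the class
	 * was running during that period, the newest contribution is added.
	 */
	#include <stdio.h>
	#include <math.h>

	#define PELT_HALFLIFE	32	/* periods after which a contribution halves */

	struct toy_sched_avg {
		double util_avg;	/* 0.0 .. 1.0: fraction of time "running" */
	};

	static double pelt_y(void)
	{
		/* decay factor y such that y^PELT_HALFLIFE == 0.5 */
		return pow(0.5, 1.0 / PELT_HALFLIFE);
	}

	/* account one period; 'running' mirrors the last argument of the kernel helper */
	static void toy_update_load_avg(struct toy_sched_avg *sa, int running)
	{
		double y = pelt_y();

		sa->util_avg = sa->util_avg * y + (running ? (1.0 - y) : 0.0);
	}

	int main(void)
	{
		struct toy_sched_avg dl_avg = { 0.0 };
		int p;

		/* DL tasks run for 64 periods, then the CPU runs something else */
		for (p = 0; p < 64; p++)
			toy_update_load_avg(&dl_avg, 1);
		printf("after 64 busy periods: util_avg = %.3f\n", dl_avg.util_avg);

		for (p = 0; p < 64; p++)
			toy_update_load_avg(&dl_avg, 0);
		printf("after 64 idle periods: util_avg = %.3f\n", dl_avg.util_avg);

		return 0;
	}

Built with "cc toy_pelt.c -lm", this prints a signal that ramps toward 1.0
while the DL class keeps running (0.750 after 64 busy periods) and decays back
toward 0 once it stops (0.188 after 64 idle periods), which mirrors how the
new dl_rq->avg signal behaves under the updates added in this patch.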