Subject: [DEBUG PATCH] sched/fair: profile distribute_cfs_runtime()

---
 kernel/sched/fair.c  | 17 +++++++++++++++++
 kernel/sched/sched.h |  2 ++
 2 files changed, 19 insertions(+)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index da3f728b27725..e3546274a162d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5009,6 +5009,8 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
 	cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) -
 					     cfs_rq->throttled_clock_pelt;
 
+	rq->unthrottled_cfs_rq++;
+
 	/* Re-enqueue the tasks that have been throttled at this level. */
 	list_for_each_entry_safe(p, tmp, &cfs_rq->throttled_limbo_list, throttle_node) {
 		list_del_init(&p->throttle_node);
@@ -5017,6 +5019,7 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
 		 * due to affinity change while p is throttled.
 		 */
 		enqueue_task_fair(rq_of(cfs_rq), p, ENQUEUE_WAKEUP);
+		rq->unthrottled_task++;
 	}
 
 	/* Add cfs_rq with load or one or more already running entities to the list */
@@ -5193,7 +5196,9 @@ static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
 {
 	struct cfs_rq *cfs_rq;
 	u64 runtime, remaining = 1;
+	unsigned int unthrottled_rqs = 0, unthrottled_cfs_rq = 0, unthrottled_task = 0;
 
+	trace_printk("cpu%d: begins\n", raw_smp_processor_id());
 	rcu_read_lock();
 	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
 				throttled_list) {
@@ -5201,6 +5206,7 @@ static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
 		struct rq_flags rf;
 
 		rq_lock_irqsave(rq, &rf);
+		rq->unthrottled_cfs_rq = rq->unthrottled_task = 0;
 		if (!cfs_rq_throttled(cfs_rq))
 			goto next;
 
@@ -5222,12 +5228,23 @@ static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
 			unthrottle_cfs_rq(cfs_rq);
 
 next:
+		trace_printk("cpu%d: cpu%d unthrottled_cfs_rq=%d/%d, unthrottled_task=%d/%d, remaining=%Lu\n",
+			     raw_smp_processor_id(), cpu_of(rq),
+			     rq->unthrottled_cfs_rq, unthrottled_cfs_rq,
+			     rq->unthrottled_task, unthrottled_task, remaining);
+
+		unthrottled_cfs_rq += rq->unthrottled_cfs_rq;
+		unthrottled_task += rq->unthrottled_task;
+		unthrottled_rqs++;
+		rq->unthrottled_cfs_rq = rq->unthrottled_task = 0;
 		rq_unlock_irqrestore(rq, &rf);
 
 		if (!remaining)
 			break;
 	}
 	rcu_read_unlock();
+	trace_printk("cpu%d: finishes: unthrottled_rqs=%u, unthrottled_cfs_rq=%u, unthrottled_task=%u\n",
+		     raw_smp_processor_id(), unthrottled_rqs, unthrottled_cfs_rq, unthrottled_task);
 }
 
 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e0e05847855f0..bd3a11582d5b6 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1118,6 +1118,8 @@ struct rq {
 	unsigned int		core_forceidle_occupation;
 	u64			core_forceidle_start;
 #endif
+	unsigned int		unthrottled_cfs_rq;
+	unsigned int		unthrottled_task;
 };
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-- 
2.39.5