Message-Id: <20231115033647.80785-5-wuyun.abel@bytedance.com>
Date: Wed, 15 Nov 2023 11:36:47 +0800
From: Abel Wu <wuyun.abel@...edance.com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Valentin Schneider <valentin.schneider@....com>
Cc: Barry Song <21cnbao@...il.com>,
Benjamin Segall <bsegall@...gle.com>,
Chen Yu <yu.c.chen@...el.com>,
Daniel Jordan <daniel.m.jordan@...cle.com>,
"Gautham R . Shenoy" <gautham.shenoy@....com>,
Joel Fernandes <joel@...lfernandes.org>,
K Prateek Nayak <kprateek.nayak@....com>,
Mike Galbraith <efault@....de>,
Qais Yousef <qyousef@...alina.io>,
Tim Chen <tim.c.chen@...ux.intel.com>,
Yicong Yang <yangyicong@...wei.com>,
Youssef Esmat <youssefesmat@...omium.org>,
linux-kernel@...r.kernel.org, Abel Wu <wuyun.abel@...edance.com>
Subject: [PATCH v2 4/4] sched/stats: Add statistics for pick_eevdf()
For statistical purposes only, not intended for upstream.

Each call to pick_eevdf() now bumps exactly one of four new per-rq counters, one per pick path: pick_direct (single-entity fast path), pick_parity (RUN_TO_PARITY), pick_fast (leftmost entity is eligible) and pick_slow (heap search). The counters are appended to each cpu line of /proc/schedstat.
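As a quick way to eyeball the counters, a userspace snippet along the lines of the one below could pull the last four fields out of each cpu line. This is a hypothetical helper, not part of the patch; it only assumes the field layout from the show_schedstat() hunk below, where the four pick_* counters are appended at the end of the line.

/* sketch: dump the pick_eevdf() counters appended to /proc/schedstat */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[1024];
	FILE *fp = fopen("/proc/schedstat", "r");

	if (!fp) {
		perror("/proc/schedstat");
		return 1;
	}

	while (fgets(line, sizeof(line), fp)) {
		char *f[64], *tok;
		int n = 0;

		if (strncmp(line, "cpu", 3))
			continue;

		for (tok = strtok(line, " \n"); tok && n < 64; tok = strtok(NULL, " \n"))
			f[n++] = tok;

		if (n < 5)
			continue;

		/* the four pick_* counters are the last four fields */
		printf("%s direct=%s parity=%s fast=%s slow=%s\n",
		       f[0], f[n - 4], f[n - 3], f[n - 2], f[n - 1]);
	}

	fclose(fp);
	return 0;
}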
Signed-off-by: Abel Wu <wuyun.abel@...edance.com>
---
kernel/sched/fair.c | 12 ++++++++++--
kernel/sched/sched.h | 5 +++++
kernel/sched/stats.c | 6 ++++--
3 files changed, 19 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4197258b76ab..94d9318ac484 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -881,13 +881,16 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
 	struct sched_entity *se = __pick_first_entity(cfs_rq);
 	struct sched_entity *curr = cfs_rq->curr;
 	struct sched_entity *best = NULL;
+	struct rq *rq = rq_of(cfs_rq);
 
 	/*
 	 * We can safely skip eligibility check if there is only one entity
 	 * in this cfs_rq, saving some cycles.
 	 */
-	if (cfs_rq->nr_running == 1)
+	if (cfs_rq->nr_running == 1) {
+		schedstat_inc(rq->pick_direct);
 		return curr && curr->on_rq ? curr : se;
+	}
 
 	if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))
 		curr = NULL;
@@ -896,15 +899,20 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
 	 * Once selected, run a task until it either becomes non-eligible or
 	 * until it gets a new slice. See the HACK in set_next_entity().
 	 */
-	if (sched_feat(RUN_TO_PARITY) && curr && curr->vlag == curr->deadline)
+	if (sched_feat(RUN_TO_PARITY) && curr && curr->vlag == curr->deadline) {
+		schedstat_inc(rq->pick_parity);
 		return curr;
+	}
 
 	/* Pick the leftmost entity if it's eligible */
 	if (se && entity_eligible(cfs_rq, se)) {
+		schedstat_inc(rq->pick_fast);
 		best = se;
 		goto found;
 	}
 
+	schedstat_inc(rq->pick_slow);
+
 	/* Heap search for the EEVD entity */
 	while (node) {
 		struct rb_node *left = node->rb_left;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 539c7e763f15..85a79990a698 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1105,6 +1105,11 @@ struct rq {
 	/* try_to_wake_up() stats */
 	unsigned int		ttwu_count;
 	unsigned int		ttwu_local;
+
+	unsigned int		pick_direct;
+	unsigned int		pick_parity;
+	unsigned int		pick_fast;
+	unsigned int		pick_slow;
 #endif
 
 #ifdef CONFIG_CPU_IDLE
diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
index 857f837f52cb..4b862c798989 100644
--- a/kernel/sched/stats.c
+++ b/kernel/sched/stats.c
@@ -133,12 +133,14 @@ static int show_schedstat(struct seq_file *seq, void *v)
 
 		/* runqueue-specific stats */
 		seq_printf(seq,
-		    "cpu%d %u 0 %u %u %u %u %llu %llu %lu",
+		    "cpu%d %u 0 %u %u %u %u %llu %llu %lu %u %u %u %u",
 		    cpu, rq->yld_count,
 		    rq->sched_count, rq->sched_goidle,
 		    rq->ttwu_count, rq->ttwu_local,
 		    rq->rq_cpu_time,
-		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
+		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount,
+		    rq->pick_direct, rq->pick_parity,
+		    rq->pick_fast, rq->pick_slow);
 
 		seq_printf(seq, "\n");
 
--
2.37.3