Message-Id: <20190612193227.993-3-riel@surriel.com>
Date: Wed, 12 Jun 2019 15:32:21 -0400
From: Rik van Riel <riel@...riel.com>
To: peterz@...radead.org
Cc: mingo@...hat.com, linux-kernel@...r.kernel.org, kernel-team@...com,
morten.rasmussen@....com, tglx@...utronix.de,
dietmar.eggeman@....com, mgorman@...hsingularity.net,
vincent.guittot@...aro.org, Rik van Riel <riel@...riel.com>
Subject: [PATCH 2/8] sched: change /proc/sched_debug fields

Remove some fields from /proc/sched_debug that will be removed from
sched_entity in a subsequent patch, and add h_load, which comes in
very handy when debugging CPU controller weight distribution.

Signed-off-by: Rik van Riel <riel@...riel.com>
---
 kernel/sched/debug.c | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)
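
With this applied, the CONFIG_FAIR_GROUP_SCHED part of each cfs_rq
section in /proc/sched_debug grows an h_load line next to the existing
tg_load_avg fields, roughly like this (the values are made up for
illustration; the layout follows the "  .%-30s: %lu" format string
added below):

  .tg_load_avg_contrib           : 1024
  .tg_load_avg                   : 2048
  .h_load                        : 512
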
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 678bfb9bd87f..aab4640d66c5 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -419,11 +419,9 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 	}
 
 	P(se->load.weight);
-	P(se->runnable_weight);
 #ifdef CONFIG_SMP
 	P(se->avg.load_avg);
 	P(se->avg.util_avg);
-	P(se->avg.runnable_load_avg);
 #endif
 
 #undef PN_SCHEDSTAT
@@ -541,7 +539,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_SMP
-	SEQ_printf(m, "  .%-30s: %ld\n", "runnable_weight", cfs_rq->runnable_weight);
 	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
 			cfs_rq->avg.load_avg);
 	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
@@ -550,17 +547,15 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 			cfs_rq->avg.util_avg);
 	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
 			cfs_rq->avg.util_est.enqueued);
-	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
-			cfs_rq->removed.load_avg);
 	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
 			cfs_rq->removed.util_avg);
-	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_sum",
-			cfs_rq->removed.runnable_sum);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
 			cfs_rq->tg_load_avg_contrib);
 	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
 			atomic_long_read(&cfs_rq->tg->load_avg));
+	SEQ_printf(m, "  .%-30s: %lu\n", "h_load",
+			cfs_rq->h_load);
 #endif
 #endif
 #ifdef CONFIG_CFS_BANDWIDTH
@@ -964,10 +959,8 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 		   "nr_involuntary_switches", (long long)p->nivcsw);
 
 	P(se.load.weight);
-	P(se.runnable_weight);
 #ifdef CONFIG_SMP
 	P(se.avg.load_sum);
-	P(se.avg.runnable_load_sum);
 	P(se.avg.util_sum);
 	P(se.avg.load_avg);
 	P(se.avg.runnable_load_avg);
--
2.20.1