Message-ID: <a0245eb6-d7ff-cae5-3608-d46424fa437d@huawei.com>
Date: Thu, 3 Dec 2020 14:47:14 +0800
From: Yunfeng Ye <yeyunfeng@...wei.com>
To: <mingo@...hat.com>, <peterz@...radead.org>,
<juri.lelli@...hat.com>, <vincent.guittot@...aro.org>,
<dietmar.eggemann@....com>, <rostedt@...dmis.org>,
<bsegall@...gle.com>, <mgorman@...e.de>, <bristot@...hat.com>,
<linux-kernel@...r.kernel.org>
CC: Shiyuan Hu <hushiyuan@...wei.com>,
Hewenliang <hewenliang4@...wei.com>
Subject: [PATCH 2/2] sched: Split the function show_schedstat()

The schedstat output includes both runqueue-specific stats and
domain-specific stats, so split show_schedstat() into two functions:
show_rqstat() and show_domainstat().

No functional changes.

Signed-off-by: Yunfeng Ye <yeyunfeng@...wei.com>
---
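
Note for reviewers (not part of the patch): the split mirrors the two
record types in /proc/schedstat output. show_rqstat() produces the
"cpu<N> ..." lines and show_domainstat() the "domain<N> ..." lines,
while show_schedstat() remains the seq_file ->show() callback: v ==
(void *)1 selects the version/timestamp header, and any other record
maps to a CPU via (unsigned long)(v - 2), which the helpers now take as
a plain cpu argument. As a rough sketch of that mapping, the
hypothetical userspace snippet below classifies /proc/schedstat lines
by the same prefixes; it is an illustration only, not kernel code:

	/* schedstat-split.c: illustration only.
	 * Build: cc -o schedstat-split schedstat-split.c
	 */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[4096];
		FILE *fp = fopen("/proc/schedstat", "r");

		if (!fp) {
			perror("fopen");
			return 1;
		}
		while (fgets(line, sizeof(line), fp)) {
			/* fgets() keeps the trailing newline, so no "\n" below */
			if (!strncmp(line, "cpu", 3))
				printf("rqstat:     %s", line);	/* show_rqstat() */
			else if (!strncmp(line, "domain", 6))
				printf("domainstat: %s", line);	/* show_domainstat() */
			else
				printf("header:     %s", line);	/* version/timestamp */
		}
		fclose(fp);
		return 0;
	}
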
kernel/sched/stats.c | 101 +++++++++++++++++++++++--------------------
 1 file changed, 54 insertions(+), 47 deletions(-)

diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
index e99403df3f90..ef75e7d64d32 100644
--- a/kernel/sched/stats.c
+++ b/kernel/sched/stats.c
@@ -12,61 +12,68 @@
  */
 #define SCHEDSTAT_VERSION 15
 
-static int show_schedstat(struct seq_file *seq, void *v)
+static void show_domainstat(struct seq_file *seq, int cpu)
+{
+#ifdef CONFIG_SMP
+	struct sched_domain *sd;
+	int dcount = 0;
+
+	/* domain-specific stats */
+	rcu_read_lock();
+	for_each_domain(cpu, sd) {
+		enum cpu_idle_type itype;
+
+		seq_printf(seq, "domain%d %*pb", dcount++,
+			   cpumask_pr_args(sched_domain_span(sd)));
+		for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
+				itype++) {
+			seq_printf(seq, " %u %u %u %u %u %u %u %u",
+				   sd->lb_count[itype],
+				   sd->lb_balanced[itype],
+				   sd->lb_failed[itype],
+				   sd->lb_imbalance[itype],
+				   sd->lb_gained[itype],
+				   sd->lb_hot_gained[itype],
+				   sd->lb_nobusyq[itype],
+				   sd->lb_nobusyg[itype]);
+		}
+		seq_printf(seq,
+			   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
+			   sd->alb_count, sd->alb_failed, sd->alb_pushed,
+			   sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
+			   sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
+			   sd->ttwu_wake_remote, sd->ttwu_move_affine,
+			   sd->ttwu_move_balance);
+	}
+	rcu_read_unlock();
+#endif
+}
+
+static void show_rqstat(struct seq_file *seq, int cpu)
 {
-	int cpu;
+	struct rq *rq = cpu_rq(cpu);
+
+	/* runqueue-specific stats */
+	seq_printf(seq,
+		   "cpu%d %u 0 %u %u %u %u %llu %llu %lu\n",
+		   cpu, rq->yld_count,
+		   rq->sched_count, rq->sched_goidle,
+		   rq->ttwu_count, rq->ttwu_local,
+		   rq->rq_cpu_time,
+		   rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
+}
 
+static int show_schedstat(struct seq_file *seq, void *v)
+{
 	if (v == (void *)1) {
 		seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
 		seq_printf(seq, "timestamp %lu\n", jiffies);
 	} else {
-		struct rq *rq;
-#ifdef CONFIG_SMP
-		struct sched_domain *sd;
-		int dcount = 0;
-#endif
-		cpu = (unsigned long)(v - 2);
-		rq = cpu_rq(cpu);
+		int cpu = (unsigned long)(v - 2);
 
-		/* runqueue-specific stats */
-		seq_printf(seq,
-			   "cpu%d %u 0 %u %u %u %u %llu %llu %lu\n",
-			   cpu, rq->yld_count,
-			   rq->sched_count, rq->sched_goidle,
-			   rq->ttwu_count, rq->ttwu_local,
-			   rq->rq_cpu_time,
-			   rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
+		show_rqstat(seq, cpu);
+		show_domainstat(seq, cpu);
 
-#ifdef CONFIG_SMP
-		/* domain-specific stats */
-		rcu_read_lock();
-		for_each_domain(cpu, sd) {
-			enum cpu_idle_type itype;
-
-			seq_printf(seq, "domain%d %*pb", dcount++,
-				   cpumask_pr_args(sched_domain_span(sd)));
-			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
-					itype++) {
-				seq_printf(seq, " %u %u %u %u %u %u %u %u",
-					   sd->lb_count[itype],
-					   sd->lb_balanced[itype],
-					   sd->lb_failed[itype],
-					   sd->lb_imbalance[itype],
-					   sd->lb_gained[itype],
-					   sd->lb_hot_gained[itype],
-					   sd->lb_nobusyq[itype],
-					   sd->lb_nobusyg[itype]);
-			}
-			seq_printf(seq,
-				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
-				   sd->alb_count, sd->alb_failed, sd->alb_pushed,
-				   sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
-				   sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
-				   sd->ttwu_wake_remote, sd->ttwu_move_affine,
-				   sd->ttwu_move_balance);
-		}
-		rcu_read_unlock();
-#endif
 	}
 	return 0;
 }
--
2.18.4