Message-Id: <20250123174713.25570-3-wuyun.abel@bytedance.com>
Date: Fri, 24 Jan 2025 01:47:02 +0800
From: Abel Wu <wuyun.abel@...edance.com>
To: Tejun Heo <tj@...nel.org>,
Johannes Weiner <hannes@...xchg.org>,
Michal Koutný <mkoutny@...e.com>,
Jonathan Corbet <corbet@....net>,
Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Valentin Schneider <vschneid@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Abel Wu <wuyun.abel@...edance.com>,
Bitao Hu <yaoma@...ux.alibaba.com>,
Thomas Gleixner <tglx@...utronix.de>,
Yury Norov <yury.norov@...il.com>,
Chen Ridong <chenridong@...wei.com>
Cc: cgroups@...r.kernel.org (open list:CONTROL GROUP (CGROUP)),
linux-doc@...r.kernel.org (open list:DOCUMENTATION),
linux-kernel@...r.kernel.org (open list)
Subject: [PATCH 2/3] cgroup/rstat: Clean up cpu.stat once and for all

There have been efforts, such as commit b824766504e4 ("cgroup/rstat: add
force idle show helper"), to escape from #ifdef hell, and new stats may be
added in the future, so let's clean this up once and for all by printing
cpu.stat from a single table of name/offset entries.
Signed-off-by: Abel Wu <wuyun.abel@...edance.com>
---
kernel/cgroup/rstat.c | 46 +++++++++++++++++++++++--------------------
1 file changed, 25 insertions(+), 21 deletions(-)
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index c2784c317cdd..6ad647f3e241 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -599,21 +599,38 @@ static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
}
}
+static struct bstat_entry {
+ const char *name;
+ const int offset;
+} bstats[] = {
+#define BSTAT_ENTRY(name, field) \
+ { name, offsetof(struct cgroup_base_stat, field) }
+ BSTAT_ENTRY("usage_usec", cputime.sum_exec_runtime),
+ BSTAT_ENTRY("user_usec", cputime.utime),
+ BSTAT_ENTRY("system_usec", cputime.stime),
+ BSTAT_ENTRY("nice_usec", ntime),
+#ifdef CONFIG_SCHED_CORE
+ BSTAT_ENTRY("core_sched.force_idle_usec", forceidle_sum),
+#endif
+ { NULL } /* must be at end */
+#undef BSTAT_ENTRY
+};
-static void cgroup_force_idle_show(struct seq_file *seq, struct cgroup_base_stat *bstat)
+static void __append_bstat(struct seq_file *seq, struct cgroup_base_stat *bstat,
+ struct bstat_entry *entry)
{
-#ifdef CONFIG_SCHED_CORE
- u64 forceidle_time = bstat->forceidle_sum;
+ u64 *val;
- do_div(forceidle_time, NSEC_PER_USEC);
- seq_printf(seq, "core_sched.force_idle_usec %llu\n", forceidle_time);
-#endif
+ val = (void *)bstat + entry->offset;
+ do_div(*val, NSEC_PER_USEC);
+ seq_printf(seq, "%s %llu\n", entry->name, *val);
}
void cgroup_base_stat_cputime_show(struct seq_file *seq)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
struct cgroup_base_stat bstat;
+ struct bstat_entry *e;
if (cgroup_parent(cgrp)) {
cgroup_rstat_flush_hold(cgrp);
@@ -625,21 +642,8 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq)
root_cgroup_cputime(&bstat);
}
- do_div(bstat.cputime.sum_exec_runtime, NSEC_PER_USEC);
- do_div(bstat.cputime.utime, NSEC_PER_USEC);
- do_div(bstat.cputime.stime, NSEC_PER_USEC);
- do_div(bstat.ntime, NSEC_PER_USEC);
-
- seq_printf(seq, "usage_usec %llu\n"
- "user_usec %llu\n"
- "system_usec %llu\n"
- "nice_usec %llu\n",
- bstat.cputime.sum_exec_runtime,
- bstat.cputime.utime,
- bstat.cputime.stime,
- bstat.ntime);
-
- cgroup_force_idle_show(seq, &bstat);
+ for (e = bstats; e->name; e++)
+ __append_bstat(seq, &bstat, e);
}
/* Add bpf kfuncs for cgroup_rstat_updated() and cgroup_rstat_flush() */
--
2.37.3
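
For reference, below is a minimal, self-contained userspace sketch of the
same table-driven pattern the patch adopts: each stat is described by a name
plus its offsetof() within the stats struct, so adding a new field only needs
a new table entry instead of another printf line or #ifdef block. The names
here (demo_stat, DEMO_ENTRY, demo_show) are illustrative only, not kernel
APIs, and the ns-to-us conversion uses plain division instead of do_div().

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define NSEC_PER_USEC 1000ULL

struct demo_stat {			/* stand-in for struct cgroup_base_stat */
	uint64_t sum_exec_runtime;
	uint64_t utime;
	uint64_t stime;
};

static const struct demo_entry {
	const char *name;
	size_t offset;
} demo_entries[] = {
#define DEMO_ENTRY(name, field) \
	{ name, offsetof(struct demo_stat, field) }
	DEMO_ENTRY("usage_usec", sum_exec_runtime),
	DEMO_ENTRY("user_usec", utime),
	DEMO_ENTRY("system_usec", stime),
	{ NULL }	/* sentinel terminates the loop */
#undef DEMO_ENTRY
};

static void demo_show(const struct demo_stat *stat)
{
	const struct demo_entry *e;

	for (e = demo_entries; e->name; e++) {
		/* locate the field by its recorded offset, convert ns -> us */
		uint64_t val = *(const uint64_t *)((const char *)stat + e->offset);

		printf("%s %llu\n", e->name,
		       (unsigned long long)(val / NSEC_PER_USEC));
	}
}

int main(void)
{
	struct demo_stat s = { 3000000, 2000000, 1000000 };

	/* prints: usage_usec 3000, user_usec 2000, system_usec 1000 */
	demo_show(&s);
	return 0;
}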