[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250826051039.2626894-9-swapnil.sapkal@amd.com>
Date: Tue, 26 Aug 2025 05:10:36 +0000
From: Swapnil Sapkal <swapnil.sapkal@....com>
To: <peterz@...radead.org>, <mingo@...hat.com>, <juri.lelli@...hat.com>,
<vincent.guittot@...aro.org>, <corbet@....net>
CC: <dietmar.eggemann@....com>, <rostedt@...dmis.org>, <bsegall@...gle.com>,
<mgorman@...e.de>, <vschneid@...hat.com>, <iamjoonsoo.kim@....com>,
<qyousef@...alina.io>, <sshegde@...ux.ibm.com>, <alexs@...nel.org>,
<lukasz.luba@....com>, <cpru@...zon.com>, <gautham.shenoy@....com>,
<kprateek.nayak@....com>, <ravi.bangoria@....com>, <swapnil.sapkal@....com>,
<linux-kernel@...r.kernel.org>, <linux-doc@...r.kernel.org>, James Clark
<james.clark@...aro.org>
Subject: [PATCH v4 08/11] perf sched stats: Add support for live mode
The live mode works similarly to a simple `perf stat` command: it profiles
the target and prints the results on the terminal as soon as the target
finishes.
Example usage:
# perf sched stats -- sleep 10
Co-developed-by: Ravi Bangoria <ravi.bangoria@....com>
Signed-off-by: Ravi Bangoria <ravi.bangoria@....com>
Tested-by: James Clark <james.clark@...aro.org>
Signed-off-by: Swapnil Sapkal <swapnil.sapkal@....com>
---
tools/perf/builtin-sched.c | 99 +++++++++++++++++++++++++++++++++++++-
tools/perf/util/header.c | 6 +--
tools/perf/util/header.h | 5 ++
3 files changed, 106 insertions(+), 4 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index e23018798f5b..ce04349cc4ff 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -4439,6 +4439,103 @@ static int perf_sched__schedstat_report(struct perf_sched *sched)
return err;
}
+/*
+ * Callback for perf_event__synthesize_schedstat() in live mode: feed each
+ * synthesized schedstat event straight into the accumulation path. There is
+ * no perf.data session in live mode, hence the NULL session argument.
+ */
+static int process_synthesized_event_live(const struct perf_tool *tool __maybe_unused,
+					  union perf_event *event,
+					  struct perf_sample *sample __maybe_unused,
+					  struct machine *machine __maybe_unused)
+{
+	return perf_sched__process_schedstat(NULL, event);
+}
+
+/*
+ * Live mode: snapshot /proc/schedstat, run/wait for the target, snapshot
+ * again, and print the delta on the terminal (like `perf stat`, no perf.data).
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int perf_sched__schedstat_live(struct perf_sched *sched,
+				      int argc, const char **argv)
+{
+	struct cpu_domain_map **cd_map = NULL;
+	struct target target = {};
+	struct evlist *evlist;
+	/*
+	 * sv (schedstat version) and nr (cpu count) are consumed by
+	 * free_cpu_domain_info() on the error path, so they must be
+	 * initialized before the first `goto out` can happen.
+	 */
+	u32 nr = 0, sv = 0;
+	u32 md;
+	int reset = 0;
+	int err = 0;
+
+	signal(SIGINT, sighandler);
+	signal(SIGCHLD, sighandler);
+	signal(SIGTERM, sighandler);
+
+	evlist = evlist__new();
+	if (!evlist)
+		return -ENOMEM;
+
+	/*
+	 * `perf sched schedstat` does not support workload profiling (-p pid)
+	 * since /proc/schedstat file contains cpu specific data only. Hence, a
+	 * profile target is either set of cpus or systemwide, never a process.
+	 * Note that, although `-- <workload>` is supported, profile data are
+	 * still cpu/systemwide.
+	 */
+	if (cpu_list)
+		target.cpu_list = cpu_list;
+	else
+		target.system_wide = true;
+
+	if (argc) {
+		err = evlist__prepare_workload(evlist, &target, argv, false, NULL);
+		if (err)
+			goto out;
+	}
+
+	err = evlist__create_maps(evlist, &target);
+	if (err < 0)
+		goto out;
+
+	user_requested_cpus = evlist->core.user_requested_cpus;
+
+	/* First snapshot: schedstat counters before the workload runs. */
+	err = perf_event__synthesize_schedstat(&(sched->tool),
+					       process_synthesized_event_live,
+					       user_requested_cpus);
+	if (err < 0)
+		goto out;
+
+	err = enable_sched_schedstats(&reset);
+	if (err < 0)
+		goto out;
+
+	if (argc)
+		evlist__start_workload(evlist);
+
+	/* Wait for SIGCHLD from the workload, or SIGINT/SIGTERM from the user. */
+	pause();
+
+	if (reset) {
+		err = disable_sched_schedstat();
+		if (err < 0)
+			goto out;
+	}
+
+	/* Second snapshot: the report prints the delta between the two. */
+	err = perf_event__synthesize_schedstat(&(sched->tool),
+					       process_synthesized_event_live,
+					       user_requested_cpus);
+	if (err < 0)
+		goto out;
+
+	setup_pager();
+
+	if (list_empty(&cpu_head)) {
+		pr_err("Data is not available\n");
+		err = -1;
+		goto out;
+	}
+
+	nr = cpu__max_present_cpu().cpu;
+	cd_map = build_cpu_domain_map(&sv, &md, nr);
+	if (!cd_map) {
+		err = -ENOMEM;
+		goto out;
+	}
+	show_schedstat_data(&cpu_head, cd_map);
+out:
+	/* cd_map may be NULL here (early error); don't let the loop in
+	 * free_cpu_domain_info() dereference it. */
+	if (cd_map)
+		free_cpu_domain_info(cd_map, sv, nr);
+	free_schedstat(&cpu_head);
+	evlist__delete(evlist);
+	return err;
+}
+
static bool schedstat_events_exposed(void)
{
/*
@@ -4764,7 +4861,7 @@ int cmd_sched(int argc, const char **argv)
stats_usage, 0);
return perf_sched__schedstat_report(&sched);
}
- usage_with_options(stats_usage, stats_options);
+ return perf_sched__schedstat_live(&sched, argc, argv);
} else {
usage_with_options(sched_usage, sched_options);
}
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 7ff7434bac2c..e8f4d00b5261 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1621,7 +1621,7 @@ static int write_pmu_caps(struct feat_fd *ff,
return 0;
}
-static void free_cpu_domain_info(struct cpu_domain_map **cd_map, u32 schedstat_version, u32 nr)
+void free_cpu_domain_info(struct cpu_domain_map **cd_map, u32 schedstat_version, u32 nr)
{
for (u32 i = 0; i < nr; i++) {
if (cd_map[i]->domains) {
@@ -1641,8 +1641,8 @@ static void free_cpu_domain_info(struct cpu_domain_map **cd_map, u32 schedstat_v
free(cd_map);
}
-static struct cpu_domain_map **build_cpu_domain_map(u32 *schedstat_version, u32 *max_sched_domains,
- u32 nr)
+struct cpu_domain_map **build_cpu_domain_map(u32 *schedstat_version, u32 *max_sched_domains,
+ u32 nr)
{
struct domain_info *domain_info;
struct cpu_domain_map **cd_map;
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index edcb95e0dc49..d67d26dad88e 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -209,4 +209,9 @@ char *get_cpuid_str(struct perf_cpu cpu);
char *get_cpuid_allow_env_override(struct perf_cpu cpu);
int strcmp_cpuid_str(const char *s1, const char *s2);
+
+struct cpu_domain_map **build_cpu_domain_map(u32 *schedstat_version, u32 *max_sched_domains,
+ u32 nr);
+
+void free_cpu_domain_info(struct cpu_domain_map **cd_map, u32 schedstat_version, u32 nr);
#endif /* __PERF_HEADER_H */
--
2.43.0
Powered by blists - more mailing lists