[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20210507035230.3079-4-yao.jin@linux.intel.com>
Date: Fri, 7 May 2021 11:52:30 +0800
From: Jin Yao <yao.jin@...ux.intel.com>
To: acme@...nel.org, jolsa@...nel.org, peterz@...radead.org,
mingo@...hat.com, alexander.shishkin@...ux.intel.com
Cc: Linux-kernel@...r.kernel.org, ak@...ux.intel.com,
kan.liang@...el.com, yao.jin@...el.com,
Jin Yao <yao.jin@...ux.intel.com>
Subject: [PATCH v2 3/3] perf header: Process hybrid CPU_PMU_CAPS
For the CPU_PMU_CAPS feature, the layout is extended to:
<pmu1 caps nr>
<caps string 1>
<caps string 2>
<caps string N>
<pmu1 name>
<nr of rest pmus to process>
<pmu2 caps nr>
<caps string 1>
<caps string 2>
<caps string N>
<pmu2 name>
<nr of rest pmus to process>
When we see '0' in 'nr of rest pmus to process', we know all the
PMUs have been processed.
With this patch, some examples,
New perf tool with new perf.data
(new perf.data is generated on hybrid platform):
root@...pl-adl-s-2:~# perf report --header-only -I
...
# cpu_core pmu capabilities: branches=32, max_precise=3, pmu_name=alderlake_hybrid
# cpu_atom pmu capabilities: branches=32, max_precise=3, pmu_name=alderlake_hybrid
New perf tool with new perf.data
(new perf.data is generated on non-hybrid platform):
root@...-ppc:~# perf report --header-only -I
...
# cpu pmu capabilities: branches=32, max_precise=3, pmu_name=skylake
New perf tool with old perf.data
(old perf.data is generated by old perf tool on non-hybrid platform):
root@...-ppc:~# perf report --header-only -I
...
# cpu pmu capabilities: branches=32, max_precise=3, pmu_name=skylake
Signed-off-by: Jin Yao <yao.jin@...ux.intel.com>
---
tools/perf/util/env.c | 6 +++
tools/perf/util/env.h | 11 +++-
tools/perf/util/header.c | 114 ++++++++++++++++++++++++++++++++++-----
3 files changed, 115 insertions(+), 16 deletions(-)
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 744ae87b5bfa..ea952ec507da 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -208,6 +208,12 @@ void perf_env__exit(struct perf_env *env)
zfree(&env->hybrid_nodes[i].cpus);
}
zfree(&env->hybrid_nodes);
+
+ for (i = 0; i < env->nr_cpu_pmu_caps_nodes; i++) {
+ zfree(&env->cpu_pmu_caps_nodes[i].cpu_pmu_caps);
+ zfree(&env->cpu_pmu_caps_nodes[i].pmu_name);
+ }
+ zfree(&env->cpu_pmu_caps_nodes);
}
void perf_env__init(struct perf_env *env __maybe_unused)
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index e5e5deebe68d..5885c055b63e 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -42,6 +42,13 @@ struct hybrid_node {
char *cpus;
};
+struct cpu_pmu_caps_node {
+ int nr_cpu_pmu_caps;
+ unsigned int max_branches;
+ char *cpu_pmu_caps;
+ char *pmu_name;
+};
+
struct perf_env {
char *hostname;
char *os_release;
@@ -63,15 +70,14 @@ struct perf_env {
int nr_memory_nodes;
int nr_pmu_mappings;
int nr_groups;
- int nr_cpu_pmu_caps;
int nr_hybrid_nodes;
+ int nr_cpu_pmu_caps_nodes;
char *cmdline;
const char **cmdline_argv;
char *sibling_cores;
char *sibling_dies;
char *sibling_threads;
char *pmu_mappings;
- char *cpu_pmu_caps;
struct cpu_topology_map *cpu;
struct cpu_cache_level *caches;
int caches_cnt;
@@ -84,6 +90,7 @@ struct perf_env {
struct memory_node *memory_nodes;
unsigned long long memory_bsize;
struct hybrid_node *hybrid_nodes;
+ struct cpu_pmu_caps_node *cpu_pmu_caps_nodes;
#ifdef HAVE_LIBBPF_SUPPORT
/*
* bpf_info_lock protects bpf rbtrees. This is needed because the
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 578f37655cc9..37e9e3ba16f2 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2014,18 +2014,28 @@ static void print_compressed(struct feat_fd *ff, FILE *fp)
ff->ph->env.comp_level, ff->ph->env.comp_ratio);
}
-static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
+static void print_per_cpu_pmu_caps(FILE *fp, struct cpu_pmu_caps_node *n)
{
- const char *delimiter = "# cpu pmu capabilities: ";
- u32 nr_caps = ff->ph->env.nr_cpu_pmu_caps;
- char *str;
+ const char *delimiter;
+ u32 nr_caps = n->nr_cpu_pmu_caps;
+ char *str, buf[128];
if (!nr_caps) {
- fprintf(fp, "# cpu pmu capabilities: not available\n");
+ if (!n->pmu_name)
+ fprintf(fp, "# cpu pmu capabilities: not available\n");
+ else
+ fprintf(fp, "# %s pmu capabilities: not available\n", n->pmu_name);
return;
}
- str = ff->ph->env.cpu_pmu_caps;
+ if (!n->pmu_name)
+ scnprintf(buf, sizeof(buf), "# cpu pmu capabilities: ");
+ else
+ scnprintf(buf, sizeof(buf), "# %s pmu capabilities: ", n->pmu_name);
+
+ delimiter = buf;
+
+ str = n->cpu_pmu_caps;
while (nr_caps--) {
fprintf(fp, "%s%s", delimiter, str);
delimiter = ", ";
@@ -2035,6 +2045,17 @@ static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
fprintf(fp, "\n");
}
+static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
+{
+ struct cpu_pmu_caps_node *n;
+ int i;
+
+ for (i = 0; i < ff->ph->env.nr_cpu_pmu_caps_nodes; i++) {
+ n = &ff->ph->env.cpu_pmu_caps_nodes[i];
+ print_per_cpu_pmu_caps(fp, n);
+ }
+}
+
static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
{
const char *delimiter = "# pmu mappings: ";
@@ -3140,13 +3161,14 @@ static int process_compressed(struct feat_fd *ff,
return 0;
}
-static int process_cpu_pmu_caps(struct feat_fd *ff,
- void *data __maybe_unused)
+static int process_cpu_pmu_caps_node(struct feat_fd *ff,
+ struct cpu_pmu_caps_node *n, bool *end)
{
- char *name, *value;
+ char *name, *value, *pmu_name;
struct strbuf sb;
- u32 nr_caps;
+ u32 nr_caps, nr;
+ *end = false;
if (do_read_u32(ff, &nr_caps))
return -1;
@@ -3155,7 +3177,7 @@ static int process_cpu_pmu_caps(struct feat_fd *ff,
return 0;
}
- ff->ph->env.nr_cpu_pmu_caps = nr_caps;
+ n->nr_cpu_pmu_caps = nr_caps;
if (strbuf_init(&sb, 128) < 0)
return -1;
@@ -3176,13 +3198,33 @@ static int process_cpu_pmu_caps(struct feat_fd *ff,
if (strbuf_add(&sb, "", 1) < 0)
goto free_value;
- if (!strcmp(name, "branches"))
- ff->ph->env.max_branches = atoi(value);
+ if (!strcmp(name, "branches")) {
+ n->max_branches = atoi(value);
+ if (n->max_branches > ff->ph->env.max_branches)
+ ff->ph->env.max_branches = n->max_branches;
+ }
free(value);
free(name);
}
- ff->ph->env.cpu_pmu_caps = strbuf_detach(&sb, NULL);
+
+ /*
+ * Old perf.data may not have a pmu_name.
+ */
+ pmu_name = do_read_string(ff);
+ if (!pmu_name || strncmp(pmu_name, "cpu_", 4)) {
+ *end = true;
+ goto out;
+ }
+
+ if (do_read_u32(ff, &nr))
+ return -1;
+
+ if (nr == 0)
+ *end = true;
+out:
+ n->cpu_pmu_caps = strbuf_detach(&sb, NULL);
+ n->pmu_name = pmu_name;
return 0;
free_value:
@@ -3194,6 +3236,50 @@ static int process_cpu_pmu_caps(struct feat_fd *ff,
return -1;
}
+static int process_cpu_pmu_caps(struct feat_fd *ff,
+ void *data __maybe_unused)
+{
+ struct cpu_pmu_caps_node *nodes = NULL, *tmp;
+ int ret, i, nr_alloc = 1, nr_used = 0;
+ bool end;
+
+ while (1) {
+ if (nr_used == nr_alloc || !nodes) {
+ nr_alloc *= 2;
+ tmp = realloc(nodes, sizeof(*nodes) * nr_alloc);
+ if (!tmp)
+ return -ENOMEM;
+ memset(tmp + nr_used, 0,
+ sizeof(*nodes) * (nr_alloc - nr_used));
+ nodes = tmp;
+ }
+
+ ret = process_cpu_pmu_caps_node(ff, &nodes[nr_used], &end);
+ if (ret) {
+ if (nr_used)
+ break;
+ goto err;
+ }
+
+ nr_used++;
+ if (end)
+ break;
+ }
+
+ ff->ph->env.nr_cpu_pmu_caps_nodes = (u32)nr_used;
+ ff->ph->env.cpu_pmu_caps_nodes = nodes;
+ return 0;
+
+err:
+ for (i = 0; i < nr_used; i++) {
+ free(nodes[i].cpu_pmu_caps);
+ free(nodes[i].pmu_name);
+ }
+
+ free(nodes);
+ return ret;
+}
+
#define FEAT_OPR(n, func, __full_only) \
[HEADER_##n] = { \
.name = __stringify(n), \
--
2.17.1
Powered by blists - more mailing lists