Message-ID: <20220601032608.1034-5-ravi.bangoria@amd.com>
Date: Wed, 1 Jun 2022 08:56:04 +0530
From: Ravi Bangoria <ravi.bangoria@....com>
To: <acme@...nel.org>, <kan.liang@...ux.intel.com>
CC: <ravi.bangoria@....com>, <jolsa@...nel.org>, <irogers@...gle.com>,
<peterz@...radead.org>, <rrichter@....com>, <mingo@...hat.com>,
<mark.rutland@....com>, <namhyung@...nel.org>,
<tglx@...utronix.de>, <bp@...en8.de>, <james.clark@....com>,
<leo.yan@...aro.org>, <ak@...ux.intel.com>, <eranian@...gle.com>,
<like.xu.linux@...il.com>, <x86@...nel.org>,
<linux-perf-users@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<sandipan.das@....com>, <ananth.narayan@....com>,
<kim.phillips@....com>, <santosh.shukla@....com>
Subject: [PATCH v5 4/8] perf headers: Store pmu caps in an array of strings

Currently, all capabilities are stored in a single string, with the
entries separated by NUL characters. Instead, store them in an array
of strings, which makes searching for a particular capability easier.
Signed-off-by: Ravi Bangoria <ravi.bangoria@....com>
---
tools/perf/util/env.c | 6 +++-
tools/perf/util/env.h | 4 +--
tools/perf/util/header.c | 70 +++++++++++++++++++++++-----------------
3 files changed, 48 insertions(+), 32 deletions(-)
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 579e44c59914..7d3aeb2e4622 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -179,7 +179,7 @@ static void perf_env__purge_bpf(struct perf_env *env __maybe_unused)
void perf_env__exit(struct perf_env *env)
{
- int i;
+ int i, j;
perf_env__purge_bpf(env);
perf_env__purge_cgroups(env);
@@ -196,6 +196,8 @@ void perf_env__exit(struct perf_env *env)
zfree(&env->sibling_threads);
zfree(&env->pmu_mappings);
zfree(&env->cpu);
+ for (i = 0; i < env->nr_cpu_pmu_caps; i++)
+ zfree(&env->cpu_pmu_caps[i]);
zfree(&env->cpu_pmu_caps);
zfree(&env->numa_map);
@@ -218,6 +220,8 @@ void perf_env__exit(struct perf_env *env)
zfree(&env->hybrid_nodes);
for (i = 0; i < env->nr_hybrid_cpc_nodes; i++) {
+ for (j = 0; j < env->hybrid_cpc_nodes[i].nr_cpu_pmu_caps; j++)
+ zfree(&env->hybrid_cpc_nodes[i].cpu_pmu_caps[j]);
zfree(&env->hybrid_cpc_nodes[i].cpu_pmu_caps);
zfree(&env->hybrid_cpc_nodes[i].pmu_name);
}
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index a3541f98e1fc..43aab59f7322 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -46,7 +46,7 @@ struct hybrid_node {
struct hybrid_cpc_node {
int nr_cpu_pmu_caps;
unsigned int max_branches;
- char *cpu_pmu_caps;
+ char **cpu_pmu_caps;
char *pmu_name;
};
@@ -81,7 +81,7 @@ struct perf_env {
char *sibling_dies;
char *sibling_threads;
char *pmu_mappings;
- char *cpu_pmu_caps;
+ char **cpu_pmu_caps;
struct cpu_topology_map *cpu;
struct cpu_cache_level *caches;
int caches_cnt;
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index ee7ccd94e272..a1e4ec53333d 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2051,26 +2051,21 @@ static void print_compressed(struct feat_fd *ff, FILE *fp)
ff->ph->env.comp_level, ff->ph->env.comp_ratio);
}
-static void print_per_cpu_pmu_caps(FILE *fp, int nr_caps, char *cpu_pmu_caps,
+static void print_per_cpu_pmu_caps(FILE *fp, int nr_caps, char **cpu_pmu_caps,
char *pmu_name)
{
- const char *delimiter;
- char *str, buf[128];
+ const char *delimiter = "";
+ int i;
if (!nr_caps) {
fprintf(fp, "# %s pmu capabilities: not available\n", pmu_name);
return;
}
- scnprintf(buf, sizeof(buf), "# %s pmu capabilities: ", pmu_name);
-
- delimiter = buf;
-
- str = cpu_pmu_caps;
- while (nr_caps--) {
- fprintf(fp, "%s%s", delimiter, str);
+ fprintf(fp, "# %s pmu capabilities: ", pmu_name);
+ for (i = 0; i < nr_caps; i++) {
+ fprintf(fp, "%s%s", delimiter, cpu_pmu_caps[i]);
delimiter = ", ";
- str += strlen(str) + 1;
}
fprintf(fp, "\n");
@@ -3202,27 +3197,27 @@ static int process_compressed(struct feat_fd *ff,
}
static int process_per_cpu_pmu_caps(struct feat_fd *ff, int *nr_cpu_pmu_caps,
- char **cpu_pmu_caps,
+ char ***cpu_pmu_caps,
unsigned int *max_branches)
{
- char *name, *value;
- struct strbuf sb;
- u32 nr_caps;
+ int name_size, value_size;
+ char *name, *value, *ptr;
+ u32 nr_caps, i;
+
+ *nr_cpu_pmu_caps = 0;
+ *cpu_pmu_caps = NULL;
if (do_read_u32(ff, &nr_caps))
return -1;
- if (!nr_caps) {
- pr_debug("cpu pmu capabilities not available\n");
+ if (!nr_caps)
return 0;
- }
-
- *nr_cpu_pmu_caps = nr_caps;
- if (strbuf_init(&sb, 128) < 0)
+ *cpu_pmu_caps = zalloc(sizeof(char *) * nr_caps);
+ if (!*cpu_pmu_caps)
return -1;
- while (nr_caps--) {
+ for (i = 0; i < nr_caps; i++) {
name = do_read_string(ff);
if (!name)
goto error;
@@ -3231,12 +3226,16 @@ static int process_per_cpu_pmu_caps(struct feat_fd *ff, int *nr_cpu_pmu_caps,
if (!value)
goto free_name;
- if (strbuf_addf(&sb, "%s=%s", name, value) < 0)
+ name_size = strlen(name);
+ value_size = strlen(value);
+ ptr = zalloc(sizeof(char) * (name_size + value_size + 2));
+ if (!ptr)
goto free_value;
- /* include a NULL character at the end */
- if (strbuf_add(&sb, "", 1) < 0)
- goto free_value;
+ memcpy(ptr, name, name_size);
+ ptr[name_size] = '=';
+ memcpy(ptr + name_size + 1, value, value_size);
+ (*cpu_pmu_caps)[i] = ptr;
if (!strcmp(name, "branches"))
*max_branches = atoi(value);
@@ -3244,7 +3243,7 @@ static int process_per_cpu_pmu_caps(struct feat_fd *ff, int *nr_cpu_pmu_caps,
free(value);
free(name);
}
- *cpu_pmu_caps = strbuf_detach(&sb, NULL);
+ *nr_cpu_pmu_caps = nr_caps;
return 0;
free_value:
@@ -3252,16 +3251,24 @@ static int process_per_cpu_pmu_caps(struct feat_fd *ff, int *nr_cpu_pmu_caps,
free_name:
free(name);
error:
- strbuf_release(&sb);
+ for (; i > 0; i--)
+ free((*cpu_pmu_caps)[i - 1]);
+ free(*cpu_pmu_caps);
+ *cpu_pmu_caps = NULL;
+ *nr_cpu_pmu_caps = 0;
return -1;
}
static int process_cpu_pmu_caps(struct feat_fd *ff,
void *data __maybe_unused)
{
- return process_per_cpu_pmu_caps(ff, &ff->ph->env.nr_cpu_pmu_caps,
+ int ret = process_per_cpu_pmu_caps(ff, &ff->ph->env.nr_cpu_pmu_caps,
&ff->ph->env.cpu_pmu_caps,
&ff->ph->env.max_branches);
+
+ if (!ret && !ff->ph->env.cpu_pmu_caps)
+ pr_debug("cpu pmu capabilities not available\n");
+ return ret;
}
static int process_hybrid_cpu_pmu_caps(struct feat_fd *ff,
@@ -3270,6 +3277,7 @@ static int process_hybrid_cpu_pmu_caps(struct feat_fd *ff,
struct hybrid_cpc_node *nodes;
u32 nr_pmu, i;
int ret;
+ int j;
if (do_read_u32(ff, &nr_pmu))
return -1;
@@ -3297,6 +3305,8 @@ static int process_hybrid_cpu_pmu_caps(struct feat_fd *ff,
ret = -1;
goto err;
}
+ if (!n->nr_cpu_pmu_caps)
+ pr_debug("%s pmu capabilities not available\n", n->pmu_name);
}
ff->ph->env.nr_hybrid_cpc_nodes = nr_pmu;
@@ -3305,6 +3315,8 @@ static int process_hybrid_cpu_pmu_caps(struct feat_fd *ff,
err:
for (i = 0; i < nr_pmu; i++) {
+ for (j = 0; j < nodes[i].nr_cpu_pmu_caps; j++)
+ free(nodes[i].cpu_pmu_caps[j]);
free(nodes[i].cpu_pmu_caps);
free(nodes[i].pmu_name);
}
--
2.31.1