Message-Id: <20230320212248.1175731-2-irogers@google.com>
Date: Mon, 20 Mar 2023 14:22:32 -0700
From: Ian Rogers <irogers@...gle.com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Darren Hart <dvhart@...radead.org>,
Davidlohr Bueso <dave@...olabs.net>,
James Clark <james.clark@....com>,
John Garry <john.g.garry@...cle.com>,
Riccardo Mancini <rickyman7@...il.com>,
Yury Norov <yury.norov@...il.com>,
Andy Shevchenko <andriy.shevchenko@...ux.intel.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Adrian Hunter <adrian.hunter@...el.com>,
Leo Yan <leo.yan@...aro.org>, Andi Kleen <ak@...ux.intel.com>,
Thomas Richter <tmricht@...ux.ibm.com>,
Kan Liang <kan.liang@...ux.intel.com>,
Madhavan Srinivasan <maddy@...ux.ibm.com>,
Shunsuke Nakamura <nakamura.shun@...itsu.com>,
Song Liu <song@...nel.org>,
Masami Hiramatsu <mhiramat@...nel.org>,
Steven Rostedt <rostedt@...dmis.org>,
Miaoqian Lin <linmq006@...il.com>,
Stephen Brennan <stephen.s.brennan@...cle.com>,
Kajol Jain <kjain@...ux.ibm.com>,
Alexey Bayduraev <alexey.v.bayduraev@...ux.intel.com>,
German Gomez <german.gomez@....com>,
linux-perf-users@...r.kernel.org, linux-kernel@...r.kernel.org,
Eric Dumazet <edumazet@...gle.com>,
Dmitry Vyukov <dvyukov@...gle.com>, Hao Luo <haoluo@...gle.com>
Cc: Stephane Eranian <eranian@...gle.com>,
Ian Rogers <irogers@...gle.com>
Subject: [PATCH v5 01/17] perf map: Move map list node into symbol
Using a perf map as a list node is only done in symbol.c. Move the
list node into symbol.c as a new struct map_list_node holding a single
pointer to the map. This makes reference count behavior more obvious
and easier to check.
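
For reviewers, a minimal sketch of the resulting ownership pattern is
below. It is illustrative only, not part of the patch: it assumes the
existing tools/perf list and map helpers (LIST_HEAD, list_add,
list_entry, map__new2, map__put), and collect_map()/drain_maps() are
hypothetical names used purely for the example. The point is that the
node is a separate allocation holding one map reference, so dropping a
node is always an explicit free() plus map__put().

/*
 * Illustrative sketch only (not part of this patch): the list node is a
 * separate allocation that owns one reference to the map, so teardown is
 * always free() of the node plus map__put() of the map.
 */
#include <errno.h>
#include <stdlib.h>
#include <linux/list.h>		/* LIST_HEAD, list_add, list_entry, ... */
#include <linux/types.h>	/* u64 */
#include "map.h"		/* tools/perf/util/map.h: map__new2(), map__put() */

struct map_list_node {
	struct list_head node;
	struct map *map;
};

/* Hypothetical helper: create a map for [start, start + len) and queue it. */
static int collect_map(struct list_head *maps, struct dso *dso,
		       u64 start, u64 len)
{
	struct map_list_node *ln = malloc(sizeof(*ln));

	if (!ln)
		return -ENOMEM;

	ln->map = map__new2(start, dso);
	if (!ln->map) {
		free(ln);
		return -ENOMEM;
	}
	ln->map->end = ln->map->start + len;
	list_add(&ln->node, maps);
	return 0;
}

/* Hypothetical helper: the map reference and the node are released separately. */
static void drain_maps(struct list_head *maps)
{
	while (!list_empty(maps)) {
		struct map_list_node *ln =
			list_entry(maps->next, struct map_list_node, node);

		list_del_init(&ln->node);
		map__put(ln->map);
		free(ln);
	}
}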
Signed-off-by: Ian Rogers <irogers@...gle.com>
---
tools/perf/util/map.h | 5 +--
tools/perf/util/symbol.c | 93 ++++++++++++++++++++++++++--------------
2 files changed, 63 insertions(+), 35 deletions(-)
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 3dcfe06db6b3..2879cae05ee0 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -16,10 +16,7 @@ struct maps;
struct machine;
struct map {
- union {
- struct rb_node rb_node;
- struct list_head node;
- };
+ struct rb_node rb_node;
u64 start;
u64 end;
bool erange_warned:1;
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index a458aa8b87bb..65e0c3d126f1 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -48,6 +48,11 @@ static bool symbol__is_idle(const char *name);
int vmlinux_path__nr_entries;
char **vmlinux_path;
+struct map_list_node {
+ struct list_head node;
+ struct map *map;
+};
+
struct symbol_conf symbol_conf = {
.nanosecs = false,
.use_modules = true,
@@ -85,6 +90,11 @@ static enum dso_binary_type binary_type_symtab[] = {
#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
+static struct map_list_node *map_list_node__new(void)
+{
+ return malloc(sizeof(struct map_list_node));
+}
+
static bool symbol_type__filter(char symbol_type)
{
symbol_type = toupper(symbol_type);
@@ -1219,16 +1229,21 @@ struct kcore_mapfn_data {
static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
{
struct kcore_mapfn_data *md = data;
- struct map *map;
+ struct map_list_node *list_node = map_list_node__new();
- map = map__new2(start, md->dso);
- if (map == NULL)
+ if (!list_node)
return -ENOMEM;
- map->end = map->start + len;
- map->pgoff = pgoff;
+ list_node->map = map__new2(start, md->dso);
+ if (!list_node->map) {
+ free(list_node);
+ return -ENOMEM;
+ }
+
+ list_node->map->end = list_node->map->start + len;
+ list_node->map->pgoff = pgoff;
- list_add(&map->node, &md->maps);
+ list_add(&list_node->node, &md->maps);
return 0;
}
@@ -1264,12 +1279,18 @@ int maps__merge_in(struct maps *kmaps, struct map *new_map)
* |new.............| -> |new..| |new..|
* |old....| -> |old....|
*/
- struct map *m = map__clone(new_map);
+ struct map_list_node *m = map_list_node__new();
if (!m)
return -ENOMEM;
- m->end = old_map->start;
+ m->map = map__clone(new_map);
+ if (!m->map) {
+ free(m);
+ return -ENOMEM;
+ }
+
+ m->map->end = old_map->start;
list_add_tail(&m->node, &merged);
new_map->pgoff += old_map->end - new_map->start;
new_map->start = old_map->end;
@@ -1299,10 +1320,13 @@ int maps__merge_in(struct maps *kmaps, struct map *new_map)
}
while (!list_empty(&merged)) {
- old_map = list_entry(merged.next, struct map, node);
- list_del_init(&old_map->node);
- maps__insert(kmaps, old_map);
- map__put(old_map);
+ struct map_list_node *old_node;
+
+ old_node = list_entry(merged.next, struct map_list_node, node);
+ list_del_init(&old_node->node);
+ maps__insert(kmaps, old_node->map);
+ map__put(old_node->map);
+ free(old_node);
}
if (new_map) {
@@ -1317,7 +1341,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
{
struct maps *kmaps = map__kmaps(map);
struct kcore_mapfn_data md;
- struct map *old_map, *new_map, *replacement_map = NULL, *next;
+ struct map *old_map, *replacement_map = NULL, *next;
struct machine *machine;
bool is_64_bit;
int err, fd;
@@ -1378,11 +1402,12 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
/* Find the kernel map using the '_stext' symbol */
if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
u64 replacement_size = 0;
+ struct map_list_node *new_node;
- list_for_each_entry(new_map, &md.maps, node) {
- u64 new_size = new_map->end - new_map->start;
+ list_for_each_entry(new_node, &md.maps, node) {
+ u64 new_size = new_node->map->end - new_node->map->start;
- if (!(stext >= new_map->start && stext < new_map->end))
+ if (!(stext >= new_node->map->start && stext < new_node->map->end))
continue;
/*
@@ -1392,40 +1417,43 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
* falls within more than one in the list.
*/
if (!replacement_map || new_size < replacement_size) {
- replacement_map = new_map;
+ replacement_map = new_node->map;
replacement_size = new_size;
}
}
}
if (!replacement_map)
- replacement_map = list_entry(md.maps.next, struct map, node);
+ replacement_map = list_entry(md.maps.next, struct map_list_node, node)->map;
/* Add new maps */
while (!list_empty(&md.maps)) {
- new_map = list_entry(md.maps.next, struct map, node);
- list_del_init(&new_map->node);
- if (new_map == replacement_map) {
- map->start = new_map->start;
- map->end = new_map->end;
- map->pgoff = new_map->pgoff;
- map->map_ip = new_map->map_ip;
- map->unmap_ip = new_map->unmap_ip;
+ struct map_list_node *new_node;
+
+ new_node = list_entry(md.maps.next, struct map_list_node, node);
+ list_del_init(&new_node->node);
+ if (new_node->map == replacement_map) {
+ map->start = new_node->map->start;
+ map->end = new_node->map->end;
+ map->pgoff = new_node->map->pgoff;
+ map->map_ip = new_node->map->map_ip;
+ map->unmap_ip = new_node->map->unmap_ip;
/* Ensure maps are correctly ordered */
map__get(map);
maps__remove(kmaps, map);
maps__insert(kmaps, map);
map__put(map);
- map__put(new_map);
+ map__put(new_node->map);
} else {
/*
* Merge kcore map into existing maps,
* and ensure that current maps (eBPF)
* stay intact.
*/
- if (maps__merge_in(kmaps, new_map))
+ if (maps__merge_in(kmaps, new_node->map))
goto out_err;
}
+ free(new_node);
}
if (machine__is(machine, "x86_64")) {
@@ -1462,9 +1490,12 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
out_err:
while (!list_empty(&md.maps)) {
- map = list_entry(md.maps.next, struct map, node);
- list_del_init(&map->node);
- map__put(map);
+ struct map_list_node *list_node;
+
+ list_node = list_entry(md.maps.next, struct map_list_node, node);
+ list_del_init(&list_node->node);
+ map__put(list_node->map);
+ free(list_node);
}
close(fd);
return -EINVAL;
--
2.40.0.rc1.284.g88254d51c5-goog