Message-Id: <20190503081841.1908-9-jolsa@kernel.org>
Date: Fri, 3 May 2019 10:18:37 +0200
From: Jiri Olsa <jolsa@...nel.org>
To: Arnaldo Carvalho de Melo <acme@...nel.org>
Cc: Adrian Hunter <adrian.hunter@...el.com>,
lkml <linux-kernel@...r.kernel.org>,
Ingo Molnar <mingo@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Peter Zijlstra <a.p.zijlstra@...llo.nl>,
Stanislav Fomichev <sdf@...ichev.me>,
Song Liu <songliubraving@...com>,
Andi Kleen <ak@...ux.intel.com>
Subject: [PATCH 08/12] perf tools: Preserve eBPF maps when loading kcore

We need to preserve eBPF maps even if they are
covered by kcore, because we need to access the
eBPF dso for source data.

Add the map_groups__merge_in function to do that.
It merges a map into map_groups by splitting the
new map within the existing map regions.
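
For illustration, the merge works like interval
subtraction: only the pieces of the new (kcore)
map that are not covered by an existing map
survive. A minimal standalone sketch of that
splitting logic (hypothetical names, plain sorted
intervals instead of perf's struct map rbtree;
not the code in this patch):

  #include <inttypes.h>
  #include <stdio.h>

  struct range { uint64_t start, end; };  /* [start, end) */

  /* old[] assumed sorted by start and non-overlapping */
  static void merge_in(struct range new, const struct range *old, int n)
  {
      int i;

      for (i = 0; i < n; i++) {
          /* no overlap with this one */
          if (new.end < old[i].start || new.start >= old[i].end)
              continue;

          /* the piece of "new" left of "old" survives */
          if (new.start < old[i].start)
              printf("keep [%" PRIu64 ", %" PRIu64 ")\n",
                     new.start, old[i].start);

          /* the rest of "new" is covered by "old" */
          if (new.end <= old[i].end)
              return;

          /* continue with the piece right of "old" */
          new.start = old[i].end;
      }
      printf("keep [%" PRIu64 ", %" PRIu64 ")\n", new.start, new.end);
  }

  int main(void)
  {
      /* two preserved (eBPF-like) maps inside one new kcore map */
      struct range old[] = { { 20, 30 }, { 50, 60 } };

      merge_in((struct range){ 0, 100 }, old, 2);
      return 0;
  }

For old maps [20,30) and [50,60) and a new map
[0,100) this keeps [0,20), [30,50) and [60,100),
matching the split cases drawn in the comments
of map_groups__merge_in below.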
Suggested-by: Adrian Hunter <adrian.hunter@...el.com>
Link: http://lkml.kernel.org/n/tip-mlu13e9zl6rbsz4fa00x7mfa@git.kernel.org
Signed-off-by: Jiri Olsa <jolsa@...nel.org>
---
tools/perf/util/symbol.c | 97 ++++++++++++++++++++++++++++++++++++++--
1 file changed, 93 insertions(+), 4 deletions(-)
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 5cbad55cd99d..29780fcd049c 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1166,6 +1166,85 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
 	return 0;
 }
 
+/*
+ * Merges map into map_groups by splitting the new map
+ * within the existing map regions.
+ */
+static int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map)
+{
+	struct map *old_map;
+	LIST_HEAD(merged);
+
+	for (old_map = map_groups__first(kmaps); old_map;
+	     old_map = map_groups__next(old_map)) {
+
+		/* no overlap with this one */
+		if (new_map->end < old_map->start ||
+		    new_map->start >= old_map->end)
+			continue;
+
+		if (new_map->start < old_map->start) {
+			/*
+			 * |new......
+			 *       |old....
+			 */
+			if (new_map->end < old_map->end) {
+				/*
+				 * |new......|     -> |new..|
+				 *       |old....| ->       |old....|
+				 */
+				new_map->end = old_map->start;
+			} else {
+				/*
+				 * |new.............| -> |new..|       |new..|
+				 *       |old....|    ->       |old....|
+				 */
+				struct map *m = map__clone(new_map);
+
+				if (!m)
+					return -ENOMEM;
+
+				m->end = old_map->start;
+				list_add_tail(&m->node, &merged);
+				new_map->start = old_map->end;
+			}
+		} else {
+			/*
+			 *      |new......
+			 * |old....
+			 */
+			if (new_map->end < old_map->end) {
+				/*
+				 *      |new..|   -> x
+				 * |old.........| -> |old.........|
+				 */
+				map__put(new_map);
+				new_map = NULL;
+				break;
+			} else {
+				/*
+				 *      |new......| ->         |new...|
+				 * |old....|        -> |old....|
+				 */
+				new_map->start = old_map->end;
+			}
+		}
+	}
+
+	while (!list_empty(&merged)) {
+		old_map = list_entry(merged.next, struct map, node);
+		list_del_init(&old_map->node);
+		map_groups__insert(kmaps, old_map);
+		map__put(old_map);
+	}
+
+	if (new_map) {
+		map_groups__insert(kmaps, new_map);
+		map__put(new_map);
+	}
+	return 0;
+}
+
 static int dso__load_kcore(struct dso *dso, struct map *map,
 			   const char *kallsyms_filename)
 {
{
@@ -1222,7 +1301,12 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
 	while (old_map) {
 		struct map *next = map_groups__next(old_map);
 
-		if (old_map != map)
+		/*
+		 * We need to preserve eBPF maps even if they are
+		 * covered by kcore, because we need to access the
+		 * eBPF dso for source data.
+		 */
+		if (old_map != map && !__map__is_bpf_prog(old_map))
 			map_groups__remove(kmaps, old_map);
 		old_map = next;
 	}
@@ -1256,11 +1340,16 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
 			map_groups__remove(kmaps, map);
 			map_groups__insert(kmaps, map);
 			map__put(map);
+			map__put(new_map);
 		} else {
-			map_groups__insert(kmaps, new_map);
+			/*
+			 * Merge kcore map into existing maps,
+			 * and ensure that current maps (eBPF)
+			 * stay intact.
+			 */
+			if (map_groups__merge_in(kmaps, new_map))
+				goto out_err;
 		}
-
-		map__put(new_map);
 	}
 
 	if (machine__is(machine, "x86_64")) {
--
2.20.1