Message-ID: <tip-b2cb615d8aaba520fe351ff456f6c7730828b3fe@git.kernel.org>
Date: Sat, 16 Jul 2016 13:48:28 -0700
From: tip-bot for Wang Nan <tipbot@...or.com>
To: linux-tip-commits@...r.kernel.org
Cc: tglx@...utronix.de, mhiramat@...nel.org, jolsa@...nel.org,
namhyung@...nel.org, lizefan@...wei.com, nilayvaish@...il.com,
wangnan0@...wei.com, linux-kernel@...r.kernel.org, acme@...hat.com,
hekuang@...wei.com, mingo@...nel.org, hpa@...or.com
Subject: [tip:perf/core] perf evlist: Introduce backward_mmap array for evlist
Commit-ID: b2cb615d8aaba520fe351ff456f6c7730828b3fe
Gitweb: http://git.kernel.org/tip/b2cb615d8aaba520fe351ff456f6c7730828b3fe
Author: Wang Nan <wangnan0@...wei.com>
AuthorDate: Thu, 14 Jul 2016 08:34:39 +0000
Committer: Arnaldo Carvalho de Melo <acme@...hat.com>
CommitDate: Fri, 15 Jul 2016 17:27:48 -0300

perf evlist: Introduce backward_mmap array for evlist

Add a backward_mmap array to evlist, and free it together with the
normal mmap array.

Improve perf_evlist__pick_pc() to search backward_mmap when
evlist->mmap is not available.

This patch doesn't allocate the array; it will be allocated
conditionally in the following commits.
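
As illustration only, a minimal sketch of what that conditional
allocation could look like. The evlist__has_backward_event() helper is
hypothetical and not part of this patch; the allocator name comes from
perf_evlist__alloc_mmap() in the diff below:

	/* Sketch only: allocate the backward array on demand. The
	 * evlist__has_backward_event() condition is a hypothetical
	 * stand-in for whatever check the follow-up commits use.
	 */
	if (evlist__has_backward_event(evlist)) {
		evlist->backward_mmap = perf_evlist__alloc_mmap(evlist);
		if (!evlist->backward_mmap)
			return -ENOMEM;
	}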
Signed-off-by: Wang Nan <wangnan0@...wei.com>
Acked-by: Jiri Olsa <jolsa@...nel.org>
Cc: He Kuang <hekuang@...wei.com>
Cc: Masami Hiramatsu <mhiramat@...nel.org>
Cc: Namhyung Kim <namhyung@...nel.org>
Cc: Nilay Vaish <nilayvaish@...il.com>
Cc: Zefan Li <lizefan@...wei.com>
Cc: pi3orama@....com
Link: http://lkml.kernel.org/r/1468485287-33422-8-git-send-email-wangnan0@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@...hat.com>
---
 tools/perf/builtin-record.c | 10 +++++++---
 tools/perf/util/evlist.c    | 12 ++++++++----
 tools/perf/util/evlist.h    |  1 +
 3 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index d15517e..dbcb223 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -509,7 +509,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
 	if (!evlist)
 		return 0;

-	maps = evlist->mmap;
+	maps = backward ? evlist->backward_mmap : evlist->mmap;
 	if (!maps)
 		return 0;

@@ -696,8 +696,12 @@ perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused
 static const struct perf_event_mmap_page *
 perf_evlist__pick_pc(struct perf_evlist *evlist)
 {
-	if (evlist && evlist->mmap && evlist->mmap[0].base)
-		return evlist->mmap[0].base;
+	if (evlist) {
+		if (evlist->mmap && evlist->mmap[0].base)
+			return evlist->mmap[0].base;
+		if (evlist->backward_mmap && evlist->backward_mmap[0].base)
+			return evlist->backward_mmap[0].base;
+	}
 	return NULL;
 }

diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 54ae0a0..24927e1 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -123,6 +123,7 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
 void perf_evlist__exit(struct perf_evlist *evlist)
 {
 	zfree(&evlist->mmap);
+	zfree(&evlist->backward_mmap);
 	fdarray__exit(&evlist->pollfd);
 }

@@ -973,17 +974,20 @@ static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
 {
 	int i;

-	if (evlist->mmap == NULL)
-		return;
+	if (evlist->mmap)
+		for (i = 0; i < evlist->nr_mmaps; i++)
+			perf_mmap__munmap(&evlist->mmap[i]);

-	for (i = 0; i < evlist->nr_mmaps; i++)
-		perf_mmap__munmap(&evlist->mmap[i]);
+	if (evlist->backward_mmap)
+		for (i = 0; i < evlist->nr_mmaps; i++)
+			perf_mmap__munmap(&evlist->backward_mmap[i]);
 }

 void perf_evlist__munmap(struct perf_evlist *evlist)
 {
 	perf_evlist__munmap_nofree(evlist);
 	zfree(&evlist->mmap);
+	zfree(&evlist->backward_mmap);
 }

 static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 9e680c6..07a1ad0 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -61,6 +61,7 @@ struct perf_evlist {
 	} workload;
 	struct fdarray pollfd;
 	struct perf_mmap *mmap;
+	struct perf_mmap *backward_mmap;
 	struct thread_map *threads;
 	struct cpu_map *cpus;
 	struct perf_evsel *selected;
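
A usage sketch, under the assumption that record__mmap_read_evlist()
takes a third bool argument selecting between evlist->mmap and
evlist->backward_mmap (the first hunk above suggests this, but its
header is truncated, so the signature and return convention here are
assumptions):

	/* Assumed usage in the record loop: read the forward maps,
	 * then the backward maps. The bool parameter and the
	 * non-zero-on-error return are inferred, not confirmed.
	 */
	if (record__mmap_read_evlist(rec, rec->evlist, false) != 0)
		return -1;
	if (record__mmap_read_evlist(rec, rec->evlist, true) != 0)
		return -1;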