Message-ID: <tip-078c33862e042b3778dce3bcc8eaef84ab40715c@git.kernel.org>
Date:	Sat, 16 Jul 2016 13:48:52 -0700
From:	tip-bot for Wang Nan <tipbot@...or.com>
To:	linux-tip-commits@...r.kernel.org
Cc:	lizefan@...wei.com, tglx@...utronix.de, nilayvaish@...il.com,
	namhyung@...nel.org, mhiramat@...nel.org, acme@...hat.com,
	wangnan0@...wei.com, hekuang@...wei.com, jolsa@...nel.org,
	hpa@...or.com, linux-kernel@...r.kernel.org, mingo@...nel.org
Subject: [tip:perf/core] perf evlist: Map backward events to backward_mmap

Commit-ID:  078c33862e042b3778dce3bcc8eaef84ab40715c
Gitweb:     http://git.kernel.org/tip/078c33862e042b3778dce3bcc8eaef84ab40715c
Author:     Wang Nan <wangnan0@...wei.com>
AuthorDate: Thu, 14 Jul 2016 08:34:40 +0000
Committer:  Arnaldo Carvalho de Melo <acme@...hat.com>
CommitDate: Fri, 15 Jul 2016 17:27:48 -0300

perf evlist: Map backward events to backward_mmap

In perf_evlist__mmap_per_evsel(), select backward_mmap for events that
have attr.write_backward set, using the new perf_mmap APIs.  Allocate
the backward_mmap array dynamically, on first use.

Remove the per-index wrapper functions that are no longer needed
(perf_evlist__mmap_get(), perf_evlist__mmap_put() and
__perf_evlist__mmap()).

Signed-off-by: Wang Nan <wangnan0@...wei.com>
Acked-by: Jiri Olsa <jolsa@...nel.org>
Cc: He Kuang <hekuang@...wei.com>
Cc: Masami Hiramatsu <mhiramat@...nel.org>
Cc: Namhyung Kim <namhyung@...nel.org>
Cc: Nilay Vaish <nilayvaish@...il.com>
Cc: Zefan Li <lizefan@...wei.com>
Cc: pi3orama@....com
Link: http://lkml.kernel.org/r/1468485287-33422-9-git-send-email-wangnan0@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@...hat.com>
---
 tools/perf/tests/backward-ring-buffer.c |  4 +--
 tools/perf/util/evlist.c                | 54 ++++++++++++++++-----------------
 2 files changed, 29 insertions(+), 29 deletions(-)
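
[Not part of the commit] For reference, a minimal consumer sketch that
mirrors the updated test in tools/perf/tests/backward-ring-buffer.c
below: iterate the dynamically allocated backward_mmap array and drain
each ring with the new perf_mmap API.  drain_backward_rings() is a
hypothetical helper name and error handling is omitted:

static void drain_backward_rings(struct perf_evlist *evlist)
{
	int i;

	/* backward_mmap is allocated lazily and may be absent */
	if (!evlist->backward_mmap)
		return;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		/* snapshot the ring before reading */
		perf_mmap__read_catchup(&evlist->backward_mmap[i]);

		/* consume every record currently in the ring */
		while ((event = perf_mmap__read_backward(&evlist->backward_mmap[i])) != NULL) {
			/* handle event->header.type here */
		}
	}
}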

diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c
index 5cee387..b2c6348 100644
--- a/tools/perf/tests/backward-ring-buffer.c
+++ b/tools/perf/tests/backward-ring-buffer.c
@@ -31,8 +31,8 @@ static int count_samples(struct perf_evlist *evlist, int *sample_count,
 	for (i = 0; i < evlist->nr_mmaps; i++) {
 		union perf_event *event;
 
-		perf_evlist__mmap_read_catchup(evlist, i);
-		while ((event = perf_evlist__mmap_read_backward(evlist, i)) != NULL) {
+		perf_mmap__read_catchup(&evlist->backward_mmap[i]);
+		while ((event = perf_mmap__read_backward(&evlist->backward_mmap[i])) != NULL) {
 			const u32 type = event->header.type;
 
 			switch (type) {
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 24927e1..7570f90 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -27,7 +27,6 @@
 #include <linux/log2.h>
 #include <linux/err.h>
 
-static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
 static void perf_mmap__munmap(struct perf_mmap *map);
 static void perf_mmap__put(struct perf_mmap *map);
 
@@ -692,8 +691,11 @@ static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
 {
 	int i;
 
+	if (!evlist->backward_mmap)
+		return 0;
+
 	for (i = 0; i < evlist->nr_mmaps; i++) {
-		int fd = evlist->mmap[i].fd;
+		int fd = evlist->backward_mmap[i].fd;
 		int err;
 
 		if (fd < 0)
@@ -904,16 +906,6 @@ static void perf_mmap__put(struct perf_mmap *md)
 		perf_mmap__munmap(md);
 }
 
-static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
-{
-	perf_mmap__get(&evlist->mmap[idx]);
-}
-
-static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
-{
-	perf_mmap__put(&evlist->mmap[idx]);
-}
-
 void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
 {
 	if (!overwrite) {
@@ -1049,12 +1041,6 @@ static int perf_mmap__mmap(struct perf_mmap *map,
 	return 0;
 }
 
-static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
-			       struct mmap_params *mp, int fd)
-{
-	return perf_mmap__mmap(&evlist->mmap[idx], mp, fd);
-}
-
 static bool
 perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
 			 struct perf_evsel *evsel)
@@ -1066,16 +1052,27 @@ perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
 
 static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 				       struct mmap_params *mp, int cpu,
-				       int thread, int *output)
+				       int thread, int *_output, int *_output_backward)
 {
 	struct perf_evsel *evsel;
 	int revent;
 
 	evlist__for_each_entry(evlist, evsel) {
+		struct perf_mmap *maps = evlist->mmap;
+		int *output = _output;
 		int fd;
 
-		if (!!evsel->attr.write_backward != (evlist->overwrite && evlist->backward))
-			continue;
+		if (evsel->attr.write_backward) {
+			output = _output_backward;
+			maps = evlist->backward_mmap;
+
+			if (!maps) {
+				maps = perf_evlist__alloc_mmap(evlist);
+				if (!maps)
+					return -1;
+				evlist->backward_mmap = maps;
+			}
+		}
 
 		if (evsel->system_wide && thread)
 			continue;
@@ -1084,13 +1081,14 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 
 		if (*output == -1) {
 			*output = fd;
-			if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
+
+			if (perf_mmap__mmap(&maps[idx], mp, *output)  < 0)
 				return -1;
 		} else {
 			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
 				return -1;
 
-			perf_evlist__mmap_get(evlist, idx);
+			perf_mmap__get(&maps[idx]);
 		}
 
 		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;
@@ -1103,8 +1101,8 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 		 * Therefore don't add it for polling.
 		 */
 		if (!evsel->system_wide &&
-		    __perf_evlist__add_pollfd(evlist, fd, &evlist->mmap[idx], revent) < 0) {
-			perf_evlist__mmap_put(evlist, idx);
+		    __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
+			perf_mmap__put(&maps[idx]);
 			return -1;
 		}
 
@@ -1130,13 +1128,14 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
 	pr_debug2("perf event ring buffer mmapped per cpu\n");
 	for (cpu = 0; cpu < nr_cpus; cpu++) {
 		int output = -1;
+		int output_backward = -1;
 
 		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
 					      true);
 
 		for (thread = 0; thread < nr_threads; thread++) {
 			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
-							thread, &output))
+							thread, &output, &output_backward))
 				goto out_unmap;
 		}
 	}
@@ -1157,12 +1156,13 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
 	pr_debug2("perf event ring buffer mmapped per thread\n");
 	for (thread = 0; thread < nr_threads; thread++) {
 		int output = -1;
+		int output_backward = -1;
 
 		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
 					      false);
 
 		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
-						&output))
+						&output, &output_backward))
 			goto out_unmap;
 	}
 
