Message-Id: <20170724234015.5165-5-andi@firstfloor.org>
Date:   Mon, 24 Jul 2017 16:40:04 -0700
From:   Andi Kleen <andi@...stfloor.org>
To:     acme@...nel.org
Cc:     jolsa@...nel.org, linux-kernel@...r.kernel.org,
        Andi Kleen <ak@...ux.intel.com>
Subject: [PATCH v1 04/15] perf, tools: Support weak groups

From: Andi Kleen <ak@...ux.intel.com>

Setting up groups can be complicated due to the
intricate scheduling restrictions of different PMUs.
User tools usually don't understand all these restrictions.
Still, in many cases it is useful to set up groups, and
they work most of the time. However, if a group
is set up incorrectly, some members will never report any values
because they never get scheduled.

Add the concept of a 'weak group': try to set up a group,
but if it's not schedulable, fall back to not using
a group. That gives us the best of both worlds:
groups if they work, but still a usable fallback if they don't.

In theory it would be possible to have more complex fallback
strategies (e.g. try to split the group in half), but
the simple fallback of not using a group seems to work for now.
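
To illustrate the idea outside of the perf tool itself, here is a minimal,
hypothetical sketch of the same fallback expressed directly against the
perf_event_open() syscall: try to open a handful of events as one group and,
if any member fails, reopen all of them ungrouped. The particular events, the
current-process/any-CPU target and the error handling are illustration only
and are not taken from this patch.

/* Hypothetical illustration of a "weak group" style fallback. */
#include <linux/perf_event.h>
#include <asm/unistd.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static int perf_open(struct perf_event_attr *attr, int group_fd)
{
	/* Count the current process on any CPU. */
	return syscall(__NR_perf_event_open, attr, 0, -1, group_fd, 0);
}

static void init_attr(struct perf_event_attr *attr, __u64 config)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = config;
}

int main(void)
{
	__u64 configs[] = {
		PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
		PERF_COUNT_HW_BRANCH_MISSES,
		PERF_COUNT_HW_CACHE_MISSES,
	};
	int n = sizeof(configs) / sizeof(configs[0]);
	struct perf_event_attr attr;
	int fds[3], i, grouped = 1;

	/* First attempt: open all events as one group led by fds[0]. */
	for (i = 0; i < n; i++) {
		init_attr(&attr, configs[i]);
		fds[i] = perf_open(&attr, i == 0 ? -1 : fds[0]);
		if (fds[i] < 0)
			break;
	}

	if (i < n) {
		/* Fallback: close what was opened and reopen every
		 * event as its own group leader. */
		grouped = 0;
		while (--i >= 0)
			close(fds[i]);
		for (i = 0; i < n; i++) {
			init_attr(&attr, configs[i]);
			fds[i] = perf_open(&attr, -1);
			if (fds[i] < 0) {
				perror("perf_event_open");
				return 1;
			}
		}
	}

	printf("events opened %s\n", grouped ? "as a group" : "ungrouped");

	for (i = 0; i < n; i++)
		close(fds[i]);
	return 0;
}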

So far, weak groups are only implemented for perf stat,
not for perf record.

Here's an unschedulable group (on IvyBridge with SMT on):

% perf stat -e '{branches,branch-misses,l1d.replacement,l2_lines_in.all,l2_rqsts.all_code_rd}' -a sleep 1

        73,806,067      branches
         4,848,144      branch-misses             #    6.57% of all branches
        14,754,458      l1d.replacement
        24,905,558      l2_lines_in.all
   <not supported>      l2_rqsts.all_code_rd         <------- will never report anything

With the weak group:

% perf stat -e '{branches,branch-misses,l1d.replacement,l2_lines_in.all,l2_rqsts.all_code_rd}:W' -a sleep 1

       125,366,055      branches                                                      (80.02%)
         9,208,402      branch-misses             #    7.35% of all branches          (80.01%)
        24,560,249      l1d.replacement                                               (80.00%)
        43,174,971      l2_lines_in.all                                               (80.05%)
        31,891,457      l2_rqsts.all_code_rd                                          (79.92%)

The extra event is now scheduled, at the cost of some extra multiplexing.
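
As with the other event modifiers, W is just another character in the
modifier string, so it should be combinable with the existing ones; for
example (illustration only, not tested here):

% perf stat -e '{branches,branch-misses}:uW' -a sleep 1

would request user-space-only counting while still letting the group degrade
to individual events if it cannot be scheduled.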

Signed-off-by: Andi Kleen <ak@...ux.intel.com>
---
 tools/perf/Documentation/perf-list.txt |  1 +
 tools/perf/builtin-stat.c              | 28 +++++++++++++++++++++++++++-
 tools/perf/util/evsel.h                |  1 +
 tools/perf/util/parse-events.c         |  8 +++++++-
 tools/perf/util/parse-events.l         |  2 +-
 5 files changed, 37 insertions(+), 3 deletions(-)

diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index f709de54707b..d432965d728d 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -47,6 +47,7 @@ counted. The following modifiers exist:
  P - use maximum detected precise level
  S - read sample value (PERF_SAMPLE_READ)
  D - pin the event to the PMU
+ W - group is weak and will fallback to non-group if not schedulable
 
 The 'p' modifier can be used for specifying how precise the instruction
 address should be. The 'p' modifier can be specified multiple times:
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 97d6b6c42014..551ed938e05c 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -564,7 +564,7 @@ static int __run_perf_stat(int argc, const char **argv)
 	int interval = stat_config.interval;
 	char msg[BUFSIZ];
 	unsigned long long t0, t1;
-	struct perf_evsel *counter;
+	struct perf_evsel *counter, *c2, *leader;
 	struct timespec ts;
 	size_t l;
 	int status = 0;
@@ -595,6 +595,32 @@ static int __run_perf_stat(int argc, const char **argv)
 	evlist__for_each_entry(evsel_list, counter) {
 try_again:
 		if (create_perf_stat_counter(counter) < 0) {
+			/* Weak group failed. Reset the group. */
+			if (errno == EINVAL &&
+			    counter->leader != counter &&
+			    counter->weak_group) {
+				bool is_open = true;
+
+				pr_debug("Weak group for %s/%d failed\n",
+						counter->leader->name, counter->nr_members);
+				leader = counter->leader;
+				evlist__for_each_entry(evsel_list, c2) {
+					if (c2 == counter)
+						is_open = false;
+					if (c2->leader == leader) {
+						if (is_open)
+							perf_evsel__close(c2,
+								c2->cpus ? c2->cpus->nr :
+								cpu_map__nr(evsel_list->cpus),
+								thread_map__nr(evsel_list->threads));
+						c2->leader = c2;
+						c2->nr_members = 0;
+					}
+				}
+				counter = leader;
+				goto try_again;
+			}
+
 			/*
 			 * PPC returns ENXIO for HW counters until 2.6.37
 			 * (behavior changed with commit b0a873e).
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index fb40ca3c6519..4a8456d0501c 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -136,6 +136,7 @@ struct perf_evsel {
 	const char *		metric_name;
 	struct perf_evsel	**metric_events;
 	bool			collect_stat;
+	bool			weak_group;
 };
 
 union u64_swap {
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 84e301073885..cd89b5cba8d2 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1358,6 +1358,7 @@ struct event_modifier {
 	int exclude_GH;
 	int sample_read;
 	int pinned;
+	int weak;
 };
 
 static int get_event_modifier(struct event_modifier *mod, char *str,
@@ -1376,6 +1377,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
 
 	int exclude = eu | ek | eh;
 	int exclude_GH = evsel ? evsel->exclude_GH : 0;
+	int weak = 0;
 
 	memset(mod, 0, sizeof(*mod));
 
@@ -1413,6 +1415,8 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
 			sample_read = 1;
 		} else if (*str == 'D') {
 			pinned = 1;
+		} else if (*str == 'W') {
+			weak = 1;
 		} else
 			break;
 
@@ -1443,6 +1447,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
 	mod->exclude_GH = exclude_GH;
 	mod->sample_read = sample_read;
 	mod->pinned = pinned;
+	mod->weak = weak;
 
 	return 0;
 }
@@ -1456,7 +1461,7 @@ static int check_modifier(char *str)
 	char *p = str;
 
 	/* The sizeof includes 0 byte as well. */
-	if (strlen(str) > (sizeof("ukhGHpppPSDI") - 1))
+	if (strlen(str) > (sizeof("ukhGHpppPSDIW") - 1))
 		return -1;
 
 	while (*p) {
@@ -1496,6 +1501,7 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add)
 		evsel->exclude_GH          = mod.exclude_GH;
 		evsel->sample_read         = mod.sample_read;
 		evsel->precise_max         = mod.precise_max;
+		evsel->weak_group	   = mod.weak;
 
 		if (perf_evsel__is_group_leader(evsel))
 			evsel->attr.pinned = mod.pinned;
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 7fa3f2e851b0..07ee5bb3a8b0 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -146,7 +146,7 @@ name		[a-zA-Z_*?][a-zA-Z0-9_*?.]*
 name_minus	[a-zA-Z_*?][a-zA-Z0-9\-_*?.:]*
 drv_cfg_term	[a-zA-Z0-9_\.]+(=[a-zA-Z0-9_*?\.:]+)?
 /* If you add a modifier you need to update check_modifier() */
-modifier_event	[ukhpPGHSDI]+
+modifier_event	[ukhpPGHSDIW]+
 modifier_bp	[rwx]{1,3}
 
 %%
-- 
2.9.4
