Message-ID: <157544603406.21853.4873090291378641316.tip-bot2@tip-bot2>
Date: Wed, 04 Dec 2019 07:53:54 -0000
From: "tip-bot2 for Andi Kleen" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Andi Kleen <ak@...ux.intel.com>, Jiri Olsa <jolsa@...nel.org>,
Arnaldo Carvalho de Melo <acme@...hat.com>,
x86 <x86@...nel.org>, LKML <linux-kernel@...r.kernel.org>
Subject: [tip: perf/urgent] perf stat: Use affinity for enabling/disabling events

The following commit has been merged into the perf/urgent branch of tip:

Commit-ID: 704e2f5b700da4c912635cf161c3e982737eb89e
Gitweb: https://git.kernel.org/tip/704e2f5b700da4c912635cf161c3e982737eb89e
Author: Andi Kleen <ak@...ux.intel.com>
AuthorDate: Wed, 20 Nov 2019 16:15:22 -08:00
Committer: Arnaldo Carvalho de Melo <acme@...hat.com>
CommitterDate: Fri, 29 Nov 2019 12:20:45 -03:00

perf stat: Use affinity for enabling/disabling events

Restructure event enabling/disabling to use affinity, which
minimizes the number of IPIs needed.

Before on a large test case with 94 CPUs:

 % time     seconds  usecs/call     calls    errors syscall
 ------ ----------- ----------- --------- --------- ----------------
  54.65    1.899986          22     84812       660 ioctl

after:

  39.21    0.930451          10     84796       644 ioctl

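The ioctl time drops from ~1.90s to ~0.93s (22 to 10 usecs/call) because the tool pins itself to the CPU whose events it is toggling, so each PERF_EVENT_IOC_ENABLE/PERF_EVENT_IOC_DISABLE runs locally instead of forcing the kernel to interrupt a remote CPU. As a rough, self-contained sketch of the save/pin/restore idea behind the affinity__setup()/affinity__set()/affinity__cleanup() calls in the diff below (a minimal sketch assuming plain sched_getaffinity()/sched_setaffinity(); the affinity_*_sketch names are invented for this example and this is not the perf code itself):

  #define _GNU_SOURCE
  #include <sched.h>
  #include <stdio.h>

  static cpu_set_t saved_mask;    /* original mask, restored at cleanup */

  /* Setup: remember which CPUs this thread was allowed to run on. */
  static int affinity_setup_sketch(void)
  {
          return sched_getaffinity(0, sizeof(saved_mask), &saved_mask);
  }

  /* Pin this thread onto @cpu so the next per-CPU ioctl runs locally. */
  static int affinity_set_sketch(int cpu)
  {
          cpu_set_t mask;

          CPU_ZERO(&mask);
          CPU_SET(cpu, &mask);
          return sched_setaffinity(0, sizeof(mask), &mask);
  }

  /* Cleanup: undo the pinning once every CPU has been visited. */
  static void affinity_cleanup_sketch(void)
  {
          sched_setaffinity(0, sizeof(saved_mask), &saved_mask);
  }

  int main(void)
  {
          if (affinity_setup_sketch())
                  return 1;
          if (affinity_set_sketch(0) == 0)
                  printf("pinned to CPU 0; enable/disable ioctls are now local\n");
          affinity_cleanup_sketch();
          return 0;
  }

Built on its own this only demonstrates the pattern; in the patch the pinning happens once per CPU inside the outer evlist__for_each_cpu() loop, and affinity__cleanup() runs after all CPUs have been visited.
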
Signed-off-by: Andi Kleen <ak@...ux.intel.com>
Acked-by: Jiri Olsa <jolsa@...nel.org>
Link: http://lore.kernel.org/lkml/20191121001522.180827-13-andi@firstfloor.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@...hat.com>
---
tools/perf/util/evlist.c | 40 ++++++++++++++++++++++++++++++++++++---
1 file changed, 37 insertions(+), 3 deletions(-)

diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 096a4ea..1548237 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -378,11 +378,28 @@ bool evsel__cpu_iter_skip(struct evsel *ev, int cpu)
 void evlist__disable(struct evlist *evlist)
 {
 	struct evsel *pos;
+	struct affinity affinity;
+	int cpu, i;
+
+	if (affinity__setup(&affinity) < 0)
+		return;
 
+	evlist__for_each_cpu(evlist, i, cpu) {
+		affinity__set(&affinity, cpu);
+
+		evlist__for_each_entry(evlist, pos) {
+			if (evsel__cpu_iter_skip(pos, cpu))
+				continue;
+			if (pos->disabled || !perf_evsel__is_group_leader(pos) || !pos->core.fd)
+				continue;
+			evsel__disable_cpu(pos, pos->cpu_iter - 1);
+		}
+	}
+	affinity__cleanup(&affinity);
 	evlist__for_each_entry(evlist, pos) {
-		if (pos->disabled || !perf_evsel__is_group_leader(pos) || !pos->core.fd)
+		if (!perf_evsel__is_group_leader(pos) || !pos->core.fd)
 			continue;
-		evsel__disable(pos);
+		pos->disabled = true;
 	}
 
 	evlist->enabled = false;
@@ -391,11 +408,28 @@ void evlist__disable(struct evlist *evlist)
 void evlist__enable(struct evlist *evlist)
 {
 	struct evsel *pos;
+	struct affinity affinity;
+	int cpu, i;
+
+	if (affinity__setup(&affinity) < 0)
+		return;
 
+	evlist__for_each_cpu(evlist, i, cpu) {
+		affinity__set(&affinity, cpu);
+
+		evlist__for_each_entry(evlist, pos) {
+			if (evsel__cpu_iter_skip(pos, cpu))
+				continue;
+			if (!perf_evsel__is_group_leader(pos) || !pos->core.fd)
+				continue;
+			evsel__enable_cpu(pos, pos->cpu_iter - 1);
+		}
+	}
+	affinity__cleanup(&affinity);
 	evlist__for_each_entry(evlist, pos) {
 		if (!perf_evsel__is_group_leader(pos) || !pos->core.fd)
 			continue;
-		evsel__enable(pos);
+		pos->disabled = false;
 	}
 
 	evlist->enabled = true;
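
The hunks above lean on the CPU-ordered iterators added earlier in this series (evlist__for_each_cpu(), evsel__cpu_iter_skip() and the per-event cpu_iter counter), under the assumption that pos->cpu_iter - 1 is the index into that event's own CPU map handed to evsel__disable_cpu()/evsel__enable_cpu(). Below is a hypothetical, self-contained sketch of that iteration pattern; the sketch_* names and types are made-up stand-ins for the real evlist/evsel structures, not the perf definitions:

  #define _GNU_SOURCE
  #include <sched.h>
  #include <stdio.h>

  /* Hypothetical stand-in for an evsel: the CPUs it is opened on, plus
   * the per-event cursor that the real code calls cpu_iter. */
  struct sketch_evsel {
          const char *name;
          const int *cpus;        /* sorted CPU map of this event */
          int ncpus;
          int cpu_iter;           /* cursor into cpus[], reset before a pass */
  };

  /* Skip @ev unless @cpu is its next CPU; on a match advance the cursor,
   * so cursor - 1 is the index just matched in ev->cpus. */
  static int sketch_cpu_iter_skip(struct sketch_evsel *ev, int cpu)
  {
          if (ev->cpu_iter < ev->ncpus && ev->cpus[ev->cpu_iter] == cpu) {
                  ev->cpu_iter++;
                  return 0;
          }
          return 1;
  }

  /* Stand-in for the per-CPU ioctl (evsel__disable_cpu() in the patch). */
  static void sketch_disable_one(struct sketch_evsel *ev, int cpu_idx)
  {
          printf("disable %s at its cpu map index %d (cpu %d)\n",
                 ev->name, cpu_idx, ev->cpus[cpu_idx]);
  }

  int main(void)
  {
          static const int cpus_a[] = { 0, 1, 2, 3 };
          static const int cpus_b[] = { 2, 3 };           /* event limited to CPUs 2-3 */
          static const int all_cpus[] = { 0, 1, 2, 3 };   /* union of both maps, sorted */
          struct sketch_evsel evsels[] = {
                  { "event-a", cpus_a, 4, 0 },
                  { "event-b", cpus_b, 2, 0 },
          };
          cpu_set_t mask;
          unsigned int i, j;

          for (i = 0; i < sizeof(all_cpus) / sizeof(all_cpus[0]); i++) {
                  /* Pin once per CPU so every toggle below is a local ioctl. */
                  CPU_ZERO(&mask);
                  CPU_SET(all_cpus[i], &mask);
                  if (sched_setaffinity(0, sizeof(mask), &mask))
                          perror("sched_setaffinity");

                  for (j = 0; j < sizeof(evsels) / sizeof(evsels[0]); j++) {
                          if (sketch_cpu_iter_skip(&evsels[j], all_cpus[i]))
                                  continue;
                          /* cpu_iter - 1 == index just matched in this event's map */
                          sketch_disable_one(&evsels[j], evsels[j].cpu_iter - 1);
                  }
          }
          return 0;
  }

Each CPU is visited exactly once in the outer loop, so the tool pays at most one migration per CPU, and every enable/disable issued from inside the loop targets events on the CPU the tool is currently running on, which is where the ioctl savings in the numbers above come from.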