Message-Id: <20220117160931.1191712-3-acme@kernel.org>
Date:   Mon, 17 Jan 2022 13:09:29 -0300
From:   Arnaldo Carvalho de Melo <acme@...nel.org>
To:     Andi Kleen <andi@...stfloor.org>
Cc:     linux-kernel@...r.kernel.org, linux-perf-users@...r.kernel.org,
        Arnaldo Carvalho de Melo <acme@...hat.com>,
        Adrian Hunter <adrian.hunter@...el.com>,
        Ian Rogers <irogers@...gle.com>, Jiri Olsa <jolsa@...nel.org>,
        Namhyung Kim <namhyung@...nel.org>
Subject: [PATCH 2/4] perf stat: No need to setup affinities when starting a workload

From: Arnaldo Carvalho de Melo <acme@...hat.com>

I.e. the simple:

  $ perf stat sleep 1

Uses a dummy CPU map and thus there is no need to set up/clean up
affinities to avoid IPIs, etc.
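
For those not steeped in the cpu_map code: the "dummy" map is, in
essence, the single entry, CPU == -1 map that the evlist falls back to
when only the forked workload's threads are being counted, i.e. there
is no per-CPU iteration that would benefit from temporarily pinning
the tool to the CPU being acted on. A minimal sketch of that
convention, using made up names rather than the real tools/lib/perf
types:

  /* Sketch only, not the perf sources: a single -1 entry means
   * "no specific CPU", i.e. per-thread counting. */
  struct sketch_cpu_map {
          int nr;      /* number of entries */
          int map[1];  /* map[0] == -1 for the dummy case */
  };

  static int sketch_cpu_map__is_dummy(const struct sketch_cpu_map *cpus)
  {
          return cpus->nr == 1 && cpus->map[0] == -1;
  }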

With this we're down to a single sched_getaffinity() call, in the
libnuma initialization, which can probably be removed in a follow-up
patch.
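
A hypothetical way to double check that on a given machine (not output
from this patch's testing) is to trace just the affinity syscalls made
while running the workload, e.g.:

  $ perf trace -e sched_getaffinity,sched_setaffinity perf stat sleep 1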

Cc: Adrian Hunter <adrian.hunter@...el.com>
Cc: Andi Kleen <andi@...stfloor.org>
Cc: Ian Rogers <irogers@...gle.com>
Cc: Jiri Olsa <jolsa@...nel.org>
Cc: Namhyung Kim <namhyung@...nel.org>
Signed-off-by: Arnaldo Carvalho de Melo <acme@...hat.com>
---
 tools/perf/builtin-stat.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 973ade18b72a9479..934e992c966fb950 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -788,7 +788,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 	const bool forks = (argc > 0);
 	bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
 	struct evlist_cpu_iterator evlist_cpu_itr;
-	struct affinity affinity;
+	struct affinity saved_affinity, *affinity = NULL;
 	int err;
 	bool second_pass = false;
 
@@ -803,8 +803,11 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 	if (group)
 		evlist__set_leader(evsel_list);
 
-	if (affinity__setup(&affinity) < 0)
-		return -1;
+	if (!cpu_map__is_dummy(evsel_list->core.cpus)) {
+		if (affinity__setup(&saved_affinity) < 0)
+			return -1;
+		affinity = &saved_affinity;
+	}
 
 	evlist__for_each_entry(evsel_list, counter) {
 		if (bpf_counter__load(counter, &target))
@@ -813,7 +816,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 			all_counters_use_bpf = false;
 	}
 
-	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
 		counter = evlist_cpu_itr.evsel;
 
 		/*
@@ -869,7 +872,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 		 */
 
 		/* First close errored or weak retry */
-		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
 			counter = evlist_cpu_itr.evsel;
 
 			if (!counter->reset_group && !counter->errored)
@@ -878,7 +881,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 			perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
 		}
 		/* Now reopen weak */
-		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
 			counter = evlist_cpu_itr.evsel;
 
 			if (!counter->reset_group && !counter->errored)
@@ -904,7 +907,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 			counter->supported = true;
 		}
 	}
-	affinity__cleanup(&affinity);
+	affinity__cleanup(affinity);
 
 	evlist__for_each_entry(evsel_list, counter) {
 		if (!counter->supported) {
-- 
2.34.1
