Message-ID: <YS6Cqw8oHJ72zIRK@kernel.org>
Date: Tue, 31 Aug 2021 16:27:39 -0300
From: Arnaldo Carvalho de Melo <acme@...nel.org>
To: Riccardo Mancini <rickyman7@...il.com>
Cc: Ian Rogers <irogers@...gle.com>,
Namhyung Kim <namhyung@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Mark Rutland <mark.rutland@....com>,
Jiri Olsa <jolsa@...hat.com>, linux-kernel@...r.kernel.org,
linux-perf-users@...r.kernel.org
Subject: Re: [RFC PATCH v1 15/37] perf evsel: separate open preparation from
open itself
On Sat, Aug 21, 2021 at 11:19:21AM +0200, Riccardo Mancini wrote:
> This is a preparatory patch for the following patches, with the goal of
> separating, in evsel__open_cpu(), the actual perf_event_open(), which
> could be performed in parallel, from the existing fallback mechanisms,
> which should be handled sequentially.
Thanks, applied as the end result is equivalent and we erode this
patchkit a bit more.
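
Just to spell out, for anyone skimming the series, where I understand
this is heading: the cheap, non-racy setup gets done up front, so that
the actual syscalls can later be issued in parallel while any fallback
handling stays serialized. A toy model of that split, with made-up
names and nothing taken from this patch, would be:

#include <stdio.h>

/* Stand-in for an evsel; purely illustrative. */
struct toy_event {
	int prepared;
	int fd;
};

/* Sequential phase: allocation and argument fixup, no syscalls. */
static int toy_prepare(struct toy_event *ev)
{
	ev->prepared = 1;
	return 0;
}

/* Phase that could run in parallel: one "open" per event. */
static int toy_open(struct toy_event *ev)
{
	if (!ev->prepared)
		return -1;
	ev->fd = 42;	/* stands in for perf_event_open() */
	return 0;
}

int main(void)
{
	struct toy_event evs[4] = { { 0, 0 } };
	int i;

	for (i = 0; i < 4; i++)		/* preparation, kept sequential */
		if (toy_prepare(&evs[i]))
			return 1;

	for (i = 0; i < 4; i++)		/* the loop that could go parallel */
		if (toy_open(&evs[i]))
			return 1;	/* real fallbacks would be retried serially */

	printf("opened %d toy events\n", i);
	return 0;
}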
- Arnaldo
> This patch separates the first lines of evsel__open_cpu into a new
> __evsel__prepare_open function.
>
> Signed-off-by: Riccardo Mancini <rickyman7@...il.com>
> ---
> tools/perf/util/evsel.c | 45 +++++++++++++++++++++++++++++++----------
> 1 file changed, 34 insertions(+), 11 deletions(-)
>
> diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
> index 7b4bb3229a16524e..ddf324e2e17a0951 100644
> --- a/tools/perf/util/evsel.c
> +++ b/tools/perf/util/evsel.c
> @@ -1746,22 +1746,20 @@ static int perf_event_open(struct evsel *evsel,
> return fd;
> }
>
> -static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
> - struct perf_thread_map *threads,
> - int start_cpu, int end_cpu)
> +
> +static struct perf_cpu_map *empty_cpu_map;
> +static struct perf_thread_map *empty_thread_map;
> +
> +static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
> + struct perf_thread_map *threads)
> {
> - int cpu, thread, nthreads;
> - unsigned long flags = PERF_FLAG_FD_CLOEXEC;
> - int pid = -1, err, old_errno;
> - enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
> + int nthreads;
>
> if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) ||
> (perf_missing_features.aux_output && evsel->core.attr.aux_output))
> return -EINVAL;
>
> if (cpus == NULL) {
> - static struct perf_cpu_map *empty_cpu_map;
> -
> if (empty_cpu_map == NULL) {
> empty_cpu_map = perf_cpu_map__dummy_new();
> if (empty_cpu_map == NULL)
> @@ -1772,8 +1770,6 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
> }
>
> if (threads == NULL) {
> - static struct perf_thread_map *empty_thread_map;
> -
> if (empty_thread_map == NULL) {
> empty_thread_map = thread_map__new_by_tid(-1);
> if (empty_thread_map == NULL)
> @@ -1792,6 +1788,33 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
> perf_evsel__alloc_fd(&evsel->core, cpus->nr, nthreads) < 0)
> return -ENOMEM;
>
> + return 0;
> +}
> +
> +static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
> + struct perf_thread_map *threads,
> + int start_cpu, int end_cpu)
> +{
> + int cpu, thread, nthreads;
> + unsigned long flags = PERF_FLAG_FD_CLOEXEC;
> + int pid = -1, err, old_errno;
> + enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
> +
> + err = __evsel__prepare_open(evsel, cpus, threads);
> + if (err)
> + return err;
> +
> + if (cpus == NULL)
> + cpus = empty_cpu_map;
> +
> + if (threads == NULL)
> + threads = empty_thread_map;
> +
> + if (evsel->core.system_wide)
> + nthreads = 1;
> + else
> + nthreads = threads->nr;
> +
> if (evsel->cgrp) {
> flags |= PERF_FLAG_PID_CGROUP;
> pid = evsel->cgrp->fd;
> --
> 2.31.1
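
One note on the resulting shape, for whoever touches this next:
__evsel__prepare_open() substitutes the dummy maps only locally and does
not hand them back, which is why empty_cpu_map/empty_thread_map move to
file scope and why evsel__open_cpu() repeats the NULL-map and nthreads
fixup before its open loop. My rough reading of the call flow after this
patch, as a sketch rather than the literal code:

/*
 * evsel__open_cpu(evsel, cpus, threads, start_cpu, end_cpu)
 *   -> __evsel__prepare_open(evsel, cpus, threads)
 *        - bail out if a required feature is known to be missing
 *        - substitute empty_cpu_map / empty_thread_map for NULL
 *        - compute nthreads and allocate evsel->core.fd
 *   -> substitute the dummy maps and recompute nthreads locally
 *   -> per cpu/thread: perf_event_open() plus the fallback handling
 */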
--
- Arnaldo