Message-ID: <20201120100125.GB94830@google.com>
Date: Fri, 20 Nov 2020 19:01:25 +0900
From: Namhyung Kim <namhyung@...nel.org>
To: Alexey Budankov <alexey.budankov@...ux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@...nel.org>,
Jiri Olsa <jolsa@...hat.com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
linux-kernel <linux-kernel@...r.kernel.org>,
Andi Kleen <ak@...ux.intel.com>,
Adrian Hunter <adrian.hunter@...el.com>,
Alexey Bayduraev <alexey.v.bayduraev@...ux.intel.com>,
Alexander Antonov <alexander.antonov@...ux.intel.com>
Subject: Re: [PATCH v3 01/12] perf record: introduce thread affinity and mmap
masks
On Mon, Nov 16, 2020 at 03:14:50PM +0300, Alexey Budankov wrote:
>
> Introduce affinity and mmap thread masks. Thread affinity mask
> defines cpus that a thread is allowed to run on. Thread maps
> mask defines mmap data buffers the thread serves to stream
> profiling data from.
>
> Signed-off-by: Alexey Budankov <alexey.budankov@...ux.intel.com>
> ---
> tools/perf/builtin-record.c | 116 ++++++++++++++++++++++++++++++++++++
> 1 file changed, 116 insertions(+)
>
> diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
> index adf311d15d3d..82f009703ad7 100644
> --- a/tools/perf/builtin-record.c
> +++ b/tools/perf/builtin-record.c
[SNIP]
> +static int record__alloc_thread_masks(struct record *rec, int nr_threads, int nr_bits)
> +{
> + int t, ret;
> +
> + rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks)));
> + if (!rec->thread_masks) {
> + pr_err("Failed to allocate thread masks\n");
> + return -ENOMEM;
> + }
> +
> + for (t = 0; t < nr_threads; t++) {
> + ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits);
> + if (ret)
> + return ret;
> + record__thread_mask_clear(&rec->thread_masks[t]);
> + }
> +
> + return 0;
> +}
> +
> +static int record__init_thread_default_masks(struct record *rec, struct perf_cpu_map *cpus)
> +{
> + int ret;
> +
> + ret = record__alloc_thread_masks(rec, 1, cpu__max_cpu());
> + if (ret)
> + return ret;
> +
> + record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus);
> +
> + rec->nr_threads = 1;
> +
> + return 0;
> +}
> +
> +static int record__init_thread_masks(struct record *rec)
> +{
> + struct perf_cpu_map *cpus = rec->evlist->core.cpus;
> +
> + return record__init_thread_default_masks(rec, cpus);
> +}
> +
> +static int record__fini_thread_masks(struct record *rec)
> +{
> + int t;
> +
> + for (t = 0; t < rec->nr_threads; t++)
> + record__thread_mask_free(&rec->thread_masks[t]);
It might have failed while allocating rec->thread_masks; in that case the
partially allocated masks are never freed here, since rec->nr_threads is
still 0 at that point.
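
One way to avoid that, as a rough sketch on top of this patch (untested, and
assuming record__thread_mask_alloc() releases whatever it allocated itself
when it fails), would be to unwind inside record__alloc_thread_masks() so a
mid-loop failure does not leave the earlier masks behind:

static int record__alloc_thread_masks(struct record *rec, int nr_threads, int nr_bits)
{
	int t, ret;

	rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks)));
	if (!rec->thread_masks) {
		pr_err("Failed to allocate thread masks\n");
		return -ENOMEM;
	}

	for (t = 0; t < nr_threads; t++) {
		ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits);
		if (ret) {
			pr_err("Failed to allocate thread masks[%d]\n", t);
			goto out_free;
		}
		record__thread_mask_clear(&rec->thread_masks[t]);
	}

	return 0;

out_free:
	/* free only the masks that were fully allocated before the failure */
	while (--t >= 0)
		record__thread_mask_free(&rec->thread_masks[t]);
	zfree(&rec->thread_masks);
	return ret;
}

That way record__fini_thread_masks() only ever sees fully initialized masks,
and the early error path in cmd_record() can stay as it is.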
Thanks
Namhyung
> +
> + zfree(&rec->thread_masks);
> +
> + rec->nr_threads = 0;
> +
> + return 0;
> +}
> +
> int cmd_record(int argc, const char **argv)
> {
> int err;
> @@ -2821,6 +2930,12 @@ int cmd_record(int argc, const char **argv)
> goto out;
> }
>
> + err = record__init_thread_masks(rec);
> + if (err) {
> + pr_err("record__init_thread_masks failed, error %d\n", err);
> + goto out;
> + }
> +
> if (rec->opts.nr_cblocks > nr_cblocks_max)
> rec->opts.nr_cblocks = nr_cblocks_max;
> pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
> @@ -2839,6 +2954,7 @@ int cmd_record(int argc, const char **argv)
> symbol__exit();
> auxtrace_record__free(rec->itr);
> out_opts:
> + record__fini_thread_masks(rec);
> evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
> return err;
> }
> --
> 2.24.1
>