Message-ID: <YazXSA3iI2PkygVe@krava>
Date: Sun, 5 Dec 2021 16:14:16 +0100
From: Jiri Olsa <jolsa@...hat.com>
To: Alexey Bayduraev <alexey.v.bayduraev@...ux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
linux-kernel <linux-kernel@...r.kernel.org>,
Andi Kleen <ak@...ux.intel.com>,
Adrian Hunter <adrian.hunter@...el.com>,
Alexander Antonov <alexander.antonov@...ux.intel.com>,
Alexei Budankov <abudankov@...wei.com>,
Riccardo Mancini <rickyman7@...il.com>
Subject: Re: [PATCH v12 13/16] perf record: Extend --threads command line
option
On Tue, Nov 23, 2021 at 05:08:09PM +0300, Alexey Bayduraev wrote:
SNIP
> 	return 0;
> }
> @@ -3263,6 +3327,17 @@ static void record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_c
> 		set_bit(cpus->map[c], mask->bits);
> }
> 
> +static void record__mmap_cpu_mask_init_spec(struct mmap_cpu_mask *mask, const char *mask_spec)
> +{
> +	struct perf_cpu_map *cpus;
> +
> +	cpus = perf_cpu_map__new(mask_spec);
> +	if (cpus) {
> +		record__mmap_cpu_mask_init(mask, cpus);
> +		perf_cpu_map__put(cpus);
> +	}
we should bail out here if perf_cpu_map__new() fails, instead of silently continuing with an empty mask
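
something along these lines (just a sketch, untested; making it return int and
picking -ENOMEM are my assumptions) should do:

static int record__mmap_cpu_mask_init_spec(struct mmap_cpu_mask *mask, const char *mask_spec)
{
	struct perf_cpu_map *cpus;

	/* NULL means the spec could not be parsed or allocation failed */
	cpus = perf_cpu_map__new(mask_spec);
	if (!cpus)
		return -ENOMEM;

	record__mmap_cpu_mask_init(mask, cpus);
	perf_cpu_map__put(cpus);
	return 0;
}

then both calls in record__init_thread_masks_spec() below can check the return
value and goto out_free_full_mask (or whatever cleanup fits) instead of going
on with an empty mask
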
jirka
> +}
> +
> static void record__free_thread_masks(struct record *rec, int nr_threads)
> {
> 	int t;
> @@ -3324,6 +3399,214 @@ static int record__init_thread_cpu_masks(struct record *rec, struct perf_cpu_map
> 	return 0;
> }
> 
> +static int record__init_thread_masks_spec(struct record *rec, struct perf_cpu_map *cpus,
> +					  const char **maps_spec, const char **affinity_spec,
> +					  u32 nr_spec)
> +{
> +	u32 s;
> +	int ret = 0, t = 0;
> +	struct mmap_cpu_mask cpus_mask;
> +	struct thread_mask thread_mask, full_mask, *prev_masks;
> +
> +	ret = record__mmap_cpu_mask_alloc(&cpus_mask, cpu__max_cpu());
> +	if (ret)
> +		goto out;
> +	record__mmap_cpu_mask_init(&cpus_mask, cpus);
> +	ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu());
> +	if (ret)
> +		goto out_free_cpu_mask;
> +	ret = record__thread_mask_alloc(&full_mask, cpu__max_cpu());
> +	if (ret)
> +		goto out_free_thread_mask;
> +	record__thread_mask_clear(&full_mask);
> +
> +	for (s = 0; s < nr_spec; s++) {
> +		record__thread_mask_clear(&thread_mask);
> +
> +		record__mmap_cpu_mask_init_spec(&thread_mask.maps, maps_spec[s]);
> +		record__mmap_cpu_mask_init_spec(&thread_mask.affinity, affinity_spec[s]);
> +
> +		if (!bitmap_and(thread_mask.maps.bits, thread_mask.maps.bits,
> +				cpus_mask.bits, thread_mask.maps.nbits) ||
> +		    !bitmap_and(thread_mask.affinity.bits, thread_mask.affinity.bits,
> +				cpus_mask.bits, thread_mask.affinity.nbits))
> +			continue;
> +
> +		ret = record__thread_mask_intersects(&thread_mask, &full_mask);
> +		if (ret)
> +			goto out_free_full_mask;
> +		record__thread_mask_or(&full_mask, &full_mask, &thread_mask);
> +
> +		prev_masks = rec->thread_masks;
> +		rec->thread_masks = realloc(rec->thread_masks,
> +					    (t + 1) * sizeof(struct thread_mask));
> +		if (!rec->thread_masks) {
> +			pr_err("Failed to allocate thread masks\n");
> +			rec->thread_masks = prev_masks;
> +			ret = -ENOMEM;
> +			goto out_free_full_mask;
> +		}
> +		rec->thread_masks[t] = thread_mask;
> +		if (verbose) {
> +			pr_debug("thread_masks[%d]: ", t);
> +			mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
> +			pr_debug("thread_masks[%d]: ", t);
> +			mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
> +		}
> +		t++;
> +		ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu());
> +		if (ret)
> +			goto out_free_full_mask;
> +	}
> +
> +	rec->nr_threads = t;
> +	pr_debug("nr_threads: %d\n", rec->nr_threads);
> +
> +	if (rec->nr_threads <= 0)
> +		ret = -EINVAL;
> +
> +out_free_full_mask:
> +	record__thread_mask_free(&full_mask);
> +out_free_thread_mask:
> +	record__thread_mask_free(&thread_mask);
> +out_free_cpu_mask:
> +	record__mmap_cpu_mask_free(&cpus_mask);
> +out:
> +	return ret;
> +}
SNIP