Message-ID: <YazXUB4C9aTi9h+0@krava>
Date: Sun, 5 Dec 2021 16:14:24 +0100
From: Jiri Olsa <jolsa@...hat.com>
To: Alexey Bayduraev <alexey.v.bayduraev@...ux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
linux-kernel <linux-kernel@...r.kernel.org>,
Andi Kleen <ak@...ux.intel.com>,
Adrian Hunter <adrian.hunter@...el.com>,
Alexander Antonov <alexander.antonov@...ux.intel.com>,
Alexei Budankov <abudankov@...wei.com>,
Riccardo Mancini <rickyman7@...il.com>
Subject: Re: [PATCH v12 13/16] perf record: Extend --threads command line option
On Tue, Nov 23, 2021 at 05:08:09PM +0300, Alexey Bayduraev wrote:
SNIP
>
> +static int record__init_thread_masks_spec(struct record *rec, struct perf_cpu_map *cpus,
> +					   const char **maps_spec, const char **affinity_spec,
> +					   u32 nr_spec)
> +{
> +	u32 s;
> +	int ret = 0, t = 0;
> +	struct mmap_cpu_mask cpus_mask;
> +	struct thread_mask thread_mask, full_mask, *prev_masks;
> +
> +	ret = record__mmap_cpu_mask_alloc(&cpus_mask, cpu__max_cpu());
> +	if (ret)
> +		goto out;
> +	record__mmap_cpu_mask_init(&cpus_mask, cpus);
> +	ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu());
> +	if (ret)
> +		goto out_free_cpu_mask;
> +	ret = record__thread_mask_alloc(&full_mask, cpu__max_cpu());
> +	if (ret)
> +		goto out_free_thread_mask;
> +	record__thread_mask_clear(&full_mask);
> +
> +	for (s = 0; s < nr_spec; s++) {
> +		record__thread_mask_clear(&thread_mask);
> +
> +		record__mmap_cpu_mask_init_spec(&thread_mask.maps, maps_spec[s]);
> +		record__mmap_cpu_mask_init_spec(&thread_mask.affinity, affinity_spec[s]);
> +
> +		if (!bitmap_and(thread_mask.maps.bits, thread_mask.maps.bits,
> +				cpus_mask.bits, thread_mask.maps.nbits) ||
> +		    !bitmap_and(thread_mask.affinity.bits, thread_mask.affinity.bits,
> +				cpus_mask.bits, thread_mask.affinity.nbits))
> +			continue;
> +
> +		ret = record__thread_mask_intersects(&thread_mask, &full_mask);
> +		if (ret)
> +			goto out_free_full_mask;
> +		record__thread_mask_or(&full_mask, &full_mask, &thread_mask);
please add a comment explaining the intersection checks, i.e. what's
allowed and what's not, and also put some info about that into the
documentation

thanks,
jirka
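For illustration only, here is a minimal, self-contained sketch of the rule the
intersection check above appears to enforce: a CPU may be listed in at most one
maps/affinity spec, so every new mask must be disjoint from the union of the
masks accepted so far. This is not code from the patch; spec_is_allowed() and
the plain unsigned-long masks are made up for the demo.

/*
 * Illustrative sketch, not from the patch: each spec's CPU mask must be
 * disjoint from the union of the masks accepted so far.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long cpumask_t;	/* one bit per CPU, enough for the demo */

/* hypothetical helper: reject a spec that overlaps already-accepted CPUs */
static bool spec_is_allowed(cpumask_t new_spec, cpumask_t accepted_union)
{
	return (new_spec & accepted_union) == 0;
}

int main(void)
{
	cpumask_t accepted = 0;
	cpumask_t specs[] = { 0x0f /* CPUs 0-3 */, 0xf0 /* CPUs 4-7 */, 0x3c /* CPUs 2-5 */ };

	for (unsigned int i = 0; i < sizeof(specs) / sizeof(specs[0]); i++) {
		if (!spec_is_allowed(specs[i], accepted)) {
			printf("spec %u overlaps an earlier spec, rejected\n", i);
			continue;
		}
		accepted |= specs[i];	/* mirrors record__thread_mask_or() on full_mask */
		printf("spec %u accepted\n", i);
	}
	return 0;
}

In the quoted code the running union is full_mask and the disjointness test is
record__thread_mask_intersects(), which is the check the requested comment and
documentation would describe.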
> +
> +		prev_masks = rec->thread_masks;
> +		rec->thread_masks = realloc(rec->thread_masks,
> +					    (t + 1) * sizeof(struct thread_mask));
> +		if (!rec->thread_masks) {
> +			pr_err("Failed to allocate thread masks\n");
> +			rec->thread_masks = prev_masks;
> +			ret = -ENOMEM;
> +			goto out_free_full_mask;
> +		}
> +		rec->thread_masks[t] = thread_mask;
> +		if (verbose) {
> +			pr_debug("thread_masks[%d]: ", t);
> +			mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
> +			pr_debug("thread_masks[%d]: ", t);
> +			mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
> +		}
> +		t++;
> +		ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu());
> +		if (ret)
> +			goto out_free_full_mask;
> +	}
SNIP