Message-ID: <CAM9d7chAgocCfA-m8YzeYtb31N53OPiqvzTU9kstts=c2EDCBg@mail.gmail.com>
Date:   Tue, 13 Apr 2021 10:02:11 +0900
From:   Namhyung Kim <namhyung@...nel.org>
To:     "Bayduraev, Alexey V" <alexey.v.bayduraev@...ux.intel.com>
Cc:     Arnaldo Carvalho de Melo <acme@...nel.org>,
        Jiri Olsa <jolsa@...hat.com>,
        Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>,
        linux-kernel <linux-kernel@...r.kernel.org>,
        Andi Kleen <ak@...ux.intel.com>,
        Adrian Hunter <adrian.hunter@...el.com>,
        Alexei Budankov <abudankov@...wei.com>,
        Alexander Antonov <alexander.antonov@...ux.intel.com>
Subject: Re: [PATCH v4 08/12] perf record: introduce --threads=<spec> command line option

Hello,

On Tue, Apr 6, 2021 at 5:49 PM Bayduraev, Alexey V
<alexey.v.bayduraev@...ux.intel.com> wrote:
>
>
> Provide the --threads option in the perf record command line interface.
> The option value is a set of masks specifying the cpus to be monitored
> by data streaming threads, together with the layout of those threads in
> the system topology. The masks can be filtered using the cpu mask
> provided via the -C option.
>
> The specification value can be a user-defined list of masks. Masks
> separated by a colon each define the cpus to be monitored by one
> thread, and the affinity mask of that thread is separated from its cpu
> mask by a slash. For example:
> <cpu mask 1>/<affinity mask 1>:<cpu mask 2>/<affinity mask 2>
> specifies a parallel-threads layout consisting of two threads, each
> monitoring its corresponding set of cpus.
>
> The specification value can also be one of the strings "cpu", "core"
> or "socket", meaning that a data streaming thread is created for every
> cpu, core or socket, monitoring distinct cpus or cpus grouped by core
> or socket.
>
> If the option is provided with no value or an empty value, it defaults
> to the per-cpu parallel-threads layout, creating a data streaming
> thread for every cpu being monitored.
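
Just to illustrate for myself how the spec reads (hypothetical masks,
assuming cpu-list style values):

  perf record --threads=0-3/0:4-7/4 ...   # two threads: cpus 0-3 pinned to
                                           # cpu 0, cpus 4-7 pinned to cpu 4
  perf record --threads=core ...           # one thread per monitored core
  perf record --threads ...                # default: one thread per cpu
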
>
> Feature design and implementation are based on prototypes [1], [2].
>
> [1] git clone https://git.kernel.org/pub/scm/linux/kernel/git/jolsa/perf.git -b perf/record_threads
> [2] https://lore.kernel.org/lkml/20180913125450.21342-1-jolsa@kernel.org/
>
> Suggested-by: Jiri Olsa <jolsa@...nel.org>
> Suggested-by: Namhyung Kim <namhyung@...nel.org>
> Signed-off-by: Alexey Bayduraev <alexey.v.bayduraev@...ux.intel.com>
> ---
[SNIP]
> +static int record__init_thread_masks_spec(struct record *rec, struct perf_cpu_map *cpus,
> +                                         char **maps_spec, char **affinity_spec, u32 nr_spec)
> +{
> +       u32 s;
> +       int ret, nr_threads = 0;
> +       struct mmap_cpu_mask cpus_mask;
> +       struct thread_mask thread_mask, full_mask;
> +
> +       ret = record__mmap_cpu_mask_alloc(&cpus_mask, cpu__max_cpu());
> +       if (ret)
> +               return ret;
> +       record__mmap_cpu_mask_init(&cpus_mask, cpus);
> +       ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu());
> +       if (ret)
> +               goto out_free_cpu_mask;
> +       ret = record__thread_mask_alloc(&full_mask, cpu__max_cpu());
> +       if (ret)
> +               goto out_free_thread_mask;
> +       record__thread_mask_clear(&full_mask);
> +
> +       for (s = 0; s < nr_spec; s++) {
> +               record__thread_mask_clear(&thread_mask);
> +
> +               record__mmap_cpu_mask_init_spec(&thread_mask.maps, maps_spec[s]);
> +               record__mmap_cpu_mask_init_spec(&thread_mask.affinity, affinity_spec[s]);
> +
> +               if (!bitmap_and(thread_mask.maps.bits, thread_mask.maps.bits,
> +                               cpus_mask.bits, thread_mask.maps.nbits) ||
> +                   !bitmap_and(thread_mask.affinity.bits, thread_mask.affinity.bits,
> +                               cpus_mask.bits, thread_mask.affinity.nbits))
> +                       continue;
> +
> +               ret = record__thread_mask_intersects(&thread_mask, &full_mask);
> +               if (ret)
> +                       return ret;

I think you should free the other masks here instead of returning directly.
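
Maybe something like this (just an untested sketch, reusing the existing
labels), so the already-allocated masks get freed:

		ret = record__thread_mask_intersects(&thread_mask, &full_mask);
		if (ret)
			goto out_free_full_mask;

(and then the final 'return 0' at the bottom would need to become
'return ret' so the error actually propagates.)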

> +               record__thread_mask_or(&full_mask, &full_mask, &thread_mask);
> +
> +               rec->thread_masks = realloc(rec->thread_masks,
> +                                           (nr_threads + 1) * sizeof(struct thread_mask));
> +               if (!rec->thread_masks) {
> +                       pr_err("Failed to allocate thread masks\n");
> +                       ret = -ENOMEM;
> +                       goto out_free_full_mask;

But this will leak the old rec->thread_masks, as the pointer is
overwritten with NULL when realloc fails.
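
The usual fix is to realloc into a temporary pointer first, e.g. (rough,
untested sketch):

		struct thread_mask *tmp;

		tmp = realloc(rec->thread_masks,
			      (nr_threads + 1) * sizeof(struct thread_mask));
		if (!tmp) {
			pr_err("Failed to allocate thread masks\n");
			ret = -ENOMEM;
			goto out_free_full_mask;
		}
		rec->thread_masks = tmp;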


> +               }
> +               rec->thread_masks[nr_threads] = thread_mask;
> +               pr_debug("thread_masks[%d]: addr=", nr_threads);
> +               mmap_cpu_mask__scnprintf(&rec->thread_masks[nr_threads].maps, "maps");
> +               pr_debug("thread_masks[%d]: addr=", nr_threads);
> +               mmap_cpu_mask__scnprintf(&rec->thread_masks[nr_threads].affinity, "affinity");
> +               nr_threads++;
> +               ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu());
> +               if (ret)
> +                       return ret;

Ditto, use goto.
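
i.e. something like:

		ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu());
		if (ret)
			goto out_free_full_mask;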

> +       }
> +
> +       rec->nr_threads = nr_threads;
> +       pr_debug("threads: nr_threads=%d\n", rec->nr_threads);
> +
> +out_free_full_mask:
> +       record__thread_mask_free(&full_mask);
> +out_free_thread_mask:
> +       record__thread_mask_free(&thread_mask);
> +out_free_cpu_mask:
> +       record__mmap_cpu_mask_free(&cpus_mask);
> +
> +       return 0;
> +}

[SNIP]
> +
> +static int record__init_thread_user_masks(struct record *rec, struct perf_cpu_map *cpus)
> +{
> +       int t, ret;
> +       u32 s, nr_spec = 0;
> +       char **maps_spec = NULL, **affinity_spec = NULL;
> +       char *spec, *spec_ptr, *user_spec, *mask, *mask_ptr;
> +
> +       for (t = 0, user_spec = (char *)rec->opts.threads_user_spec; ; t++, user_spec = NULL) {
> +               spec = strtok_r(user_spec, ":", &spec_ptr);
> +               if (spec == NULL)
> +                       break;
> +               pr_debug(" spec[%d]: %s\n", t, spec);
> +               mask = strtok_r(spec, "/", &mask_ptr);
> +               if (mask == NULL)
> +                       break;
> +               pr_debug("  maps mask: %s\n", mask);
> +               maps_spec = realloc(maps_spec, (nr_spec + 1) * sizeof(char *));
> +               if (!maps_spec) {
> +                       pr_err("Failed to realloc maps_spec\n");
> +                       ret = -ENOMEM;
> +                       goto out_free_all_specs;

It'd crash in the cleanup loop, as maps_spec is NULL now.
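
Same pattern as above: realloc into a temporary so the old array survives
for the cleanup loop, e.g. (untested):

		char **tmp_spec;

		tmp_spec = realloc(maps_spec, (nr_spec + 1) * sizeof(char *));
		if (!tmp_spec) {
			pr_err("Failed to realloc maps_spec\n");
			ret = -ENOMEM;
			goto out_free_all_specs;
		}
		maps_spec = tmp_spec;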

> +               }
> +               maps_spec[nr_spec] = strdup(mask);

You'd better check the return value.
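
e.g. something like:

		maps_spec[nr_spec] = strdup(mask);
		if (!maps_spec[nr_spec]) {
			ret = -ENOMEM;
			goto out_free_all_specs;
		}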

> +               mask = strtok_r(NULL, "/", &mask_ptr);
> +               if (mask == NULL)
> +                       break;
> +               pr_debug("  affinity mask: %s\n", mask);
> +               affinity_spec = realloc(affinity_spec, (nr_spec + 1) * sizeof(char *));
> +               if (!maps_spec) {

s/maps/affinity/ and it has the same problem.

> +                       pr_err("Failed to realloc affinity_spec\n");
> +                       ret = -ENOMEM;
> +                       goto out_free_all_specs;
> +               }
> +               affinity_spec[nr_spec] = strdup(mask);

Check the return value.

Thanks,
Namhyung

> +               nr_spec++;
> +       }
> +
> +       ret = record__init_thread_masks_spec(rec, cpus, maps_spec, affinity_spec, nr_spec);
> +
> +out_free_all_specs:
> +       for (s = 0; s < nr_spec; s++) {
> +               free(maps_spec[s]);
> +               free(affinity_spec[s]);
> +       }
> +       free(affinity_spec);
> +       free(maps_spec);
> +
> +       return ret;
> +}
> +
