Message-ID: <CAM9d7cgdQQn5GYB7t++xuoMdeqPXiEkkcop69+rD06RAnu9-EQ@mail.gmail.com>
Date: Mon, 22 Apr 2024 12:45:45 -0700
From: Namhyung Kim <namhyung@...nel.org>
To: weilin.wang@...el.com
Cc: Ian Rogers <irogers@...gle.com>, Arnaldo Carvalho de Melo <acme@...nel.org>, 
	Peter Zijlstra <peterz@...radead.org>, Ingo Molnar <mingo@...hat.com>, 
	Alexander Shishkin <alexander.shishkin@...ux.intel.com>, Jiri Olsa <jolsa@...nel.org>, 
	Adrian Hunter <adrian.hunter@...el.com>, Kan Liang <kan.liang@...ux.intel.com>, 
	linux-perf-users@...r.kernel.org, linux-kernel@...r.kernel.org, 
	Perry Taylor <perry.taylor@...el.com>, Samantha Alt <samantha.alt@...el.com>, 
	Caleb Biggers <caleb.biggers@...el.com>
Subject: Re: [RFC PATCH v7 3/6] perf stat: Fork and launch perf record when
 perf stat needs to get retire latency value for a metric.

On Tue, Apr 2, 2024 at 2:46 PM <weilin.wang@...el.com> wrote:
>
> From: Weilin Wang <weilin.wang@...el.com>
>
> When a retire_latency value is used in a metric formula, perf stat forks a
> perf record process with the "-e" and "-W" options. Perf record collects
> the required retire_latency values in parallel while perf stat is
> collecting counting values.
>
> When perf stat stops counting, it sends SIGTERM to the perf record
> process and receives the sampling data back from perf record through a
> pipe. Perf stat then processes the received data to get the retire
> latency values and calculate the metric results.
>
> An additional thread is required to synchronize between perf stat and
> perf record when data is passed through the pipe.

I feel like I'm repeating some of the previous review comments...

Still, I think it's better to split this special-case handling out of the
normal perf stat code.
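
For example (just a sketch; the file and function names are illustrative,
not a concrete proposal):

  /* e.g. util/intel-tpebs.h */
  int  tpebs_start(struct perf_stat_config *config);  /* fork perf record, start reader thread */
  int  tpebs_stop(struct perf_stat_config *config);   /* SIGTERM, join reader, finish_command() */
  void tpebs_delete(struct perf_stat_config *config); /* free tpebs_events / tpebs_results */

Then __run_perf_stat() would only need a couple of calls instead of
carrying all of the setup and teardown inline.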

>
> Signed-off-by: Weilin Wang <weilin.wang@...el.com>
> Reviewed-by: Ian Rogers <irogers@...gle.com>
> ---
>  tools/perf/builtin-stat.c     | 212 +++++++++++++++++++++++++++++++++-
>  tools/perf/util/metricgroup.h |   8 ++
>  tools/perf/util/stat.h        |   2 +
>  3 files changed, 220 insertions(+), 2 deletions(-)
>
> diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
> index 4558b9d95441..2dcc1a12f7ef 100644
> --- a/tools/perf/builtin-stat.c
> +++ b/tools/perf/builtin-stat.c
> @@ -94,8 +94,13 @@
>  #include <perf/evlist.h>
>  #include <internal/threadmap.h>
>
> +#include "util/sample.h"
> +#include <sys/param.h>
> +#include <subcmd/run-command.h>
> +
>  #define DEFAULT_SEPARATOR      " "
>  #define FREEZE_ON_SMI_PATH     "devices/cpu/freeze_on_smi"
> +#define PERF_DATA              "-"
>
>  static void print_counters(struct timespec *ts, int argc, const char **argv);
>
> @@ -163,6 +168,8 @@ static struct perf_stat_config stat_config = {
>         .ctl_fd_ack             = -1,
>         .iostat_run             = false,
>         .tpebs_events           = LIST_HEAD_INIT(stat_config.tpebs_events),
> +       .tpebs_results          = LIST_HEAD_INIT(stat_config.tpebs_results),

Can we combine these two lists?  Why not move the result fields to tpebs_events?
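
Something like this (a sketch; using u64 for the sum is my preference,
since per-sample retire_lat values get accumulated over many samples):

  struct tpebs_event {
          struct list_head nd;
          /* Event name */
          const char *name;
          /* Event name with the TPEBS modifier R */
          const char *tpebs_name;
          /* Number of retire_latency samples */
          size_t count;
          /* Sum of the sampled retire_latency values */
          u64 sum;
  };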


> +       .tpebs_pid              = -1,
>  };
>
>  static bool cpus_map_matched(struct evsel *a, struct evsel *b)
> @@ -684,15 +691,173 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
>
>         if (child_pid != -1)
>                 kill(child_pid, SIGTERM);
> +       if (stat_config.tpebs_pid != -1)
> +               kill(stat_config.tpebs_pid, SIGTERM);
>         return COUNTER_FATAL;
>  }
>
> -static int __run_perf_record(void)
> +static int __run_perf_record(const char **record_argv)
>  {
> +       int i = 0;
> +       struct tpebs_event *e;
> +
>         pr_debug("Prepare perf record for retire_latency\n");
> +
> +       record_argv[i++] = "perf";
> +       record_argv[i++] = "record";
> +       record_argv[i++] = "-W";
> +       record_argv[i++] = "--synth=no";
> +
> +       if (stat_config.user_requested_cpu_list) {
> +               record_argv[i++] = "-C";
> +               record_argv[i++] = stat_config.user_requested_cpu_list;
> +       }
> +
> +       if (stat_config.system_wide)
> +               record_argv[i++] = "-a";
> +
> +       if (!stat_config.system_wide && !stat_config.user_requested_cpu_list) {
> +               pr_err("Require -a or -C option to run sampling.\n");
> +               return -ECANCELED;
> +       }
> +
> +       list_for_each_entry(e, &stat_config.tpebs_events, nd) {
> +               record_argv[i++] = "-e";
> +               record_argv[i++] = e->name;
> +       }
> +
> +       record_argv[i++] = "-o";
> +       record_argv[i++] = PERF_DATA;
> +
> +       return 0;
> +}
> +
> +static void prepare_run_command(struct child_process *cmd,
> +                              const char **argv)
> +{
> +       memset(cmd, 0, sizeof(*cmd));
> +       cmd->argv = argv;
> +       cmd->out = -1;
> +}
> +
> +static int prepare_perf_record(struct child_process *cmd)
> +{
> +       const char **record_argv;
> +       int ret;
> +
> +       record_argv = calloc(10 + 2 * stat_config.tpebs_event_size, sizeof(char *));
> +       if (!record_argv)
> +               return -1;
> +
> +       ret = __run_perf_record(record_argv);
> +       if (ret)
> +               return ret;

free(record_argv) before return.
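
i.e.

  ret = __run_perf_record(record_argv);
  if (ret) {
          free(record_argv);
          return ret;
  }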

> +
> +       prepare_run_command(cmd, record_argv);
> +       return start_command(cmd);
> +}
> +
> +struct perf_script {
> +       struct perf_tool        tool;
> +       struct perf_session     *session;
> +};
> +
> +static void tpebs_event_name__delete(struct tpebs_event *e)
> +{
> +       zfree(&e->name);
> +       zfree(&e->tpebs_name);
> +}
> +
> +static void tpebs_event__delete(struct tpebs_event *e)
> +{
> +       tpebs_event_name__delete(e);
> +       free(e);
> +}
> +
> +static void tpebs_retire_lat__delete(struct tpebs_retire_lat *r)
> +{
> +       tpebs_event_name__delete(&r->event);
> +       free(r);
> +}
> +
> +static void tpebs_data__delete(void)
> +{
> +       struct tpebs_retire_lat *r, *rtmp;
> +       struct tpebs_event *e, *etmp;
> +
> +       list_for_each_entry_safe(r, rtmp, &stat_config.tpebs_results, event.nd) {
> +               list_del_init(&r->event.nd);
> +               tpebs_retire_lat__delete(r);
> +       }
> +       list_for_each_entry_safe(e, etmp, &stat_config.tpebs_events, nd) {
> +               list_del_init(&e->nd);
> +               tpebs_event__delete(e);
> +       }
> +}
> +
> +static int process_sample_event(struct perf_tool *tool __maybe_unused,
> +                               union perf_event *event __maybe_unused,
> +                               struct perf_sample *sample,
> +                               struct evsel *evsel,
> +                               struct machine *machine __maybe_unused)
> +{
> +       int ret = 0;
> +       const char *evname;
> +       struct tpebs_retire_lat *t;
> +
> +       evname = evsel__name(evsel);
> +
> +       /*
> +        * Need to handle per core results? We are assuming average retire
> +        * latency value will be used. Save the number of samples and the sum of
> +        * retire latency value for each event.
> +        */
> +       list_for_each_entry(t, &stat_config.tpebs_results, event.nd) {
> +               if (!strcmp(evname, t->event.name)) {
> +                       t->count += 1;
> +                       t->sum += sample->retire_lat;
> +                       break;
> +               }
> +       }
> +
> +       return ret;
> +}
> +
> +static int process_feature_event(struct perf_session *session,
> +                                union perf_event *event)
> +{
> +       if (event->feat.feat_id < HEADER_LAST_FEATURE)
> +               return perf_event__process_feature(session, event);
>         return 0;
>  }
>
> +static void *__sample_reader(void *arg)
> +{
> +       struct child_process *cmd = arg;
> +       struct perf_session *session;
> +       struct perf_data data = {
> +               .mode = PERF_DATA_MODE_READ,
> +               .path = PERF_DATA,
> +               .file.fd = cmd->out,
> +       };
> +       struct perf_script script = {
> +               .tool = {
> +               .sample          = process_sample_event,
> +               .feature         = process_feature_event,
> +               .attr            = perf_event__process_attr,
> +               },
> +       };
> +
> +       session = perf_session__new(&data, &script.tool);
> +       if (IS_ERR(session))
> +               return NULL;
> +       script.session = session;
> +       perf_session__process_events(session);
> +       perf_session__delete(session);
> +
> +       return NULL;
> +}

Now we have the weight[123] fields to show the average weight.
Can we simply use them, like below?

  $ perf mem record -t load -o- timeout 1 perf test -w datasym | \
    perf report -i- -q -F weight1
  [ perf record: Woken up 3 times to write data ]
  [ perf record: Captured and wrote 0.614 MB - ]
      92.2             <--- this


> +
>  static int __run_perf_stat(int argc, const char **argv, int run_idx)
>  {
>         int interval = stat_config.interval;
> @@ -709,6 +874,8 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
>         struct affinity saved_affinity, *affinity = NULL;
>         int err;
>         bool second_pass = false;
> +       struct child_process cmd;
> +       pthread_t reader_thread;
>
>         /*
>          * Prepare perf record for sampling event retire_latency before fork and
> @@ -716,10 +883,35 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
>          */
>         if (stat_config.tpebs_event_size > 0) {
>                 int ret;
> +               struct tpebs_event *e;
> +
> +               pr_debug("perf stat pid = %d\n", getpid());
> +               list_for_each_entry(e, &stat_config.tpebs_events, nd) {
> +                       struct tpebs_retire_lat *new = malloc(sizeof(struct tpebs_retire_lat));
>
> -               ret = __run_perf_record();
> +                       if (!new)
> +                               return -1;
> +                       new->event.name = strdup(e->name);
> +                       if (!new->event.name)
> +                               return -ENOMEM;
> +                       new->event.tpebs_name = strdup(e->tpebs_name);
> +                       if (!new->event.tpebs_name)
> +                               return -ENOMEM;
> +                       new->count = 0;
> +                       new->sum = 0;
> +                       list_add_tail(&new->event.nd, &stat_config.tpebs_results);
> +               }
> +               ret = prepare_perf_record(&cmd);
>                 if (ret)
>                         return ret;
> +               if (pthread_create(&reader_thread, NULL, __sample_reader, &cmd)) {
> +                       kill(cmd.pid, SIGTERM);
> +                       close(cmd.out);
> +                       pr_err("Could not create thread to process sample data.\n");
> +                       return -1;
> +               }
> +               /* Wait for perf record initialization a little bit.*/
> +               sleep(2);

This won't guarantee anything and just slows down the normal case.
You'd better make the record process wait until the reader is ready.
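
For example (a sketch, not tested): perf record can start with events
disabled and enable them over its control fd, so with a pipe pair set up
beforehand (ctl_fd_str below is illustrative) __run_perf_record() could add

  record_argv[i++] = "-D";
  record_argv[i++] = "-1";        /* start with events disabled */
  record_argv[i++] = "--control";
  record_argv[i++] = ctl_fd_str;  /* "fd:<ctl-fd>[,<ack-fd>]" */

and then perf stat would write "enable\n" to the control fd (and wait for
the ack) once the reader thread has created the session.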

Thanks,
Namhyung


>         }
>
>         if (forks) {
> @@ -927,6 +1119,17 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
>
>         t1 = rdclock();
>
> +       if (stat_config.tpebs_event_size > 0) {
> +               int ret;
> +
> +               kill(cmd.pid, SIGTERM);
> +               pthread_join(reader_thread, NULL);
> +               close(cmd.out);
> +               ret = finish_command(&cmd);
> +               if (ret != -ERR_RUN_COMMAND_WAITPID_SIGNAL)
> +                       return ret;
> +       }
> +
>         if (stat_config.walltime_run_table)
>                 stat_config.walltime_run[run_idx] = t1 - t0;
>
> @@ -1034,6 +1237,9 @@ static void sig_atexit(void)
>         if (child_pid != -1)
>                 kill(child_pid, SIGTERM);
>
> +       if (stat_config.tpebs_pid != -1)
> +               kill(stat_config.tpebs_pid, SIGTERM);
> +
>         sigprocmask(SIG_SETMASK, &oset, NULL);
>
>         if (signr == -1)
> @@ -2974,5 +3180,7 @@ int cmd_stat(int argc, const char **argv)
>         metricgroup__rblist_exit(&stat_config.metric_events);
>         evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close);
>
> +       tpebs_data__delete();
> +
>         return status;
>  }
> diff --git a/tools/perf/util/metricgroup.h b/tools/perf/util/metricgroup.h
> index 7c24ed768ff3..ae788edef30f 100644
> --- a/tools/perf/util/metricgroup.h
> +++ b/tools/perf/util/metricgroup.h
> @@ -68,10 +68,18 @@ struct metric_expr {
>
>  struct tpebs_event {
>         struct list_head nd;
> +       /* Event name */
>         const char *name;
> +       /* Event name with the TPEBS modifier R */
>         const char *tpebs_name;
>  };
>
> +struct tpebs_retire_lat {
> +       struct tpebs_event event;
> +       size_t count;
> +       int sum;
> +};
> +
>  struct metric_event *metricgroup__lookup(struct rblist *metric_events,
>                                          struct evsel *evsel,
>                                          bool create);
> diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
> index b987960df3c5..0726bdc06681 100644
> --- a/tools/perf/util/stat.h
> +++ b/tools/perf/util/stat.h
> @@ -111,6 +111,8 @@ struct perf_stat_config {
>         struct rblist            metric_events;
>         struct list_head         tpebs_events;
>         size_t                   tpebs_event_size;
> +       struct list_head         tpebs_results;
> +       pid_t                    tpebs_pid;
>         int                      ctl_fd;
>         int                      ctl_fd_ack;
>         bool                     ctl_fd_close;
> --
> 2.43.0
>
