Message-ID: <c3f5c4ecfc86ec1de29f6db681b2e5fce7ef23a3.camel@gmail.com>
Date:   Fri, 04 Jun 2021 01:28:17 +0200
From:   Riccardo Mancini <rickyman7@...il.com>
To:     Alexey Bayduraev <alexey.v.bayduraev@...ux.intel.com>
Cc:     Jiri Olsa <jolsa@...hat.com>, Namhyung Kim <namhyung@...nel.org>,
        Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>,
        linux-kernel <linux-kernel@...r.kernel.org>,
        Andi Kleen <ak@...ux.intel.com>,
        Adrian Hunter <adrian.hunter@...el.com>,
        Alexander Antonov <alexander.antonov@...ux.intel.com>,
        Alexei Budankov <abudankov@...wei.com>,
        linux-perf-users@...r.kernel.org, Ian Rogers <irogers@...gle.com>,
        Arnaldo Carvalho de Melo <acme@...nel.org>
Subject: Re: [PATCH v6 20/20] perf session: Load data directory files for
 analysis

Hi,

On Wed, 2021-05-26 at 13:53 +0300, Alexey Bayduraev wrote:
> Load data directory files and provide basic raw dump and aggregated
> analysis support for data directories in report mode, still with no
> memory consumption optimizations.
> 
> Design and implementation are based on the prototype [1], [2].
> 
> [1] git clone https://git.kernel.org/pub/scm/linux/kernel/git/jolsa/perf.git -b perf/record_threads
> [2] https://lore.kernel.org/lkml/20180913125450.21342-1-jolsa@kernel.org/
> 
> Suggested-by: Jiri Olsa <jolsa@...nel.org>
> Signed-off-by: Alexey Bayduraev <alexey.v.bayduraev@...ux.intel.com>
> ---
>  tools/perf/util/session.c | 129 ++++++++++++++++++++++++++++++++++++++
>  1 file changed, 129 insertions(+)
> 
> diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
> index 041601810b85..dd4ef9749cd0 100644
> --- a/tools/perf/util/session.c
> +++ b/tools/perf/util/session.c
> @@ -65,6 +65,7 @@ struct reader_state {
>         u64      data_size;
>         u64      head;
>         bool     eof;
> +       u64      size;
>  };
>  
>  enum {
> @@ -2319,6 +2320,7 @@ reader__read_event(struct reader *rd, struct perf_session *session,
>         if (skip)
>                 size += skip;
>  
> +       st->size += size;
>         st->head += size;
>         st->file_pos += size;
>  
> @@ -2418,6 +2420,130 @@ static int __perf_session__process_events(struct perf_session *session)
>         return err;
>  }
>  
> +/*
> + * This function reads, merges and processes directory data.
> + * It assumes version 1 of the directory data, where each
> + * data file holds per-cpu data, already sorted by the kernel.
> + */
> +static int __perf_session__process_dir_events(struct perf_session *session)
> +{
> +       struct perf_data *data = session->data;
> +       struct perf_tool *tool = session->tool;
> +       int i, ret = 0, readers = 1;
> +       struct ui_progress prog;
> +       u64 total_size = perf_data__size(session->data);
> +       struct reader *rd;
> +
> +       perf_tool__fill_defaults(tool);
> +
> +       ui_progress__init_size(&prog, total_size, "Sorting events...");
> +
> +       for (i = 0; i < data->dir.nr; i++) {
> +               if (data->dir.files[i].size)
> +                       readers++;
> +       }
> +
> +       rd = session->readers = zalloc(readers * sizeof(struct reader));
> +       if (!rd)
> +               return -ENOMEM;
> +       session->nr_readers = readers;
> +       readers = 0;
> +
> +       rd[readers] = (struct reader) {
> +               .fd              = perf_data__fd(session->data),
> +               .path            = session->data->file.path,
> +               .data_size       = session->header.data_size,
> +               .data_offset     = session->header.data_offset,
> +               .in_place_update = session->data->in_place_update,
> +       };
> +       ret = reader__init(&rd[readers], NULL);
> +       if (ret)
> +               goto out_err;
> +       ret = reader__mmap(&rd[readers], session);
> +       if (ret != READER_OK) {
> +               if (ret == READER_EOF)
> +                       ret = -EINVAL;
> +               goto out_err;
> +       }
> +       readers++;
> +
> +       for (i = 0; i < data->dir.nr; i++) {
> +               if (data->dir.files[i].size) {
> +                       rd[readers] = (struct reader) {
> +                               .fd              = data->dir.files[i].fd,
> +                               .path            = data->dir.files[i].path,
> +                               .data_size       = data->dir.files[i].size,
> +                               .data_offset     = 0,
> +                               .in_place_update = session->data->in_place_update,
> +                       };
> +                       ret = reader__init(&rd[readers], NULL);

zstd_fini() is never called on rd[readers].zstd_data.
Maybe it could be done in perf_session__delete(). For example, we could add a
new reader__fini() function that cleans up the zstd data and calls
perf_decomp__release_events().
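
Something along these lines, as a rough, untested sketch (reader__fini is just
a name I'm making up here, and I'm assuming each struct reader keeps owning
its zstd_data as in this series):

static void reader__fini(struct reader *rd)
{
	/*
	 * If the decompressed-events list also ends up being tracked per
	 * reader, perf_decomp__release_events() could be called from here
	 * as well.
	 */
	zstd_fini(&rd->zstd_data);
}

and then in perf_session__delete():

	int i;

	for (i = 0; i < session->nr_readers; i++)
		reader__fini(&session->readers[i]);
	zfree(&session->readers);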

Thanks,
Riccardo

> +                       if (ret)
> +                               goto out_err;
> +                       ret = reader__mmap(&rd[readers], session);
> +                       if (ret != READER_OK) {
> +                               if (ret == READER_EOF)
> +                                       ret = -EINVAL;
> +                               goto out_err;
> +                       }
> +                       readers++;
> +               }
> +       }
> +
> +       i = 0;
> +
> +       while ((ret >= 0) && readers) {
> +               if (session_done())
> +                       return 0;
> +
> +               if (rd[i].state.eof) {
> +                       i = (i + 1) % session->nr_readers;
> +                       continue;
> +               }
> +
> +               ret = reader__read_event(&rd[i], session, &prog);
> +               if (ret < 0)
> +                       break;
> +               if (ret == READER_EOF) {
> +                       ret = reader__mmap(&rd[i], session);
> +                       if (ret < 0)
> +                               goto out_err;
> +                       if (ret == READER_EOF)
> +                               readers--;
> +               }
> +
> +               /*
> +                * Processing 10MBs of data from each reader in sequence,
> +                * because that's the way the ordered events sorting works
> +                * most efficiently.
> +                */
> +               if (rd[i].state.size >= 10*1024*1024) {
> +                       rd[i].state.size = 0;
> +                       i = (i + 1) % session->nr_readers;
> +               }
> +       }
> +
> +       ret = ordered_events__flush(&session->ordered_events, OE_FLUSH__FINAL);
> +       if (ret)
> +               goto out_err;
> +
> +       ret = perf_session__flush_thread_stacks(session);
> +out_err:
> +       ui_progress__finish();
> +
> +       if (!tool->no_warn)
> +               perf_session__warn_about_errors(session);
> +
> +       /*
> +        * We may be switching perf.data output, so make ordered_events
> +        * reusable.
> +        */
> +       ordered_events__reinit(&session->ordered_events);
> +
> +       session->one_mmap = false;
> +
> +       return ret;
> +}
> +
>  int perf_session__process_events(struct perf_session *session)
>  {
>         if (perf_session__register_idle_thread(session) < 0)
> @@ -2426,6 +2552,9 @@ int perf_session__process_events(struct perf_session *session)
>         if (perf_data__is_pipe(session->data))
>                 return __perf_session__process_pipe_events(session);
>  
> +       if (perf_data__is_dir(session->data))
> +               return __perf_session__process_dir_events(session);
> +
>         return __perf_session__process_events(session);
>  }
>  

