Message-ID: <CAM9d7ciMT6XdeAfnQ-kad4gKtHjiSVr66OjhP6y8rfbxKvpAfA@mail.gmail.com>
Date:   Tue, 11 Oct 2022 16:44:35 -0700
From:   Namhyung Kim <namhyung@...nel.org>
To:     Ian Rogers <irogers@...gle.com>
Cc:     Arnaldo Carvalho de Melo <acme@...nel.org>,
        Jiri Olsa <jolsa@...nel.org>, Ingo Molnar <mingo@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        LKML <linux-kernel@...r.kernel.org>,
        Adrian Hunter <adrian.hunter@...el.com>,
        linux-perf-users <linux-perf-users@...r.kernel.org>,
        Kan Liang <kan.liang@...ux.intel.com>,
        Leo Yan <leo.yan@...aro.org>, Andi Kleen <ak@...ux.intel.com>,
        Athira Rajeev <atrajeev@...ux.vnet.ibm.com>,
        James Clark <james.clark@....com>,
        Xing Zhengjun <zhengjun.xing@...ux.intel.com>
Subject: Re: [PATCH 09/19] perf stat: Aggregate events using evsel->stats->aggr

On Mon, Oct 10, 2022 at 4:11 PM Ian Rogers <irogers@...gle.com> wrote:
>
> On Sun, Oct 9, 2022 at 10:36 PM Namhyung Kim <namhyung@...nel.org> wrote:
> >
> > Add logic to aggregate counter values into the new evsel->stats->aggr.
> > This is not used yet, so shadow stats are not updated.  A later patch
> > will convert the existing code to use it.
> >
> > With that, we no longer need to handle AGGR_GLOBAL specially.  It can
> > use the same counts, prev_counts and aggr_counts logic.
> >
> > Signed-off-by: Namhyung Kim <namhyung@...nel.org>
> > ---
> >  tools/perf/builtin-stat.c                     |  3 --
> >  tools/perf/util/evsel.c                       |  9 +---
> >  .../scripting-engines/trace-event-python.c    |  6 ---
> >  tools/perf/util/stat.c                        | 46 ++++++++++++++++---
> >  4 files changed, 41 insertions(+), 23 deletions(-)
> >
> > diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
> > index eaddafbd7ff2..139e35ed68d3 100644
> > --- a/tools/perf/builtin-stat.c
> > +++ b/tools/perf/builtin-stat.c
> > @@ -963,9 +963,6 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
> >                 init_stats(&walltime_nsecs_stats);
> >                 update_stats(&walltime_nsecs_stats, t1 - t0);
> >
> > -               if (stat_config.aggr_mode == AGGR_GLOBAL)
> > -                       evlist__save_aggr_prev_raw_counts(evsel_list);
> > -
> >                 evlist__copy_prev_raw_counts(evsel_list);
> >                 evlist__reset_prev_raw_counts(evsel_list);
> >                 perf_stat__reset_shadow_per_stat(&rt_stat);
> > diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
> > index a6ea91c72659..a1fcb3166149 100644
> > --- a/tools/perf/util/evsel.c
> > +++ b/tools/perf/util/evsel.c
> > @@ -1526,13 +1526,8 @@ void evsel__compute_deltas(struct evsel *evsel, int cpu_map_idx, int thread,
> >         if (!evsel->prev_raw_counts)
> >                 return;
> >
> > -       if (cpu_map_idx == -1) {
> > -               tmp = evsel->prev_raw_counts->aggr;
> > -               evsel->prev_raw_counts->aggr = *count;
> > -       } else {
> > -               tmp = *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
> > -               *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread) = *count;
> > -       }
> > +       tmp = *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
> > +       *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread) = *count;
> >
> >         count->val = count->val - tmp.val;
> >         count->ena = count->ena - tmp.ena;
> > diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
> > index 1f2040f36d4e..7bc8559dce6a 100644
> > --- a/tools/perf/util/scripting-engines/trace-event-python.c
> > +++ b/tools/perf/util/scripting-engines/trace-event-python.c
> > @@ -1653,12 +1653,6 @@ static void python_process_stat(struct perf_stat_config *config,
> >         struct perf_cpu_map *cpus = counter->core.cpus;
> >         int cpu, thread;
> >
> > -       if (config->aggr_mode == AGGR_GLOBAL) {
> > -               process_stat(counter, (struct perf_cpu){ .cpu = -1 }, -1, tstamp,
> > -                            &counter->counts->aggr);
> > -               return;
> > -       }
> > -
> >         for (thread = 0; thread < threads->nr; thread++) {
> >                 for (cpu = 0; cpu < perf_cpu_map__nr(cpus); cpu++) {
> >                         process_stat(counter, perf_cpu_map__cpu(cpus, cpu),
> > diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
> > index 374149628507..99874254809d 100644
> > --- a/tools/perf/util/stat.c
> > +++ b/tools/perf/util/stat.c
> > @@ -387,6 +387,7 @@ process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
> >                        struct perf_counts_values *count)
> >  {
> >         struct perf_counts_values *aggr = &evsel->counts->aggr;
> > +       struct perf_stat_evsel *ps = evsel->stats;
> >         static struct perf_counts_values zero;
> >         bool skip = false;
> >
> > @@ -398,6 +399,44 @@ process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
> >         if (skip)
> >                 count = &zero;
> >
> > +       if (!evsel->snapshot)
> > +               evsel__compute_deltas(evsel, cpu_map_idx, thread, count);
> > +       perf_counts_values__scale(count, config->scale, NULL);
> > +
> > +       if (ps->aggr) {
> > +               struct perf_cpu cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
> > +               struct aggr_cpu_id aggr_id = config->aggr_get_id(config, cpu);
> > +               struct perf_stat_aggr *ps_aggr;
> > +               int i;
> > +
> > +               for (i = 0; i < ps->nr_aggr; i++) {
>
> Would it be cleaner to have a helper function here that returns i or
> ps_aggr for the first CPU being aggregated into? That would avoid the
> continue/break.

Right, we need a cpu -> aggr_idx mapping.
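Maybe something like this would do, as an untested sketch (the helper
name is made up, and it assumes config->aggr_map->nr matches
ps->nr_aggr):

  /* Hypothetical helper: map a CPU to its index in config->aggr_map. */
  static int aggr_cpu_map_idx(struct perf_stat_config *config,
                              struct perf_cpu cpu)
  {
          struct aggr_cpu_id id = config->aggr_get_id(config, cpu);
          int i;

          for (i = 0; i < config->aggr_map->nr; i++) {
                  if (aggr_cpu_id__equal(&id, &config->aggr_map->map[i]))
                          return i;
          }

          return -1;      /* CPU not covered by the aggregation map */
  }

Then process_counter_values() could look up the index once and use
ps_aggr = &ps->aggr[idx] directly (after checking idx >= 0), without
the continue/break.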

>
> > +                       if (!aggr_cpu_id__equal(&aggr_id, &config->aggr_map->map[i]))
> > +                               continue;
> > +
> > +                       ps_aggr = &ps->aggr[i];
> > +                       ps_aggr->nr++;
> > +
> > +                       /*
> > +                        * When any result is bad, make them all to give
> > +                        * consistent output in interval mode.
> > +                        */
> > +                       if (count->ena == 0 || count->run == 0 ||
> > +                           evsel->counts->scaled == -1) {
> > +                               ps_aggr->counts.val = 0;
> > +                               ps_aggr->counts.ena = 0;
> > +                               ps_aggr->counts.run = 0;
> > +                               ps_aggr->failed = true;
> > +                       }
> > +
> > +                       if (!ps_aggr->failed) {
> > +                               ps_aggr->counts.val += count->val;
> > +                               ps_aggr->counts.ena += count->ena;
> > +                               ps_aggr->counts.run += count->run;
> > +                       }
> > +                       break;
> > +               }
> > +       }
> > +
> >         switch (config->aggr_mode) {
> >         case AGGR_THREAD:
> >         case AGGR_CORE:
> > @@ -405,9 +444,6 @@ process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
> >         case AGGR_SOCKET:
> >         case AGGR_NODE:
> >         case AGGR_NONE:
> > -               if (!evsel->snapshot)
> > -                       evsel__compute_deltas(evsel, cpu_map_idx, thread, count);
> > -               perf_counts_values__scale(count, config->scale, NULL);
> >                 if ((config->aggr_mode == AGGR_NONE) && (!evsel->percore)) {
> >                         perf_stat__update_shadow_stats(evsel, count->val,
> >                                                        cpu_map_idx, &rt_stat);
> > @@ -469,10 +505,6 @@ int perf_stat_process_counter(struct perf_stat_config *config,
> >         if (config->aggr_mode != AGGR_GLOBAL)
> >                 return 0;
> >
> > -       if (!counter->snapshot)
> > -               evsel__compute_deltas(counter, -1, -1, aggr);
> > -       perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);
> > -
>
> It isn't clear to me how this relates to the patch.

It's moved to process_counter_values() so that it's handled in the same
way as the other aggr modes.
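
To put it another way (paraphrasing the hunks above, not literal code),
for AGGR_GLOBAL the per-counter flow now goes through the common path in
process_counter_values() for every cpu_map_idx/thread:

          if (!evsel->snapshot)
                  evsel__compute_deltas(evsel, cpu_map_idx, thread, count);
          perf_counts_values__scale(count, config->scale, NULL);

so deltas are computed and scaled per CPU/thread and then summed into
ps->aggr[], instead of being computed once on the pre-aggregated value
with cpu_map_idx == -1.  That is also why the -1 special case in
evsel__compute_deltas() can be dropped.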

Thanks,
Namhyung


>
> >         update_stats(&ps->res_stats, *count);
> >
> >         if (verbose > 0) {
> > --
> > 2.38.0.rc1.362.ged0d419d3c-goog
> >
