Message-ID: <9506b14fe2965e4145c034715eb10e02f2137f7b.camel@gmail.com>
Date:   Fri, 03 Sep 2021 23:52:18 +0200
From:   Riccardo Mancini <rickyman7@...il.com>
To:     Arnaldo Carvalho de Melo <arnaldo.melo@...il.com>
Cc:     Ian Rogers <irogers@...gle.com>,
        Namhyung Kim <namhyung@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>,
        Mark Rutland <mark.rutland@....com>,
        Jiri Olsa <jolsa@...hat.com>, linux-kernel@...r.kernel.org,
        linux-perf-users@...r.kernel.org
Subject: Re: [RFC PATCH v1 25/37] perf evsel: move event open in
 evsel__open_cpu to separate function

Hi Arnaldo,

Thanks for your review and suggestions, and also for the PRIu64 patch.

On Tue, 2021-08-31 at 16:54 -0300, Arnaldo Carvalho de Melo wrote:
> Em Sat, Aug 21, 2021 at 11:19:31AM +0200, Riccardo Mancini escreveu:
> > This is the final patch splitting evsel__open_cpu.
> > This patch moves the entire loop code to a separate function, to be
> > reused for the multithreaded code.
> 
> Are you going to use that 'enum perf_event_open_err' somewhere else?
> I.e. is there a need to expose it in evsel.h?

Yes, in the next patch (26/37). It's used to expose a function that just does the
perf_event_open calls for an evsel. It needs to return such a structure to provide
information about the error (which return code, at which thread).
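
To illustrate the idea, here is a minimal standalone sketch of the pattern (names
like do_open() and struct open_result are made up for the example, this is not the
actual perf code): the helper returns a small result struct carrying the error
classification, the raw return code and the thread it happened on, and the caller
switches on it to decide between trying a fallback and giving up.

#include <stdio.h>

enum open_err { OPEN_SUCCESS, OPEN_FALLBACK, OPEN_ERROR };

struct open_result {
	enum open_err err;	/* how the caller should react */
	int rc;			/* raw return code, e.g. -errno */
	int thread;		/* which thread the failure happened on */
};

/* Made-up stand-in for the per-thread open helper. */
static struct open_result do_open(int thread)
{
	struct open_result res = { OPEN_SUCCESS, 0, thread };

	if (thread == 2) {	/* pretend thread 2 hits a recoverable error */
		res.err = OPEN_FALLBACK;
		res.rc = -22;	/* -EINVAL */
	}
	return res;
}

int main(void)
{
	for (int thread = 0; thread < 4; thread++) {
		struct open_result res = do_open(thread);

		switch (res.err) {
		case OPEN_SUCCESS:
			continue;
		case OPEN_FALLBACK:
			printf("thread %d: rc %d, try a fallback\n", res.thread, res.rc);
			continue;
		case OPEN_ERROR:
			printf("thread %d: rc %d, give up\n", res.thread, res.rc);
			return 1;
		}
	}
	return 0;
}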

> 
> I'm stopping at this patch to give the ones I merged so far some
> testing, will now push it to tmp.perf/core.

I checked tmp.perf/core and it looks good to me.
I also did some additional tests to check that the fallback mechanisms were
working:

check that a missing pid is ignored (rerun until the warning is shown):
$ sudo ./perf bench internals evlist-open-close -i10 -u $UID

check that the weak group fallback is working:
$ sudo ./perf record -e '{cycles,cache-misses,cache-references,cpu_clk_unhalted.thread,cycles,cycles,cycles}:W'

check that the precision_ip fallback is working (I edited perf-sys.h to make
sys_perf_event_open fail if precision_ip > 2, roughly as sketched below):
$ sudo ./perf record -e '{cycles,cs}:P'
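
(A simplified sketch of what such a local hack to the sys_perf_event_open()
wrapper could look like; illustration only, not the exact edit, and note the
attr field is spelled precise_ip:)

#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static inline int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	/*
	 * Temporary test hack: pretend the kernel rejects requests with a
	 * high precise_ip so that the precision fallback path gets exercised.
	 */
	if (attr->precise_ip > 2) {
		errno = EOPNOTSUPP;
		return -1;
	}

	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}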


I've also run perf test on my machine and it's passing too.
I'm encountering one failure on the "BPF filter" test (42), which is also present
in perf/core, so it should not be related to this patch.

Thanks,
Riccardo

> 
> - Arnaldo
>  
> > Signed-off-by: Riccardo Mancini <rickyman7@...il.com>
> > ---
> >  tools/perf/util/evsel.c | 142 ++++++++++++++++++++++++----------------
> >  tools/perf/util/evsel.h |  12 ++++
> >  2 files changed, 99 insertions(+), 55 deletions(-)
> > 
> > diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
> > index 2e95416b8320c6b9..e41f55a7a70ea630 100644
> > --- a/tools/perf/util/evsel.c
> > +++ b/tools/perf/util/evsel.c
> > @@ -1945,6 +1945,82 @@ bool evsel__increase_rlimit(enum rlimit_action *set_rlimit)
> >         return false;
> >  }
> >  
> > +static struct perf_event_open_result perf_event_open(struct evsel *evsel,
> > +                                       pid_t pid, int cpu, int thread,
> > +                                       struct perf_cpu_map *cpus,
> > +                                       struct perf_thread_map *threads)
> > +{
> > +       int fd, group_fd, rc;
> > +       struct perf_event_open_result res;
> > +
> > +       if (!evsel->cgrp && !evsel->core.system_wide)
> > +               pid = perf_thread_map__pid(threads, thread);
> > +
> > +       group_fd = get_group_fd(evsel, cpu, thread);
> > +
> > +       test_attr__ready();
> > +
> > +       pr_debug2_peo("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx",
> > +                       pid, cpus->map[cpu], group_fd, evsel->open_flags);
> > +
> > +       fd = sys_perf_event_open(&evsel->core.attr, pid, cpus->map[cpu],
> > +                               group_fd, evsel->open_flags);
> > +
> > +       FD(evsel, cpu, thread) = fd;
> > +       res.fd = fd;
> > +
> > +       if (fd < 0) {
> > +               rc = -errno;
> > +
> > +               pr_debug2_peo("\nsys_perf_event_open failed, error %d\n",
> > +                               rc);
> > +               res.rc = rc;
> > +               res.err = PEO_FALLBACK;
> > +               return res;
> > +       }
> > +
> > +       bpf_counter__install_pe(evsel, cpu, fd);
> > +
> > +       if (unlikely(test_attr__enabled)) {
> > +               test_attr__open(&evsel->core.attr, pid,
> > +                       cpus->map[cpu], fd,
> > +                       group_fd, evsel->open_flags);
> > +       }
> > +
> > +       pr_debug2_peo(" = %d\n", fd);
> > +
> > +       if (evsel->bpf_fd >= 0) {
> > +               int evt_fd = fd;
> > +               int bpf_fd = evsel->bpf_fd;
> > +
> > +               rc = ioctl(evt_fd,
> > +                               PERF_EVENT_IOC_SET_BPF,
> > +                               bpf_fd);
> > +               if (rc && errno != EEXIST) {
> > +                       pr_err("failed to attach bpf fd %d: %s\n",
> > +                               bpf_fd, strerror(errno));
> > +                       res.rc = -EINVAL;
> > +                       res.err = PEO_ERROR;
> > +                       return res;
> > +               }
> > +       }
> > +
> > +       /*
> > +        * If we succeeded but had to kill clockid, fail and
> > +        * have evsel__open_strerror() print us a nice error.
> > +        */
> > +       if (perf_missing_features.clockid ||
> > +               perf_missing_features.clockid_wrong) {
> > +               res.rc = -EINVAL;
> > +               res.err = PEO_ERROR;
> > +               return res;
> > +       }
> > +
> > +       res.rc = 0;
> > +       res.err = PEO_SUCCESS;
> > +       return res;
> > +}
> > +
> >  static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
> >                 struct perf_thread_map *threads,
> >                 int start_cpu, int end_cpu)
> > @@ -1952,6 +2028,7 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
> >         int cpu, thread, nthreads;
> >         int pid = -1, err, old_errno;
> >         enum rlimit_action set_rlimit = NO_CHANGE;
> > +       struct perf_event_open_result peo_res;
> >  
> >         err = __evsel__prepare_open(evsel, cpus, threads);
> >         if (err)
> > @@ -1979,67 +2056,22 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
> >         for (cpu = start_cpu; cpu < end_cpu; cpu++) {
> >  
> >                 for (thread = 0; thread < nthreads; thread++) {
> > -                       int fd, group_fd;
> >  retry_open:
> >                         if (thread >= nthreads)
> >                                 break;
> >  
> > -                       if (!evsel->cgrp && !evsel->core.system_wide)
> > -                               pid = perf_thread_map__pid(threads, thread);
> > -
> > -                       group_fd = get_group_fd(evsel, cpu, thread);
> > -
> > -                       test_attr__ready();
> > -
> > -                       pr_debug2_peo("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx",
> > -                               pid, cpus->map[cpu], group_fd, evsel->open_flags);
> > +                       peo_res = perf_event_open(evsel, pid, cpu, thread, cpus,
> > +                                               threads);
> >  
> > -                       fd = sys_perf_event_open(&evsel->core.attr, pid, cpus->map[cpu],
> > -                                               group_fd, evsel->open_flags);
> > -
> > -                       FD(evsel, cpu, thread) = fd;
> > -
> > -                       if (fd < 0) {
> > -                               err = -errno;
> > -
> > -                               pr_debug2_peo("\nsys_perf_event_open failed, error %d\n",
> > -                                         err);
> > +                       err = peo_res.rc;
> > +                       switch (peo_res.err) {
> > +                       case PEO_SUCCESS:
> > +                               set_rlimit = NO_CHANGE;
> > +                               continue;
> > +                       case PEO_FALLBACK:
> >                                 goto try_fallback;
> > -                       }
> > -
> > -                       bpf_counter__install_pe(evsel, cpu, fd);
> > -
> > -                       if (unlikely(test_attr__enabled)) {
> > -                               test_attr__open(&evsel->core.attr, pid, cpus->map[cpu],
> > -                                               fd, group_fd, evsel->open_flags);
> > -                       }
> > -
> > -                       pr_debug2_peo(" = %d\n", fd);
> > -
> > -                       if (evsel->bpf_fd >= 0) {
> > -                               int evt_fd = fd;
> > -                               int bpf_fd = evsel->bpf_fd;
> > -
> > -                               err = ioctl(evt_fd,
> > -                                           PERF_EVENT_IOC_SET_BPF,
> > -                                           bpf_fd);
> > -                               if (err && errno != EEXIST) {
> > -                                       pr_err("failed to attach bpf fd %d: %s\n",
> > -                                              bpf_fd, strerror(errno));
> > -                                       err = -EINVAL;
> > -                                       goto out_close;
> > -                               }
> > -                       }
> > -
> > -                       set_rlimit = NO_CHANGE;
> > -
> > -                       /*
> > -                        * If we succeeded but had to kill clockid, fail and
> > -                        * have evsel__open_strerror() print us a nice error.
> > -                        */
> > -                       if (perf_missing_features.clockid ||
> > -                           perf_missing_features.clockid_wrong) {
> > -                               err = -EINVAL;
> > +                       default:
> > +                       case PEO_ERROR:
> >                                 goto out_close;
> >                         }
> >                 }
> > diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
> > index 0a245afab2d87d74..8c9827a93ac001a7 100644
> > --- a/tools/perf/util/evsel.h
> > +++ b/tools/perf/util/evsel.h
> > @@ -282,6 +282,18 @@ int evsel__enable(struct evsel *evsel);
> >  int evsel__disable(struct evsel *evsel);
> >  int evsel__disable_cpu(struct evsel *evsel, int cpu);
> >  
> > +enum perf_event_open_err {
> > +       PEO_SUCCESS,
> > +       PEO_FALLBACK,
> > +       PEO_ERROR
> > +};
> > +
> > +struct perf_event_open_result {
> > +       enum perf_event_open_err err;
> > +       int rc;
> > +       int fd;
> > +};
> > +
> >  int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu);
> >  int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads);
> >  int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
> > -- 
> > 2.31.1
> 

