Message-ID: <CAP-5=fXP5Sptyh_qfmnRKw=DghWyW2ZapG5NChT-F4DiOXHXFg@mail.gmail.com>
Date:   Wed, 24 Jul 2019 15:34:29 -0700
From:   Ian Rogers <irogers@...gle.com>
To:     Peter Zijlstra <peterz@...radead.org>
Cc:     Ingo Molnar <mingo@...hat.com>,
        Arnaldo Carvalho de Melo <acme@...nel.org>,
        Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
        Jiri Olsa <jolsa@...hat.com>,
        Namhyung Kim <namhyung@...nel.org>,
        LKML <linux-kernel@...r.kernel.org>,
        Kan Liang <kan.liang@...ux.intel.com>,
        Stephane Eranian <eranian@...gle.com>
Subject: Re: [PATCH 2/7] perf/cgroup: order events in RB tree by cgroup id

On Mon, Jul 8, 2019 at 9:16 AM Peter Zijlstra <peterz@...radead.org> wrote:
>
> On Mon, Jul 01, 2019 at 11:59:50PM -0700, Ian Rogers wrote:
> > +static int visit_groups_merge(struct perf_event_context *ctx,
> > +                           struct perf_cpu_context *cpuctx,
> > +                           struct perf_event_groups *groups,
> > +                           int (*func)(struct perf_event_context *,
> > +                                       struct perf_cpu_context *,
> > +                                       struct perf_event *,
> > +                                       int *),
> > +                           int *data)
>
> > -struct sched_in_data {
> > -     struct perf_event_context *ctx;
> > -     struct perf_cpu_context *cpuctx;
> > -     int can_add_hw;
> > -};
> > -
> > -static int pinned_sched_in(struct perf_event *event, void *data)
> > +static int pinned_sched_in(struct perf_event_context *ctx,
> > +                        struct perf_cpu_context *cpuctx,
> > +                        struct perf_event *event,
> > +                        int *unused)
> >  {
> > -     struct sched_in_data *sid = data;
> > -
> >       if (event->state <= PERF_EVENT_STATE_OFF)
> >               return 0;
> >
> >       if (!event_filter_match(event))
> >               return 0;
> >
> > -     if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
> > -             if (!group_sched_in(event, sid->cpuctx, sid->ctx))
> > -                     list_add_tail(&event->active_list, &sid->ctx->pinned_active);
> > +     if (group_can_go_on(event, cpuctx, 1)) {
> > +             if (!group_sched_in(event, cpuctx, ctx))
> > +                     list_add_tail(&event->active_list, &ctx->pinned_active);
> >       }
> >
> >       /*
> > @@ -3317,24 +3444,25 @@ static int pinned_sched_in(struct perf_event *event, void *data)
> >       return 0;
> >  }
> >
> > -static int flexible_sched_in(struct perf_event *event, void *data)
> > +static int flexible_sched_in(struct perf_event_context *ctx,
> > +                          struct perf_cpu_context *cpuctx,
> > +                          struct perf_event *event,
> > +                          int *can_add_hw)
> >  {
> > -     struct sched_in_data *sid = data;
> > -
> >       if (event->state <= PERF_EVENT_STATE_OFF)
> >               return 0;
> >
> >       if (!event_filter_match(event))
> >               return 0;
> >
> > -     if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
> > -             int ret = group_sched_in(event, sid->cpuctx, sid->ctx);
> > +     if (group_can_go_on(event, cpuctx, *can_add_hw)) {
> > +             int ret = group_sched_in(event, cpuctx, ctx);
> >               if (ret) {
> > -                     sid->can_add_hw = 0;
> > -                     sid->ctx->rotate_necessary = 1;
> > +                     *can_add_hw = 0;
> > +                     ctx->rotate_necessary = 1;
> >                       return 0;
> >               }
> > -             list_add_tail(&event->active_list, &sid->ctx->flexible_active);
> > +             list_add_tail(&event->active_list, &ctx->flexible_active);
> >       }
> >
> >       return 0;
> > @@ -3344,30 +3472,24 @@ static void
> >  ctx_pinned_sched_in(struct perf_event_context *ctx,
> >                   struct perf_cpu_context *cpuctx)
> >  {
> > -     struct sched_in_data sid = {
> > -             .ctx = ctx,
> > -             .cpuctx = cpuctx,
> > -             .can_add_hw = 1,
> > -     };
> > -
> > -     visit_groups_merge(&ctx->pinned_groups,
> > -                        smp_processor_id(),
> > -                        pinned_sched_in, &sid);
> > +     visit_groups_merge(ctx,
> > +                        cpuctx,
> > +                        &ctx->pinned_groups,
> > +                        pinned_sched_in,
> > +                        NULL);
> >  }
> >
> >  static void
> >  ctx_flexible_sched_in(struct perf_event_context *ctx,
> >                     struct perf_cpu_context *cpuctx)
> >  {
> > -     struct sched_in_data sid = {
> > -             .ctx = ctx,
> > -             .cpuctx = cpuctx,
> > -             .can_add_hw = 1,
> > -     };
> > +     int can_add_hw = 1;
> >
> > -     visit_groups_merge(&ctx->flexible_groups,
> > -                        smp_processor_id(),
> > -                        flexible_sched_in, &sid);
> > +     visit_groups_merge(ctx,
> > +                        cpuctx,
> > +                        &ctx->flexible_groups,
> > +                        flexible_sched_in,
> > +                        &can_add_hw);
> >  }
>
> It is not clear to me why you're doing away with that sched_in_data
> abstraction. AFAICT that has no material impact on this patch.

The change alters the visit_groups_merge parameters to take ctx and cpuctx,
which it needs in order to compute the number of iterators. ctx and cpuctx
were taken out of sched_in_data to avoid passing them twice; can_add_hw
remains, but is no longer wrapped inside sched_in_data, as it seemed
unnecessary to wrap a single boolean.
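
For illustration, a minimal standalone sketch of that refactoring pattern
(not the kernel code; the types and names below are made up): the wrapper
struct is dropped, its members become explicit visitor parameters, and only
the flag the visitor may modify is still passed by pointer.

#include <stdio.h>

/* Stand-ins for the real perf contexts; illustrative only. */
struct ctx    { int rotate_necessary; };
struct cpuctx { int ncpu; };

/* Old style: everything travels through one opaque data pointer. */
struct sched_in_data {
	struct ctx *ctx;
	struct cpuctx *cpuctx;
	int can_add_hw;
};

static int old_visitor(int event, void *data)
{
	struct sched_in_data *sid = data;

	return sid->can_add_hw ? event : 0;
}

/* New style: ctx and cpuctx are explicit parameters (the caller already
 * holds them), and only the mutable can_add_hw flag is passed by pointer.
 */
static int new_visitor(struct ctx *ctx, struct cpuctx *cpuctx,
		       int event, int *can_add_hw)
{
	(void)cpuctx;
	if (!*can_add_hw) {
		ctx->rotate_necessary = 1;
		return 0;
	}
	return event;
}

int main(void)
{
	struct ctx c = { 0 };
	struct cpuctx cc = { 1 };
	struct sched_in_data sid = { &c, &cc, 1 };
	int can_add_hw = 1;

	printf("old=%d new=%d\n",
	       old_visitor(42, &sid),
	       new_visitor(&c, &cc, 42, &can_add_hw));
	return 0;
}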

Thanks,
Ian
