Message-ID: <CABPqkBRz7B=KUy1g1UzyBqwsNrj_1X5Z2Ya1SXZy2ktD6nGDig@mail.gmail.com>
Date:	Thu, 21 May 2015 06:39:29 -0700
From:	Stephane Eranian <eranian@...gle.com>
To:	Peter Zijlstra <peterz@...radead.org>
Cc:	Ingo Molnar <mingo@...nel.org>,
	Vince Weaver <vincent.weaver@...ne.edu>,
	Jiri Olsa <jolsa@...hat.com>,
	LKML <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH 09/10] perf/x86: Remove intel_excl_states::init_state

On Thu, May 21, 2015 at 4:17 AM, Peter Zijlstra <peterz@...radead.org> wrote:
> For some obscure reason intel_{start,stop}_scheduling() copy the HT
> state to an intermediate array. This would make sense if we ever were
> to make changes to it which we'd have to discard.
>
> Except we don't. By the time we call intel_commit_scheduling() we're,
> as the name implies, committed to them. We'll never back out.
>
Well, I remember there was a reason why this state was introduced. I initially
thought we could do without it, but had to add it to solve a problem. We do
backtrack in the scheduling algorithm because of the incremental transaction
between the generic layer and the x86 layer. I'll have to remember the exact
case where this happens.

Now, it may be that because you changed the logic of the xl vs. xlo, this saved
state is not needed anymore. I'll have to look at that some more.
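
For illustration, a minimal standalone sketch of the save/commit transaction
pattern that init_state implemented; the types and names below are simplified
stand-ins, not the real perf_event structures:

    #include <stdbool.h>
    #include <string.h>

    #define NR_CNTRS 4

    enum excl_state { EXCL_UNUSED = 0, EXCL_SHARED, EXCL_EXCLUSIVE };

    struct excl_states {
            enum excl_state state[NR_CNTRS];      /* committed state */
            enum excl_state init_state[NR_CNTRS]; /* working copy */
    };

    static void start_sched(struct excl_states *xl)
    {
            /* snapshot the state so a failed schedule could be discarded */
            memcpy(xl->init_state, xl->state, sizeof(xl->init_state));
    }

    static void commit_sched(struct excl_states *xl, int cntr, bool excl)
    {
            /* modify only the working copy */
            xl->init_state[cntr] = excl ? EXCL_EXCLUSIVE : EXCL_SHARED;
    }

    static void stop_sched(struct excl_states *xl)
    {
            /* publish the working copy back to the committed state */
            memcpy(xl->state, xl->init_state, sizeof(xl->state));
    }

The patch's observation is that stop_sched() above runs unconditionally, so
the snapshot is never actually discarded and the extra array buys nothing.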


> So the intermediate array is pointless, modify the state in place and
> kill the extra array.
>
> And remove the pointless array initialization: INTEL_EXCL_UNUSED == 0.
>
> Note: all is serialized by intel_excl_cntrs::lock.
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
> ---
>  arch/x86/kernel/cpu/perf_event.c       |    1 -
>  arch/x86/kernel/cpu/perf_event.h       |    1 -
>  arch/x86/kernel/cpu/perf_event_intel.c |   22 ++--------------------
>  3 files changed, 2 insertions(+), 22 deletions(-)
>
> --- a/arch/x86/kernel/cpu/perf_event.c
> +++ b/arch/x86/kernel/cpu/perf_event.c
> @@ -861,7 +861,6 @@ int x86_schedule_events(struct cpu_hw_ev
>         }
>
>         if (!assign || unsched) {
> -
>                 for (i = 0; i < n; i++) {
>                         e = cpuc->event_list[i];
>                         /*
> --- a/arch/x86/kernel/cpu/perf_event.h
> +++ b/arch/x86/kernel/cpu/perf_event.h
> @@ -132,7 +132,6 @@ enum intel_excl_state_type {
>  };
>
>  struct intel_excl_states {
> -       enum intel_excl_state_type init_state[X86_PMC_IDX_MAX];
>         enum intel_excl_state_type state[X86_PMC_IDX_MAX];
>         bool sched_started; /* true if scheduling has started */
>  };
> --- a/arch/x86/kernel/cpu/perf_event_intel.c
> +++ b/arch/x86/kernel/cpu/perf_event_intel.c
> @@ -1927,11 +1927,6 @@ intel_start_scheduling(struct cpu_hw_eve
>          * makes scheduling appear as a transaction
>          */
>         raw_spin_lock(&excl_cntrs->lock);
> -
> -       /*
> -        * Save a copy of our state to work on.
> -        */
> -       memcpy(xl->init_state, xl->state, sizeof(xl->init_state));
>  }
>
>  static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
> @@ -1955,9 +1950,9 @@ static void intel_commit_scheduling(stru
>         lockdep_assert_held(&excl_cntrs->lock);
>
>         if (c->flags & PERF_X86_EVENT_EXCL)
> -               xl->init_state[cntr] = INTEL_EXCL_EXCLUSIVE;
> +               xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
>         else
> -               xl->init_state[cntr] = INTEL_EXCL_SHARED;
> +               xl->state[cntr] = INTEL_EXCL_SHARED;
>  }
>
>  static void
> @@ -1980,11 +1975,6 @@ intel_stop_scheduling(struct cpu_hw_even
>
>         xl = &excl_cntrs->states[tid];
>
> -       /*
> -        * Commit the working state.
> -        */
> -       memcpy(xl->state, xl->init_state, sizeof(xl->state));
> -
>         xl->sched_started = false;
>         /*
>          * release shared state lock (acquired in intel_start_scheduling())
> @@ -2509,19 +2499,11 @@ struct intel_shared_regs *allocate_share
>  static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
>  {
>         struct intel_excl_cntrs *c;
> -       int i;
>
>         c = kzalloc_node(sizeof(struct intel_excl_cntrs),
>                          GFP_KERNEL, cpu_to_node(cpu));
>         if (c) {
>                 raw_spin_lock_init(&c->lock);
> -               for (i = 0; i < X86_PMC_IDX_MAX; i++) {
> -                       c->states[0].state[i] = INTEL_EXCL_UNUSED;
> -                       c->states[0].init_state[i] = INTEL_EXCL_UNUSED;
> -
> -                       c->states[1].state[i] = INTEL_EXCL_UNUSED;
> -                       c->states[1].init_state[i] = INTEL_EXCL_UNUSED;
> -               }
>                 c->core_id = -1;
>         }
>         return c;
>
>
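
On the removed initialization loop: kzalloc_node() returns zeroed memory and
INTEL_EXCL_UNUSED is the first (zero-valued) enumerator, so every slot already
reads as INTEL_EXCL_UNUSED without the loop. A standalone sketch of that
invariant, using calloc() as a stand-in for the kernel allocator:

    #include <assert.h>
    #include <stdlib.h>

    enum excl_state { EXCL_UNUSED = 0, EXCL_SHARED, EXCL_EXCLUSIVE };

    struct excl_states { enum excl_state state[4]; };

    int main(void)
    {
            /* calloc, like kzalloc, hands back zero-filled memory... */
            struct excl_states *xl = calloc(1, sizeof(*xl));
            if (!xl)
                    return 1;

            /* ...so every counter already reads as EXCL_UNUSED (== 0) */
            for (int i = 0; i < 4; i++)
                    assert(xl->state[i] == EXCL_UNUSED);

            free(xl);
            return 0;
    }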