Message-ID: <CABPqkBS_4TBYUvw_bPbtgpAfp8+vnSRZBYQoSAOKjOYV4GHmXA@mail.gmail.com>
Date:	Thu, 10 Nov 2011 16:09:32 +0100
From:	Stephane Eranian <eranian@...gle.com>
To:	Peter Zijlstra <peterz@...radead.org>
Cc:	linux-kernel@...r.kernel.org, robert.richter@....com,
	mingo@...e.hu, ming.m.lin@...el.com, ak@...ux.intel.com
Subject: Re: [PATCH] perf_events: fix and improve x86 event scheduling

On Thu, Nov 10, 2011 at 3:37 PM, Peter Zijlstra <peterz@...radead.org> wrote:
> Just throwing this out there (hasn't even been compiled etc.).
>
> The idea is to try the fixed counters first so that we don't
> 'accidentally' fill a GP counter with something that could have lived on
> the fixed-purpose one and then end up under-utilizing the PMU that way.
>
> It ought to solve the most common PMU programming fail on Intel
> thingies.
>
What are the configs for which you have failures on Intel?

I think I can improve my algorithm for fixed counters by treating
them separately and trying fixed counters first for any supported
event.
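
A minimal standalone sketch of that two-pass idea (illustrative only:
the counter layout, NUM_GP/NUM_FIXED, and the single-word bitmask are
assumptions made for the example, not the kernel's actual code):

#include <stdio.h>

#define NUM_GP     4                    /* general purpose counters  */
#define NUM_FIXED  3                    /* fixed purpose counters    */
#define IDX_FIXED  NUM_GP               /* fixed counters follow GPs */
#define IDX_MAX    (NUM_GP + NUM_FIXED)

/* Pick a free counter for an event usable on the counters in @idxmsk,
 * trying the fixed counters before the general purpose ones. */
static int sched_one(unsigned long idxmsk, unsigned long *used)
{
	int j;

	/* First pass: fixed counters only. */
	for (j = IDX_FIXED; j < IDX_MAX; j++)
		if ((idxmsk & (1UL << j)) && !(*used & (1UL << j)))
			goto assign;

	/* Second pass: general purpose counters. */
	for (j = 0; j < IDX_FIXED; j++)
		if ((idxmsk & (1UL << j)) && !(*used & (1UL << j)))
			goto assign;

	return -1;			/* no free counter */
assign:
	*used |= 1UL << j;
	return j;
}

int main(void)
{
	unsigned long used = 0;
	/* An event usable on GP0-3 and on fixed counter 0. */
	unsigned long insn_mask = 0xf | (1UL << IDX_FIXED);

	printf("first:  counter %d\n", sched_one(insn_mask, &used)); /* 4 */
	printf("second: counter %d\n", sched_one(insn_mask, &used)); /* 0 */
	return 0;
}

Because the first event lands on the fixed counter, all four GP
counters stay free for events that have nowhere else to go.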

> ---
> Index: linux-2.6/arch/x86/kernel/cpu/perf_event.c
> ===================================================================
> --- linux-2.6.orig/arch/x86/kernel/cpu/perf_event.c
> +++ linux-2.6/arch/x86/kernel/cpu/perf_event.c
> @@ -558,14 +558,22 @@ int x86_schedule_events(struct cpu_hw_ev
>                        if (c->weight != w)
>                                continue;
>
> -                       for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
> +                       if (x86_pmu.num_counters_fixed) {
> +                               j = X86_PMC_IDX_FIXED - 1;
> +                               for_each_set_bit_cont(j, c->idxmsk, X86_PMC_IDX_MAX) {
> +                                       if (!test_bit(j, used_mask))
> +                                               goto assign;
> +                               }
> +                       }
> +
> +                       for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_FIXED) {
>                                if (!test_bit(j, used_mask))
> -                                       break;
> +                                       goto assign;
>                        }
>
> -                       if (j == X86_PMC_IDX_MAX)
> -                               break;
> +                       break;
>
> +assign:
>                        __set_bit(j, used_mask);
>
>                        if (assign)
> Index: linux-2.6/include/linux/bitops.h
> ===================================================================
> --- linux-2.6.orig/include/linux/bitops.h
> +++ linux-2.6/include/linux/bitops.h
> @@ -26,6 +26,12 @@ extern unsigned long __sw_hweight64(__u6
>             (bit) < (size); \
>             (bit) = find_next_bit((addr), (size), (bit) + 1))
>
> +#define for_each_set_bit_cont(bit, addr, size) \
> +       for ((bit) = find_next_bit((addr), (size), (bit) + 1); \
> +            (bit) < (size); \
> +            (bit) = find_next_bit((addr), (size), (bit) + 1))
> +
> +
>  static __inline__ int get_bitmask_order(unsigned int count)
>  {
>        int order;
>
>

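For reference, the new for_each_set_bit_cont() starts scanning at
bit + 1, which is why the patch seeds j with X86_PMC_IDX_FIXED - 1
before the fixed-counter pass. A standalone illustration of that
seeding (the find_next_bit() here is a toy single-word version, not
the kernel's implementation):

#include <stdio.h>

/* Toy single-word find_next_bit(): first set bit in *addr at or
 * above @off, or @size if none. */
static unsigned long find_next_bit(const unsigned long *addr,
				   unsigned long size, unsigned long off)
{
	for (; off < size; off++)
		if (*addr & (1UL << off))
			return off;
	return size;
}

#define for_each_set_bit_cont(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit) + 1); \
	     (bit) < (size); \
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

int main(void)
{
	unsigned long mask = (1UL << 1) | (1UL << 4) | (1UL << 6);
	unsigned long bit = 3;	/* resume after bit 3: visits 4 and 6 */

	for_each_set_bit_cont(bit, &mask, 8)
		printf("bit %lu\n", bit);
	return 0;
}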