Date:   Wed, 7 Sep 2022 10:57:35 +0530
From:   Anshuman Khandual <anshuman.khandual@....com>
To:     Peter Zijlstra <peterz@...radead.org>
Cc:     linux-kernel@...r.kernel.org, linux-perf-users@...r.kernel.org,
        James Clark <james.clark@....com>,
        Ingo Molnar <mingo@...hat.com>,
        Arnaldo Carvalho de Melo <acme@...nel.org>,
        Mark Rutland <mark.rutland@....com>,
        Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
        Jiri Olsa <jolsa@...nel.org>,
        Namhyung Kim <namhyung@...nel.org>,
        linux-arm-kernel@...ts.infradead.org, x86@...nel.org,
        Thomas Gleixner <tglx@...utronix.de>,
        Borislav Petkov <bp@...en8.de>
Subject: Re: [PATCH V2 4/4] x86/perf: Assert all platform event flags are
 within PERF_EVENT_FLAG_ARCH



On 9/7/22 00:52, Peter Zijlstra wrote:
> On Mon, Sep 05, 2022 at 11:12:39AM +0530, Anshuman Khandual wrote:
> 
>>  arch/x86/events/perf_event.h | 20 ++++++++++++++++++++
>>  1 file changed, 20 insertions(+)
>>
>> diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
>> index ba3d24a6a4ec..12136a33e9b7 100644
>> --- a/arch/x86/events/perf_event.h
>> +++ b/arch/x86/events/perf_event.h
>> @@ -86,6 +86,26 @@ static inline bool constraint_match(struct event_constraint *c, u64 ecode)
>>  #define PERF_X86_EVENT_AMD_BRS		0x10000 /* AMD Branch Sampling */
>>  #define PERF_X86_EVENT_PEBS_LAT_HYBRID	0x20000 /* ld and st lat for hybrid */
>>  
>> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_LDLAT) == PERF_X86_EVENT_PEBS_LDLAT);
>> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_ST) == PERF_X86_EVENT_PEBS_ST);
>> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_ST_HSW) == PERF_X86_EVENT_PEBS_ST_HSW);
>> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_LD_HSW) == PERF_X86_EVENT_PEBS_LD_HSW);
>> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_NA_HSW) == PERF_X86_EVENT_PEBS_NA_HSW);
>> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_EXCL) == PERF_X86_EVENT_EXCL);
>> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_DYNAMIC) == PERF_X86_EVENT_DYNAMIC);
>> +
>> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_EXCL_ACCT) == PERF_X86_EVENT_EXCL_ACCT);
>> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_AUTO_RELOAD) == PERF_X86_EVENT_AUTO_RELOAD);
>> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_LARGE_PEBS) == PERF_X86_EVENT_LARGE_PEBS);
>> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_VIA_PT) == PERF_X86_EVENT_PEBS_VIA_PT);
>> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PAIR) == PERF_X86_EVENT_PAIR);
>> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_LBR_SELECT) == PERF_X86_EVENT_LBR_SELECT);
>> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_TOPDOWN) == PERF_X86_EVENT_TOPDOWN);
>> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_STLAT) == PERF_X86_EVENT_PEBS_STLAT);
>> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_AMD_BRS) == PERF_X86_EVENT_AMD_BRS);
>> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_LAT_HYBRID)
>> +				   == PERF_X86_EVENT_PEBS_LAT_HYBRID);
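
Each assert here is the standard bitmask subset test: (PERF_EVENT_FLAG_ARCH & flag)
== flag holds exactly when every bit set in the flag is also set in the arch mask,
so the build breaks as soon as a flag is allocated outside the bits reserved for
the architecture. With a hypothetical 18-bit mask 0x3ffff, for example, 0x20000
(bit 17) passes, while 0x40000 (bit 18) would trip the assert, since
0x3ffff & 0x40000 == 0.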
> 
> 
> That's not half tedious...
> 
> How about something like so?

Makes sense, will fold this back. Could I also include your "Signed-off-by:"
for this patch?

- Anshuman

> 
> ---
> --- a/arch/x86/events/perf_event.h
> +++ b/arch/x86/events/perf_event.h
> @@ -64,27 +64,25 @@ static inline bool constraint_match(stru
>  	return ((ecode & c->cmask) - c->code) <= (u64)c->size;
>  }
>  
> +#define PERF_ARCH(name, val)	\
> +	PERF_X86_EVENT_##name = val,
> +
>  /*
>   * struct hw_perf_event.flags flags
>   */
> -#define PERF_X86_EVENT_PEBS_LDLAT	0x00001 /* ld+ldlat data address sampling */
> -#define PERF_X86_EVENT_PEBS_ST		0x00002 /* st data address sampling */
> -#define PERF_X86_EVENT_PEBS_ST_HSW	0x00004 /* haswell style datala, store */
> -#define PERF_X86_EVENT_PEBS_LD_HSW	0x00008 /* haswell style datala, load */
> -#define PERF_X86_EVENT_PEBS_NA_HSW	0x00010 /* haswell style datala, unknown */
> -#define PERF_X86_EVENT_EXCL		0x00020 /* HT exclusivity on counter */
> -#define PERF_X86_EVENT_DYNAMIC		0x00040 /* dynamic alloc'd constraint */
> -
> -#define PERF_X86_EVENT_EXCL_ACCT	0x00100 /* accounted EXCL event */
> -#define PERF_X86_EVENT_AUTO_RELOAD	0x00200 /* use PEBS auto-reload */
> -#define PERF_X86_EVENT_LARGE_PEBS	0x00400 /* use large PEBS */
> -#define PERF_X86_EVENT_PEBS_VIA_PT	0x00800 /* use PT buffer for PEBS */
> -#define PERF_X86_EVENT_PAIR		0x01000 /* Large Increment per Cycle */
> -#define PERF_X86_EVENT_LBR_SELECT	0x02000 /* Save/Restore MSR_LBR_SELECT */
> -#define PERF_X86_EVENT_TOPDOWN		0x04000 /* Count Topdown slots/metrics events */
> -#define PERF_X86_EVENT_PEBS_STLAT	0x08000 /* st+stlat data address sampling */
> -#define PERF_X86_EVENT_AMD_BRS		0x10000 /* AMD Branch Sampling */
> -#define PERF_X86_EVENT_PEBS_LAT_HYBRID	0x20000 /* ld and st lat for hybrid */
> +enum {
> +#include "perf_event_flags.h"
> +};
> +
> +#undef PERF_ARCH
> +
> +#define PERF_ARCH(name, val)						\
> +	static_assert((PERF_X86_EVENT_##name & PERF_EVENT_FLAG_ARCH) ==	\
> +		      PERF_X86_EVENT_##name);
> +
> +#include "perf_event_flags.h"
> +
> +#undef PERF_ARCH
>  
>  static inline bool is_topdown_count(struct perf_event *event)
>  {
> --- /dev/null
> +++ b/arch/x86/events/perf_event_flags.h
> @@ -0,0 +1,22 @@
> +
> +/*
> + * struct hw_perf_event.flags flags
> + */
> +PERF_ARCH(PEBS_LDLAT,		0x00001) /* ld+ldlat data address sampling */
> +PERF_ARCH(PEBS_ST,		0x00002) /* st data address sampling */
> +PERF_ARCH(PEBS_ST_HSW,		0x00004) /* haswell style datala, store */
> +PERF_ARCH(PEBS_LD_HSW,		0x00008) /* haswell style datala, load */
> +PERF_ARCH(PEBS_NA_HSW,		0x00010) /* haswell style datala, unknown */
> +PERF_ARCH(EXCL,			0x00020) /* HT exclusivity on counter */
> +PERF_ARCH(DYNAMIC,		0x00040) /* dynamic alloc'd constraint */
> +			/*	0x00080	*/
> +PERF_ARCH(EXCL_ACCT,		0x00100) /* accounted EXCL event */
> +PERF_ARCH(AUTO_RELOAD,		0x00200) /* use PEBS auto-reload */
> +PERF_ARCH(LARGE_PEBS,		0x00400) /* use large PEBS */
> +PERF_ARCH(PEBS_VIA_PT,		0x00800) /* use PT buffer for PEBS */
> +PERF_ARCH(PAIR,			0x01000) /* Large Increment per Cycle */
> +PERF_ARCH(LBR_SELECT,		0x02000) /* Save/Restore MSR_LBR_SELECT */
> +PERF_ARCH(TOPDOWN,		0x04000) /* Count Topdown slots/metrics events */
> +PERF_ARCH(PEBS_STLAT,		0x08000) /* st+stlat data address sampling */
> +PERF_ARCH(AMD_BRS,		0x10000) /* AMD Branch Sampling */
> +PERF_ARCH(PEBS_LAT_HYBRID,	0x20000) /* ld and st lat for hybrid */
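
The trick above is the double-inclusion ("X-macro") pattern: the same flag list
is expanded twice, once with PERF_ARCH generating the enum entries and once
generating the static_asserts. A minimal standalone sketch of the same idea,
using illustrative names and mask value rather than the kernel's, and with the
list folded into a macro instead of a separate header like perf_event_flags.h:

#include <assert.h>

#define DEMO_FLAG_ARCH	0x0003ffff	/* illustrative stand-in for PERF_EVENT_FLAG_ARCH */

/* One flag list; each consumer defines DEMO_FLAG, expands the list, undefines it. */
#define DEMO_FLAG_LIST			\
	DEMO_FLAG(PEBS_LDLAT, 0x00001)	\
	DEMO_FLAG(PEBS_ST,    0x00002)	\
	DEMO_FLAG(AMD_BRS,    0x10000)

/* First expansion: the enum constants. */
#define DEMO_FLAG(name, val)	DEMO_EVENT_##name = val,
enum { DEMO_FLAG_LIST };
#undef DEMO_FLAG

/* Second expansion: one containment assert per flag. */
#define DEMO_FLAG(name, val)						\
	static_assert((DEMO_EVENT_##name & DEMO_FLAG_ARCH) ==		\
		      DEMO_EVENT_##name, #name " outside arch mask");
DEMO_FLAG_LIST
#undef DEMO_FLAG

int main(void) { return 0; }	/* nothing at runtime; the checks fire at compile time */

Adding a new flag to the list now updates both the constant and its containment
check in one place.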
