Date:   Wed, 31 Aug 2022 09:41:06 -0400
From:   "Liang, Kan" <kan.liang@...ux.intel.com>
To:     Peter Zijlstra <peterz@...radead.org>, x86@...nel.org,
        eranian@...gle.com, ravi.bangoria@....com
Cc:     linux-kernel@...r.kernel.org, acme@...nel.org,
        mark.rutland@....com, alexander.shishkin@...ux.intel.com,
        jolsa@...nel.org, namhyung@...nel.org
Subject: Re: [PATCH v2 2/9] perf/x86/intel: Move the topdown stuff into the
 intel driver



On 2022-08-29 6:10 a.m., Peter Zijlstra wrote:
> Use the new x86_pmu::{set_period,update}() methods to push the topdown
> stuff into the Intel driver, where it belongs.
> 
> Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
> ---
>  arch/x86/events/core.c       |    7 -------
>  arch/x86/events/intel/core.c |   28 +++++++++++++++++++++++++---
>  2 files changed, 25 insertions(+), 10 deletions(-)
> 
> --- a/arch/x86/events/core.c
> +++ b/arch/x86/events/core.c
> @@ -119,9 +119,6 @@ u64 x86_perf_event_update(struct perf_ev
>  	if (unlikely(!hwc->event_base))
>  		return 0;
>  
> -	if (unlikely(is_topdown_count(event)) && x86_pmu.update_topdown_event)
> -		return x86_pmu.update_topdown_event(event);
> -
>  	/*
>  	 * Careful: an NMI might modify the previous event value.
>  	 *
> @@ -1373,10 +1370,6 @@ int x86_perf_event_set_period(struct per
>  	if (unlikely(!hwc->event_base))
>  		return 0;
>  
> -	if (unlikely(is_topdown_count(event)) &&
> -	    x86_pmu.set_topdown_event_period)
> -		return x86_pmu.set_topdown_event_period(event);
> -
>  	/*
>  	 * If we are way outside a reasonable range then just skip forward:
>  	 */
> --- a/arch/x86/events/intel/core.c
> +++ b/arch/x86/events/intel/core.c
> @@ -2301,7 +2301,7 @@ static void intel_pmu_nhm_workaround(voi
>  	for (i = 0; i < 4; i++) {
>  		event = cpuc->events[i];
>  		if (event)
> -			x86_perf_event_update(event);
> +			static_call(x86_pmu_update)(event);
>  	}
>  
>  	for (i = 0; i < 4; i++) {
> @@ -2316,7 +2316,7 @@ static void intel_pmu_nhm_workaround(voi
>  		event = cpuc->events[i];
>  
>  		if (event) {
> -			x86_perf_event_set_period(event);
> +			static_call(x86_pmu_set_period)(event);
>  			__x86_pmu_enable_event(&event->hw,
>  					ARCH_PERFMON_EVENTSEL_ENABLE);
>  		} else
> @@ -2793,7 +2793,7 @@ static void intel_pmu_add_event(struct p
>   */
>  int intel_pmu_save_and_restart(struct perf_event *event)
>  {
> -	x86_perf_event_update(event);
> +	static_call(x86_pmu_update)(event);
>  	/*
>  	 * For a checkpointed counter always reset back to 0.  This
>  	 * avoids a situation where the counter overflows, aborts the
> @@ -2805,9 +2805,27 @@ int intel_pmu_save_and_restart(struct pe
>  		wrmsrl(event->hw.event_base, 0);
>  		local64_set(&event->hw.prev_count, 0);
>  	}
> +	return static_call(x86_pmu_set_period)(event);
> +}
> +
> +static int intel_pmu_set_period(struct perf_event *event)
> +{
> +	if (unlikely(is_topdown_count(event)) &&
> +	    x86_pmu.set_topdown_event_period)
> +		return x86_pmu.set_topdown_event_period(event);
> +
>  	return x86_perf_event_set_period(event);
>  }
>  
> +static u64 intel_pmu_update(struct perf_event *event)
> +{
> +	if (unlikely(is_topdown_count(event)) &&
> +	    x86_pmu.update_topdown_event)
> +		return x86_pmu.update_topdown_event(event);
> +
> +	return x86_perf_event_update(event);
> +}
> +
>  static void intel_pmu_reset(void)
>  {
>  	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
> @@ -4635,6 +4653,10 @@ static __initconst const struct x86_pmu
>  	.enable_all		= core_pmu_enable_all,
>  	.enable			= core_pmu_enable_event,
>  	.disable		= x86_pmu_disable_event,
> +
> +	.set_period		= intel_pmu_set_period,
> +	.update			= intel_pmu_update,

I tried the patch, but it breaks the topdown events.
The root cause is that these callbacks should be added to intel_pmu
rather than core_pmu.
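
For reference, something along these lines should do it, i.e. hooking
the new callbacks into intel_pmu instead (untested sketch; the context
lines around the insertion point are from memory and may not match the
tree exactly):

--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ ... @@ static __initconst const struct x86_pmu intel_pmu = {
 	.enable			= intel_pmu_enable_event,
 	.disable		= intel_pmu_disable_event,
+
+	.set_period		= intel_pmu_set_period,
+	.update			= intel_pmu_update,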

Thanks,
Kan
>  	.hw_config		= core_pmu_hw_config,
>  	.schedule_events	= x86_schedule_events,
>  	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
> 
> 
