Date:	Sun, 10 Jul 2016 11:08:43 +0200
From:	Jiri Olsa <jolsa@...hat.com>
To:	Peter Zijlstra <peterz@...radead.org>
Cc:	mingo@...nel.org, acme@...nel.org, linux-kernel@...r.kernel.org,
	andi@...stfloor.org, eranian@...gle.com, jolsa@...nel.org,
	torvalds@...ux-foundation.org, davidcc@...gle.com,
	alexander.shishkin@...ux.intel.com, namhyung@...nel.org,
	kan.liang@...el.com, khandual@...ux.vnet.ibm.com
Subject: Re: [RFC][PATCH 1/7] perf/x86/intel: Rework the large PEBS setup code

On Sat, Jul 09, 2016 at 12:25:09AM +0200, Peter Zijlstra wrote:
> On Sat, Jul 09, 2016 at 12:00:47AM +0200, Peter Zijlstra wrote:
> > Yes, you're right. Let me try and see if I can make that better.
> 
> Something like so?

yep, seems good ;-)

jirka

> 
> ---
> --- a/arch/x86/events/intel/ds.c
> +++ b/arch/x86/events/intel/ds.c
> @@ -831,6 +831,18 @@ static inline void pebs_update_threshold
>  	ds->pebs_interrupt_threshold = threshold;
>  }
>  
> +static void pebs_update_state(bool needs_cb, struct cpu_hw_events *cpuc, struct pmu *pmu)
> +{
> +	if (needs_cb != pebs_needs_sched_cb(cpuc)) {
> +		if (!needs_cb)
> +			perf_sched_cb_inc(pmu);
> +		else
> +			perf_sched_cb_dec(pmu);
> +
> +		pebs_update_threshold(cpuc);
> +	}
> +}
> +
>  static void intel_pmu_pebs_add(struct perf_event *event)
>  {
>  	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> @@ -841,10 +853,7 @@ static void intel_pmu_pebs_add(struct pe
>  	if (hwc->flags & PERF_X86_EVENT_FREERUNNING)
>  		cpuc->n_large_pebs++;
>  
> -	if (!needs_cb && pebs_needs_sched_cb(cpuc))
> -		perf_sched_cb_inc(event->ctx->pmu);
> -
> -	pebs_update_threshold(cpuc);
> +	pebs_update_state(needs_cb, cpuc, event->ctx->pmu);
>  }
>  
>  void intel_pmu_pebs_enable(struct perf_event *event)
> @@ -884,11 +893,7 @@ static void intel_pmu_pebs_del(struct pe
>  	if (hwc->flags & PERF_X86_EVENT_FREERUNNING)
>  		cpuc->n_large_pebs--;
>  
> -	if (needs_cb && !pebs_needs_sched_cb(cpuc))
> -		perf_sched_cb_dec(event->ctx->pmu);
> -
> -	if (cpuc->n_pebs)
> -		pebs_update_threshold(cpuc);
> +	pebs_update_state(needs_cb, cpuc, event->ctx->pmu);
>  }
>  
>  void intel_pmu_pebs_disable(struct perf_event *event)
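For context, pebs_update_state() makes the perf_sched_cb_inc()/perf_sched_cb_dec()
calls edge-triggered: the callback refcount only moves when the "needs sched
callback" state actually changes across an add or del, and the PEBS threshold is
recomputed on the same edge. A minimal userspace sketch of that pattern follows
(illustrative only; the predicate and names such as sched_cb_refcount are
assumptions for the sketch, not the actual pebs_needs_sched_cb() implementation):

/*
 * Standalone illustration of the edge-triggered update done by
 * pebs_update_state().  Not kernel code: the "needs callback"
 * predicate below is an assumption chosen to exercise both edges,
 * and sched_cb_refcount stands in for the perf_sched_cb_* refcount.
 */
#include <stdbool.h>
#include <stdio.h>

static int sched_cb_refcount;		/* perf_sched_cb_inc/dec stand-in */
static int n_pebs, n_large_pebs;	/* mirrors cpuc->n_pebs/n_large_pebs */

static bool needs_sched_cb(void)
{
	/* Assumption for the sketch: the context-switch callback is
	 * only needed while every active PEBS event uses the large
	 * (multi-entry) buffer. */
	return n_pebs && n_pebs == n_large_pebs;
}

static void update_state(bool needed_before)
{
	if (needed_before != needs_sched_cb()) {
		if (!needed_before)
			sched_cb_refcount++;	/* perf_sched_cb_inc() */
		else
			sched_cb_refcount--;	/* perf_sched_cb_dec() */
		/* pebs_update_threshold() would run here */
	}
}

static void add_event(bool large)
{
	bool needed_before = needs_sched_cb();

	n_pebs++;
	if (large)
		n_large_pebs++;
	update_state(needed_before);
}

static void del_event(bool large)
{
	bool needed_before = needs_sched_cb();

	n_pebs--;
	if (large)
		n_large_pebs--;
	update_state(needed_before);
}

int main(void)
{
	add_event(true);	/* all large: refcount 0 -> 1 */
	add_event(false);	/* mixed: refcount 1 -> 0 */
	del_event(false);	/* all large again: 0 -> 1 */
	del_event(true);	/* no PEBS events left: 1 -> 0 */
	printf("refcount = %d\n", sched_cb_refcount);	/* prints 0 */
	return 0;
}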
