Date:	Wed, 01 Dec 2010 15:26:28 +0100
From:	Peter Zijlstra <peterz@...radead.org>
To:	Andi Kleen <andi@...stfloor.org>
Cc:	mingo@...e.hu, linux-kernel@...r.kernel.org,
	Andi Kleen <ak@...ux.intel.com>, eranian@...gle.com
Subject: Re: [PATCH 2/3] perf-events: Add support for supplementary event
 registers v4

On Mon, 2010-11-29 at 14:12 +0100, Andi Kleen wrote:

> diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
> index 817d2b1..a6754ea 100644
> --- a/arch/x86/kernel/cpu/perf_event.c
> +++ b/arch/x86/kernel/cpu/perf_event.c
> @@ -93,6 +93,8 @@ struct amd_nb {
>  	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
>  };
>  
> +struct intel_percore;
> +
>  #define MAX_LBR_ENTRIES		16
>  
>  struct cpu_hw_events {
> @@ -128,6 +130,13 @@ struct cpu_hw_events {
>  	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
>  
>  	/*
> +	 * Intel percore register state.
> +	 * Coordinate shared resources between HT threads.
> +	 */
> +	int				percore_used; /* Used by this CPU? */
> +	struct intel_percore		*per_core;
> +
> +	/*
>  	 * AMD specific bits
>  	 */
>  	struct amd_nb		*amd_nb;
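
So every cpu_hw_events grows a per_core pointer, and the idea is that
after bring-up the SMT siblings of one core alias a single
intel_percore (a sketch of the intended topology, assuming two threads
per core):

	CPU 0 (core 3) --.
	                 +--> struct intel_percore { .core_id = 3, .refcnt = 2, ... }
	CPU 4 (core 3) --'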

> +/*
> + * Per core state
> + * This is used to coordinate shared registers for HT threads.
> + */
> +struct intel_percore {
> +	raw_spinlock_t		lock;		/* protect structure */
> +	struct er_account	regs[MAX_EXTRA_REGS];
> +	int			refcnt;		/* number of threads */
> +	unsigned		core_id;
> +};
> +
>  /*
>   * Intel PerfMon, used on Core and later.
>   */
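
The er_account slots aren't in the hunks quoted here; presumably each
one tracks who owns a shared extra MSR and with what value, something
like:

	/* a sketch, not necessarily the patch's exact layout */
	struct er_account {
		int		ref;		/* reference count */
		unsigned int	extra_reg;	/* extra MSR number */
		u64		extra_config;	/* extra MSR value */
	};

with intel_percore->lock taken around any test-and-set of
extra_config, so the two HT threads can only have the shared MSR
programmed to one agreed value at a time.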


> +static int intel_pmu_cpu_prepare(int cpu)
> +{
> +	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
> +
> +	cpuc->per_core = kzalloc_node(sizeof(struct intel_percore),
> +				      GFP_KERNEL, cpu_to_node(cpu));
> +	if (!cpuc->per_core)
> +		return NOTIFY_BAD;
> +
> +	raw_spin_lock_init(&cpuc->per_core->lock);
> +	cpuc->per_core->core_id = -1;
> +	return NOTIFY_OK;
> +}
> +
>  static void intel_pmu_cpu_starting(int cpu)
>  {
> +	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
> +	int core_id = topology_core_id(cpu);
> +	int i;
> +
> +	for_each_online_cpu(i) {
> +		struct intel_percore *pc = per_cpu(cpu_hw_events, i).per_core;
> +
> +		if (pc && pc->core_id == core_id) {
> +			kfree(cpuc->per_core);
> +			cpuc->per_core = pc;
> +			break;
> +		}
> +	}
> +
> +	cpuc->per_core->core_id = core_id;
> +	cpuc->per_core->refcnt++;
> +
>  	init_debug_store_on_cpu(cpu);
>  	/*
>  	 * Deal with CPUs that don't clear their LBRs on power-up.
> @@ -868,6 +1049,15 @@ static void intel_pmu_cpu_starting(int cpu)
>  
>  static void intel_pmu_cpu_dying(int cpu)
>  {
> +	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
> +	struct intel_percore *pc = cpuc->per_core;
> +
> +	if (pc) {
> +		if (pc->core_id == -1 || --pc->refcnt == 0)
> +			kfree(pc);
> +		cpuc->per_core = NULL;
> +	}
> +
>  	fini_debug_store_on_cpu(cpu);
>  }
>  
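
So the lifecycle is: prepare allocates a candidate percore for each
CPU, starting either adopts a sibling's (freeing its own) or publishes
its own, and dying drops the reference and frees on last-man-out.
Condensed (assuming the hotplug notifiers serialize these callbacks):

	/* CPU_UP_PREPARE */	pc = kzalloc(...); pc->core_id = -1;
	/* CPU_STARTING   */	pc = find_sibling_pc(cpu) ?: pc; pc->core_id = id; pc->refcnt++;
	/* CPU_DYING      */	if (pc->core_id == -1 || --pc->refcnt == 0) kfree(pc);

(find_sibling_pc() being a stand-in for your for_each_online_cpu()
walk.) Note that topology_core_id() is only unique within a package,
so that walk can match a core on another socket; iterating
topology_thread_cpumask(cpu) instead would avoid that.
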
> @@ -892,7 +1082,9 @@ static __initconst const struct x86_pmu intel_pmu = {
>  	 */
>  	.max_period		= (1ULL << 31) - 1,
>  	.get_event_constraints	= intel_get_event_constraints,
> +	.put_event_constraints	= intel_put_event_constraints,
>  
> +	.cpu_prepare		= intel_pmu_cpu_prepare,
>  	.cpu_starting		= intel_pmu_cpu_starting,
>  	.cpu_dying		= intel_pmu_cpu_dying,
>  };
> @@ -1010,7 +1202,10 @@ static __init int intel_pmu_init(void)
>  		intel_pmu_lbr_init_nhm();
>  
>  		x86_pmu.event_constraints = intel_nehalem_event_constraints;
> +		x86_pmu.percore_constraints =
> +			intel_nehalem_percore_constraints;
>  		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
> +		x86_pmu.extra_regs = intel_nehalem_extra_regs;
>  		pr_cont("Nehalem events, ");
>  		break;
>  
> @@ -1032,7 +1227,10 @@ static __init int intel_pmu_init(void)
>  		intel_pmu_lbr_init_nhm();
>  
>  		x86_pmu.event_constraints = intel_westmere_event_constraints;
> +		x86_pmu.percore_constraints =
> +			intel_westmere_percore_constraints;
>  		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
> +		x86_pmu.extra_regs = intel_westmere_extra_regs;
>  		pr_cont("Westmere events, ");
>  		break;
>  
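
The intel_nehalem_extra_regs / intel_westmere_extra_regs tables
themselves aren't quoted here; presumably they map the offcore
response event to its supplementary MSR, along the lines of:

	/* a sketch of what such a table plausibly contains */
	static struct extra_reg intel_nehalem_extra_regs[] = {
		/* OFFCORE_RESPONSE: event 0xb7 needs MSR_OFFCORE_RSP_0 */
		INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
		EVENT_EXTRA_END
	};

i.e. an event code, the extra MSR it needs, and a mask of the valid
config bits for that MSR.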

You seem to have lost the needs_percore stuff.
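
That is, the flag gating all of the percore setup, so that models
without shared extra registers don't pay for the kzalloc and the
sibling walk on every hotplug. Something like (a sketch; the name is
from your earlier versions, the placement is a guess):

	static int intel_pmu_cpu_prepare(int cpu)
	{
		...
		if (!x86_pmu.needs_percore)
			return NOTIFY_OK;	/* nothing to allocate */
		...
	}

plus the matching early-outs in the starting/dying callbacks.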