Message-ID: <20131106105003.GG10651@twins.programming.kicks-ass.net>
Date:	Wed, 6 Nov 2013 11:50:03 +0100
From:	Peter Zijlstra <peterz@...radead.org>
To:	Stephane Eranian <eranian@...gle.com>
Cc:	linux-kernel@...r.kernel.org, mingo@...e.hu, ak@...ux.intel.com,
	acme@...hat.com, jolsa@...hat.com, zheng.z.yan@...el.com,
	bp@...en8.de, maria.n.dimakopoulou@...il.com
Subject: Re: [PATCH v5 3/4] perf,x86: add Intel RAPL PMU support

On Tue, Nov 05, 2013 at 06:01:25PM +0100, Stephane Eranian wrote:
> +static DEFINE_SPINLOCK(rapl_hotplug_lock);

> +static void rapl_exit_cpu(int cpu)
> +{
> +	int i, phys_id = topology_physical_package_id(cpu);
> +
> +	spin_lock(&rapl_hotplug_lock);

> +	spin_unlock(&rapl_hotplug_lock);
> +}
> +
> +static void rapl_init_cpu(int cpu)
> +{
> +	int i, phys_id = topology_physical_package_id(cpu);
> +
> +	spin_lock(&rapl_hotplug_lock);

> +	spin_unlock(&rapl_hotplug_lock);
> +}

> +static int rapl_cpu_starting(int cpu)
> +{
> +	struct rapl_pmu *pmu2;
> +	struct rapl_pmu *pmu1 = per_cpu(rapl_pmu, cpu);
> +	int i, phys_id = topology_physical_package_id(cpu);
> +
> +	if (pmu1)
> +		return 0;
> +
> +	spin_lock(&rapl_hotplug_lock);

> +	spin_unlock(&rapl_hotplug_lock);
> +	return 0;
> +}
> +
> +static int rapl_cpu_dying(int cpu)
> +{
> +	struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
> +	struct perf_event *event, *tmp;
> +
> +	if (!pmu)
> +		return 0;
> +
> +	spin_lock(&rapl_hotplug_lock);

> +	spin_unlock(&rapl_hotplug_lock);
> +	return 0;
> +}
> +
> +static int rapl_cpu_notifier(struct notifier_block *self,
> +			     unsigned long action, void *hcpu)
> +{
> +	unsigned int cpu = (long)hcpu;
> +
> +	/* allocate/free data structure for uncore box */
> +	switch (action & ~CPU_TASKS_FROZEN) {
> +	case CPU_UP_PREPARE:
> +		rapl_cpu_prepare(cpu);
> +		break;
> +	case CPU_STARTING:
> +		rapl_cpu_starting(cpu);
> +		break;
> +	case CPU_UP_CANCELED:
> +	case CPU_DYING:
> +		rapl_cpu_dying(cpu);
> +		break;
> +	case CPU_ONLINE:
> +		kfree(per_cpu(rapl_pmu_kfree, cpu));
> +		per_cpu(rapl_pmu_kfree, cpu) = NULL;
> +		break;
> +	case CPU_DEAD:
> +		per_cpu(rapl_pmu, cpu) = NULL;
> +		break;
> +	default:
> +		break;
> +	}
> +
> +	/* select the cpu that collects uncore events */
> +	switch (action & ~CPU_TASKS_FROZEN) {
> +	case CPU_DOWN_FAILED:
> +	case CPU_STARTING:
> +		rapl_init_cpu(cpu);
> +		break;
> +	case CPU_DOWN_PREPARE:
> +		rapl_exit_cpu(cpu);
> +		break;
> +	default:
> +		break;
> +	}
> +
> +	return NOTIFY_OK;
> +}

What's the point of that rapl_hotplug_lock? It appears all the methods it's
used from are called from hotplug notifiers, and those are fully
serialized by cpu_add_remove_lock.
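
To spell that out: cpu_up()/cpu_down() run the whole notifier sequence under
cpu_maps_update_begin()/cpu_maps_update_done(), and that pair is just the
cpu_add_remove_lock mutex. Roughly (a heavily trimmed sketch of kernel/cpu.c
from this era; the real functions do extra checks, rollback on failure and
refcounting that are left out here):

#include <linux/cpu.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(cpu_add_remove_lock);

/* defined elsewhere in kernel/cpu.c; this is where the notifiers fire */
static int _cpu_up(unsigned int cpu, int tasks_frozen);
static int _cpu_down(unsigned int cpu, int tasks_frozen);

void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

int cpu_up(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();
	/* CPU_UP_PREPARE, CPU_STARTING, CPU_ONLINE all happen in here */
	err = _cpu_up(cpu, 0);
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();
	/* CPU_DOWN_PREPARE, CPU_DYING, CPU_DEAD all happen in here */
	err = _cpu_down(cpu, 0);
	cpu_maps_update_done();
	return err;
}

So no two hotplug operations can be in flight at once, and within one
operation the callbacks run one after another; the spinlock doesn't protect
against anything that isn't already serialized.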


