Message-Id: <1461787251-6702-2-git-send-email-andi@firstfloor.org>
Date:	Wed, 27 Apr 2016 13:00:41 -0700
From:	Andi Kleen <andi@...stfloor.org>
To:	acme@...nel.org
Cc:	peterz@...radead.org, jolsa@...nel.org,
	linux-kernel@...r.kernel.org, Andi Kleen <ak@...ux.intel.com>
Subject: [PATCH 01/11] x86, perf: Support sysfs files depending on SMT status

From: Andi Kleen <ak@...ux.intel.com>

Add a way to show different sysfs event attributes depending on
whether Hyper-Threading is on or off. This is difficult to determine
early at boot, so we evaluate it dynamically when the sysfs
attribute is read.

v2:
Compute HT status only once in CPU online/offline hooks.
Signed-off-by: Andi Kleen <ak@...ux.intel.com>
---
 arch/x86/events/core.c       | 35 +++++++++++++++++++++++++++++++++++
 arch/x86/events/perf_event.h | 19 +++++++++++++++++++
 include/linux/perf_event.h   |  7 +++++++
 3 files changed, 61 insertions(+)
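Illustrative note (not part of this patch): later patches in the series are
expected to use the new EVENT_ATTR_STR_HT() macro to declare events whose
encoding differs with SMT. A hypothetical example, with event name and
encodings chosen purely for illustration:

	EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
		"event=0x3c,umask=0x0",		/* string reported when SMT is off */
		"event=0x3c,umask=0x0,any=1");	/* string reported when SMT is on */

When the corresponding sysfs file is read, events_ht_sysfs_show() returns the
second string if x86_pmu.ht_on is set and the first one otherwise, so user
tools pick up the appropriate encoding at read time.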

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 41d93d0e972b..f1411062ccfb 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1477,6 +1477,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 	unsigned int cpu = (long)hcpu;
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 	int i, ret = NOTIFY_OK;
+	bool ht_on;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
@@ -1496,6 +1497,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 			kfree(cpuc->kfree_on_online[i]);
 			cpuc->kfree_on_online[i] = NULL;
 		}
+		x86_pmu.ht_on = cpumask_weight(topology_sibling_cpumask(cpu)) > 1;
 		break;
 
 	case CPU_DYING:
@@ -1507,6 +1509,15 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 	case CPU_DEAD:
 		if (x86_pmu.cpu_dead)
 			x86_pmu.cpu_dead(cpu);
+		/* Recompute HT state for all CPUs on offline */
+		ht_on = false;
+		for_each_online_cpu (cpu) {
+			if (cpumask_weight(topology_sibling_cpumask(cpu)) > 1) {
+				ht_on = true;
+				break;
+			}
+		}
+		x86_pmu.ht_on = ht_on;
 		break;
 
 	default:
@@ -1616,6 +1627,30 @@ ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, cha
 }
 EXPORT_SYMBOL_GPL(events_sysfs_show);
 
+ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
+			  char *page)
+{
+	struct perf_pmu_events_ht_attr *pmu_attr =
+		container_of(attr, struct perf_pmu_events_ht_attr, attr);
+
+	/*
+	 * Report conditional events depending on Hyper-Threading.
+	 *
+	 * This is overly conservative as usually the HT special
+	 * handling is not needed if the other CPU thread is idle.
+	 *
+	 * Note this does not (cannot) handle the case when thread
+	 * siblings are invisible, for example with virtualization
+	 * if they are owned by some other guest.  The user tool
+	 * has to re-read when a thread sibling gets onlined later.
+	 */
+
+	return sprintf(page, "%s",
+			x86_pmu.ht_on ?
+			pmu_attr->event_str_ht :
+			pmu_attr->event_str_noht);
+}
+
 EVENT_ATTR(cpu-cycles,			CPU_CYCLES		);
 EVENT_ATTR(instructions,		INSTRUCTIONS		);
 EVENT_ATTR(cache-references,		CACHE_REFERENCES	);
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 7d62a02f49a4..6ae84bb91402 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -622,6 +622,11 @@ struct x86_pmu {
 	 * Intel host/guest support (KVM)
 	 */
 	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
+
+	/*
+	 * Hyper Threading on?
+	 */
+	bool ht_on;
 };
 
 struct x86_perf_task_context {
@@ -667,6 +672,14 @@ static struct perf_pmu_events_attr event_attr_##v = {			\
 	.event_str	= str,						\
 };
 
+#define EVENT_ATTR_STR_HT(_name, v, noht, ht)				\
+static struct perf_pmu_events_ht_attr event_attr_##v = {		\
+	.attr		= __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
+	.id		= 0,						\
+	.event_str_noht	= noht,						\
+	.event_str_ht	= ht,						\
+}
+
 extern struct x86_pmu x86_pmu __read_mostly;
 
 static inline bool x86_pmu_has_lbr_callstack(void)
@@ -937,6 +950,12 @@ int p6_pmu_init(void);
 
 int knc_pmu_init(void);
 
+ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
+			  char *page);
+
+ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
+			  char *page);
+
 static inline int is_ht_workaround_enabled(void)
 {
 	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index a090700cccca..b293b89a276f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1205,6 +1205,13 @@ struct perf_pmu_events_attr {
 	const char *event_str;
 };
 
+struct perf_pmu_events_ht_attr {
+	struct device_attribute attr;
+	u64 id;
+	const char *event_str_ht;
+	const char *event_str_noht;
+};
+
 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
 			      char *page);
 
-- 
2.5.5
