lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Fri, 22 Jan 2010 15:22:05 +0100
From:	Robert Richter <robert.richter@....com>
To:	Peter Zijlstra <a.p.zijlstra@...llo.nl>,
	Stephane Eranian <eranian@...glemail.com>
CC:	Paul Mackerras <paulus@...ba.org>, Ingo Molnar <mingo@...e.hu>,
	LKML <linux-kernel@...r.kernel.org>,
	Robert Richter <robert.richter@....com>
Subject: [PATCH 10/12] perf/core, x86: removing fixed counter handling for AMD pmu

The AMD pmu does not support fixed counters. Thus, fixed counters may
not be considered for scheduling decisions. This patch implements an
AMD specific event scheduler without fixed counter calculation that
also improves code performance in the fast path.

Signed-off-by: Robert Richter <robert.richter@....com>
---
 arch/x86/kernel/cpu/perf_event.c |   36 +++++++++++++++++++++++++++++++-----
 1 files changed, 31 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 3f81f91..3e0fc29 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1407,8 +1407,8 @@ again:
 /*
  * intel-specific counter allocator: check event constraints
  */
-static int
-intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
+static inline int
+__intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
 {
 	const struct event_constraint *event_constraint;
 	int i, code;
@@ -1432,7 +1432,7 @@ skip:
 }
 
 static int
-x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
+intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
 {
 	int idx;
 
@@ -1466,7 +1466,7 @@ x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
 		/* Try to get the previous generic event again */
 		if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) {
 try_generic:
-			idx = x86_pmu.get_event_idx(cpuc, hwc);
+			idx = __intel_get_event_idx(cpuc, hwc);
 			if (idx == -1)
 				return -EAGAIN;
 
@@ -1479,6 +1479,32 @@ try_generic:
 	return idx;
 }
 
+static int
+amd_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
+{
+	int idx;
+
+	idx = hwc->idx;
+	/* Try to get the previous generic event again */
+	if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) {
+		idx = gen_get_event_idx(cpuc, hwc);
+		if (idx == -1)
+			return -EAGAIN;
+
+		hwc->idx = idx;
+	}
+	hwc->config_base = x86_pmu.eventsel;
+	hwc->event_base  = x86_pmu.perfctr;
+
+	return idx;
+}
+
+static int
+x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
+{
+	return x86_pmu.get_event_idx(cpuc, hwc);
+}
+
 /*
  * Find a PMC slot for the freshly enabled / scheduled in event:
  */
@@ -2008,7 +2034,7 @@ static __initconst struct x86_pmu amd_pmu = {
 	.apic			= 1,
 	/* use highest bit to detect overflow */
 	.max_period		= (1ULL << 47) - 1,
-	.get_event_idx		= gen_get_event_idx,
+	.get_event_idx		= amd_get_event_idx,
 };
 
 static __init int p6_pmu_init(void)
-- 
1.6.6


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ