[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <1264170127-17402-13-git-send-email-robert.richter@amd.com>
Date: Fri, 22 Jan 2010 15:22:07 +0100
From: Robert Richter <robert.richter@....com>
To: Peter Zijlstra <a.p.zijlstra@...llo.nl>,
Stephane Eranian <eranian@...glemail.com>
CC: Paul Mackerras <paulus@...ba.org>, Ingo Molnar <mingo@...e.hu>,
LKML <linux-kernel@...r.kernel.org>,
Robert Richter <robert.richter@....com>
Subject: [PATCH 12/12] perf/core, x86: make event constraint handler generic
This patch makes the event constraint handler, which was previously
implemented only for Intel PMUs, generic for all models. Now, an event
constraint table can also be implemented and used for AMD models.
Signed-off-by: Robert Richter <robert.richter@....com>
---
arch/x86/kernel/cpu/perf_event.c | 43 +++++++++++++++-----------------------
1 files changed, 17 insertions(+), 26 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index b893ee7..ac2d3a7 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1387,47 +1387,38 @@ static int fixed_mode_idx(struct hw_perf_event *hwc)
}
/*
- * generic counter allocator: get next free counter
+ * generic counter allocator
*/
static int
gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
{
- int idx;
-
-again:
- idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_counters);
- if (idx == x86_pmu.num_counters)
- return -1;
- if (test_and_set_bit(idx, cpuc->used_mask))
- goto again;
- return idx;
-}
-
-/*
- * intel-specific counter allocator: check event constraints
- */
-static inline int
-__intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
-{
const struct event_constraint *event_constraint;
- int i, code;
+ int idx, code;
if (!x86_pmu.event_constraints)
- goto skip;
+ goto get_next_free;
code = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT;
+ /* check event constraints */
for_each_event_constraint(event_constraint, x86_pmu.event_constraints) {
if (code == event_constraint->code) {
- for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) {
- if (!test_and_set_bit(i, cpuc->used_mask))
- return i;
+ for_each_bit(idx, event_constraint->idxmsk, X86_PMC_IDX_MAX) {
+ if (!test_and_set_bit(idx, cpuc->used_mask))
+ return idx;
}
return -1;
}
}
-skip:
- return gen_get_event_idx(cpuc, hwc);
+
+get_next_free:
+ /* get next free counter */
+ idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_counters);
+ if (idx == x86_pmu.num_counters)
+ return -1;
+ if (test_and_set_bit(idx, cpuc->used_mask))
+ goto get_next_free;
+ return idx;
}
static int
@@ -1465,7 +1456,7 @@ intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
/* Try to get the previous generic event again */
if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) {
try_generic:
- idx = __intel_get_event_idx(cpuc, hwc);
+ idx = gen_get_event_idx(cpuc, hwc);
if (idx == -1)
return -EAGAIN;
--
1.6.6
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists