Message-ID: <tip-d43698918bd46c71d494555fb92195fbea1fcb6c@git.kernel.org>
Date: Wed, 29 Apr 2009 13:07:28 GMT
From: tip-bot for Robert Richter <robert.richter@....com>
To: linux-tip-commits@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, paulus@...ba.org, hpa@...or.com,
mingo@...hat.com, robert.richter@....com, a.p.zijlstra@...llo.nl,
tglx@...utronix.de, mingo@...e.hu
Subject: [tip:perfcounters/core] perf_counter, x86: rework counter disable functions
Commit-ID: d43698918bd46c71d494555fb92195fbea1fcb6c
Gitweb: http://git.kernel.org/tip/d43698918bd46c71d494555fb92195fbea1fcb6c
Author: Robert Richter <robert.richter@....com>
AuthorDate: Wed, 29 Apr 2009 12:47:19 +0200
Committer: Ingo Molnar <mingo@...e.hu>
CommitDate: Wed, 29 Apr 2009 14:51:11 +0200
perf_counter, x86: rework counter disable functions

As with the enable functions, this patch reworks the disable functions
and introduces x86_pmu_disable_counter(). The internal function
interface in struct x86_pmu has changed as well.

[ Impact: refactor and generalize code ]
Signed-off-by: Robert Richter <robert.richter@....com>
Cc: Paul Mackerras <paulus@...ba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@...llo.nl>
LKML-Reference: <1241002046-8832-23-git-send-email-robert.richter@....com>
Signed-off-by: Ingo Molnar <mingo@...e.hu>
---
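For readers outside the kernel tree, the sketch below shows the shape this
patch moves to: both vendor back-ends share one generic disable helper and
are invoked through a function-pointer table with the new (counter, index)
signature, rather than (index, raw config). It is a stand-alone illustration
in plain C, not kernel code; all names, MSR addresses, and config values
here are hypothetical stand-ins.

#include <stdio.h>
#include <stdint.h>

struct hw_counter {
	unsigned long	config_base;	/* base MSR address */
	uint64_t	config;		/* event select / config bits */
};

/* vendor-neutral ops table, analogous to struct x86_pmu */
struct pmu_ops {
	void	(*enable)(struct hw_counter *, int);
	void	(*disable)(struct hw_counter *, int);	/* the new interface */
};

/* shared helper, analogous to x86_pmu_disable_counter(): write the raw
 * config (enable bit clear) to the counter's event select register */
static void generic_disable(struct hw_counter *hwc, int idx)
{
	printf("wrmsr(0x%lx, 0x%llx)\n",
	       hwc->config_base + idx, (unsigned long long)hwc->config);
}

/* the Intel wrapper keeps a special case for fixed-function counters */
static void intel_disable(struct hw_counter *hwc, int idx)
{
	if (hwc->config_base == 0x38d) {	/* fixed-ctr control, made up */
		printf("fixed-counter path for counter %d\n", idx);
		return;
	}
	generic_disable(hwc, idx);
}

/* the AMD wrapper has no special case and simply delegates */
static void amd_disable(struct hw_counter *hwc, int idx)
{
	generic_disable(hwc, idx);
}

int main(void)
{
	struct pmu_ops intel_pmu = { .disable = intel_disable };
	struct pmu_ops amd_pmu   = { .disable = amd_disable };
	struct hw_counter hwc    = { .config_base = 0x186, .config = 0x4300c0 };

	/* callers dispatch through the ops table and never see the
	 * vendor difference */
	intel_pmu.disable(&hwc, 0);
	amd_pmu.disable(&hwc, 0);
	return 0;
}

The payoff, visible in the hunks below, is that common call sites go through
x86_pmu.disable(hwc, idx), while a path known to be Intel-only (the PMI
handler) can call intel_pmu_disable_counter() directly.
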
arch/x86/kernel/cpu/perf_counter.c | 48 +++++++++++++++++------------------
1 file changed, 23 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index ae55933..df9012b 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -45,7 +45,7 @@ struct x86_pmu {
 	u64		(*save_disable_all)(void);
 	void		(*restore_all)(u64);
 	void		(*enable)(struct hw_perf_counter *, int);
-	void		(*disable)(int, u64);
+	void		(*disable)(struct hw_perf_counter *, int);
 	unsigned	eventsel;
 	unsigned	perfctr;
 	u64		(*event_map)(int);
@@ -425,28 +425,19 @@ static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
 }
 
-static void intel_pmu_disable_counter(int idx, u64 config)
+static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, config);
-}
-
-static void amd_pmu_disable_counter(int idx, u64 config)
-{
-	wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
-
-}
+	int err;
 
-static void hw_perf_disable(int idx, u64 config)
-{
 	if (unlikely(!perf_counters_initialized))
 		return;
 
-	x86_pmu.disable(idx, config);
+	err = checking_wrmsrl(hwc->config_base + idx,
+			      hwc->config);
 }
 
 static inline void
-__pmc_fixed_disable(struct perf_counter *counter,
-		    struct hw_perf_counter *hwc, int __idx)
+intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
 {
 	int idx = __idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;
@@ -460,13 +451,20 @@ __pmc_fixed_disable(struct perf_counter *counter,
 }
 
 static inline void
-__x86_pmu_disable(struct perf_counter *counter,
-		  struct hw_perf_counter *hwc, int idx)
+intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
-		__pmc_fixed_disable(counter, hwc, idx);
-	else
-		hw_perf_disable(idx, hwc->config);
+	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
+		intel_pmu_disable_fixed(hwc, idx);
+		return;
+	}
+
+	x86_pmu_disable_counter(hwc, idx);
+}
+
+static inline void
+amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	x86_pmu_disable_counter(hwc, idx);
 }
 
 static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
@@ -551,7 +549,7 @@ static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 	if (cpuc->enabled)
 		x86_pmu_enable_counter(hwc, idx);
 	else
-		amd_pmu_disable_counter(idx, hwc->config);
+		x86_pmu_disable_counter(hwc, idx);
 }
 
 static int
@@ -622,7 +620,7 @@ try_generic:
 
 	perf_counters_lapic_init(hwc->nmi);
 
-	__x86_pmu_disable(counter, hwc, idx);
+	x86_pmu.disable(hwc, idx);
 
 	cpuc->counters[idx] = counter;
 	set_bit(idx, cpuc->active);
@@ -694,7 +692,7 @@ static void x86_pmu_disable(struct perf_counter *counter)
 	 * could reenable again:
 	 */
 	clear_bit(idx, cpuc->active);
-	__x86_pmu_disable(counter, hwc, idx);
+	x86_pmu.disable(hwc, idx);
 
 	/*
 	 * Make sure the cleared pointer becomes visible before we
@@ -762,7 +760,7 @@ again:
 		intel_pmu_save_and_restart(counter);
 
 		if (perf_counter_overflow(counter, nmi, regs, 0))
-			__x86_pmu_disable(counter, &counter->hw, bit);
+			intel_pmu_disable_counter(&counter->hw, bit);
 	}
 
 	intel_pmu_ack_status(ack);
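
The hunk at line 460 above ends before the body of intel_pmu_disable_fixed(),
so only the ctrl_val/mask declarations are visible. The fixed-function
counters all sit behind one control MSR in which each counter owns a 4-bit
field, so disabling one counter is a read-modify-write that clears only that
counter's field. Below is a minimal stand-alone sketch of that masking step;
the MSR is modeled as a plain variable and the initial value is made up.

#include <stdio.h>
#include <stdint.h>

/* the fixed-counter control MSR modeled as a plain variable; the
 * made-up value 0x333 stands for counters 0..2 enabled for user and
 * kernel (each counter owns a 4-bit field) */
static uint64_t fixed_ctr_ctrl = 0x333;

static void disable_fixed(int idx)
{
	uint64_t mask = 0xfULL << (idx * 4);	/* this counter's 4 bits */
	uint64_t ctrl_val = fixed_ctr_ctrl;	/* stands in for rdmsrl() */

	ctrl_val &= ~mask;			/* clear only this field */
	fixed_ctr_ctrl = ctrl_val;		/* stands in for checking_wrmsrl() */
}

int main(void)
{
	printf("before: 0x%llx\n", (unsigned long long)fixed_ctr_ctrl);
	disable_fixed(1);
	printf("after:  0x%llx\n", (unsigned long long)fixed_ctr_ctrl);	/* 0x303 */
	return 0;
}

The generic counters need no such step: each has its own event-select MSR,
which is why x86_pmu_disable_counter() above can simply rewrite hwc->config
with the enable bit clear.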
--