Message-Id: <1529403342-17899-7-git-send-email-suzuki.poulose@arm.com>
Date: Tue, 19 Jun 2018 11:15:41 +0100
From: Suzuki K Poulose <suzuki.poulose@....com>
To: linux-arm-kernel@...ts.infradead.org
Cc: mark.rutland@....com, linux-kernel@...r.kernel.org,
will.deacon@....com, robin.murphy@....com, julien.thierry@....com,
Suzuki K Poulose <suzuki.poulose@....com>
Subject: [PATCH v3 6/7] arm64: perf: Disable PMU while processing counter overflows

The arm64 PMU updates the event counters and reprograms the
counters in the overflow IRQ handler without disabling the
PMU. This could potentially cause skew for group counters,
where the overflowed counters may lose some event counts
while they are being reprogrammed. To prevent this, disable
the PMU while we process the counter overflows and enable it
back when we are done.

This patch also moves the PMU stop/start routines to avoid a
forward declaration.

Suggested-by: Mark Rutland <mark.rutland@....com>
Cc: Will Deacon <will.deacon@....com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@....com>
---
arch/arm64/kernel/perf_event.c | 50 +++++++++++++++++++++++-------------------
1 file changed, 28 insertions(+), 22 deletions(-)
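
For reference, the post-patch shape of the IRQ handler, condensed.
This is a sketch assembled from the hunks below, not the literal
code: the per-event work inside the loop is unchanged by this patch
and elided here, and the helpers armv8pmu_getreset_flags() /
armv8pmu_has_overflowed() come from the surrounding file rather
than from this diff.

static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	u32 pmovsr = armv8pmu_getreset_flags();	/* read and clear the overflow flags */
	int idx;

	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/*
	 * Freeze all counters: the counters read and reprogrammed
	 * below can no longer lose events relative to their group
	 * siblings, which would otherwise keep counting ...
	 */
	armv8pmu_stop(cpu_pmu);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		/*
		 * Unchanged by this patch: update the event count,
		 * program the next sample period and dispatch
		 * perf_event_overflow() for each overflowed counter.
		 */
	}
	/* ... and the whole group resumes counting together. */
	armv8pmu_start(cpu_pmu);

	irq_work_run();		/* handle the pending perf events */
	return IRQ_HANDLED;
}
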
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 9ce3729..eebc635 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -678,6 +678,28 @@ static void armv8pmu_disable_event(struct perf_event *event)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
+static void armv8pmu_start(struct arm_pmu *cpu_pmu)
+{
+ unsigned long flags;
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ /* Enable all counters */
+ armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
+{
+ unsigned long flags;
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ /* Disable all counters */
+ armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
u32 pmovsr;
@@ -702,6 +724,11 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
*/
regs = get_irq_regs();
+ /*
+ * Stop the PMU while processing the counter overflows
+ * to prevent skews in group events.
+ */
+ armv8pmu_stop(cpu_pmu);
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
@@ -726,6 +753,7 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
if (perf_event_overflow(event, &data, regs))
cpu_pmu->disable(event);
}
+ armv8pmu_start(cpu_pmu);
/*
* Handle the pending perf events.
@@ -739,28 +767,6 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
return IRQ_HANDLED;
}
-static void armv8pmu_start(struct arm_pmu *cpu_pmu)
-{
- unsigned long flags;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- /* Enable all counters */
- armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
-{
- unsigned long flags;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- /* Disable all counters */
- armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct perf_event *event)
{
--
2.7.4