Message-Id: <20200516124857.75004-3-lecopzer@gmail.com>
Date: Sat, 16 May 2020 20:48:56 +0800
From: Lecopzer Chen <lecopzer@...il.com>
To: linux-kernel@...r.kernel.org
Cc: lecopzer.chen@...iatek.com, linux-arm-kernel@...ts.infradead.org,
matthias.bgg@...il.com, catalin.marinas@....com, will@...nel.org,
mark.rutland@....com, mingo@...hat.com, acme@...nel.org,
jolsa@...hat.com, namhyung@...nel.org,
linux-mediatek@...ts.infradead.org,
alexander.shishkin@...ux.intel.com, peterz@...radead.org,
yj.chiang@...iatek.com, Lecopzer Chen <lecopzer@...il.com>
Subject: [PATCH 2/3] arm64: perf: Support NMI context for perf event ISR
The perf ISR does not yet handle NMI context, so add the necessary
checks for it (see the sketch below):
- Do not take pmu_lock: it may already be held by the context the
  NMI interrupted.
- irq_work must not run in NMI context.
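
To illustrate the first point, a sketch of the hazard (illustrative
only, not code from this patch; in_nmi() comes from <linux/preempt.h>):

  /*
   * CPU0, task/IRQ context               CPU0, NMI context
   * ----------------------               -----------------
   * raw_spin_lock_irqsave(&pmu_lock);
   * ...                <-- PMU NMI fires
   *                                      raw_spin_lock_irqsave(&pmu_lock);
   *                                      spins forever: the owner is the
   *                                      very context the NMI interrupted
   */

  /* Hence the guard used by the new armv8pmu_lock()/unlock() helpers: */
  if (!in_nmi())
          raw_spin_lock_irqsave(&events->pmu_lock, flags);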
Signed-off-by: Lecopzer Chen <lecopzer.chen@...iatek.com>
---
arch/arm64/kernel/perf_event.c | 36 +++++++++++++++++++++++++---------
1 file changed, 27 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 4d7879484cec..94b404509f02 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -313,6 +313,23 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event)
(idx != ARMV8_IDX_CYCLE_COUNTER);
}
+/*
+ * A perf NMI may be taken while the kernel already holds
+ * the lock being requested; avoid acquiring the lock
+ * again in NMI context.
+ */
+#define armv8pmu_lock(lock, flags)                               \
+        do {                                                     \
+                if (!in_nmi())                                   \
+                        raw_spin_lock_irqsave(lock, flags);      \
+        } while (0)
+
+#define armv8pmu_unlock(lock, flags)                             \
+        do {                                                     \
+                if (!in_nmi())                                   \
+                        raw_spin_unlock_irqrestore(lock, flags); \
+        } while (0)
+
/*
* ARMv8 low level PMU access
*/
@@ -589,7 +606,7 @@ static void armv8pmu_enable_event(struct perf_event *event)
* Enable counter and interrupt, and set the counter to count
* the event that we're interested in.
*/
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ armv8pmu_lock(&events->pmu_lock, flags);
/*
* Disable counter
@@ -611,7 +628,7 @@ static void armv8pmu_enable_event(struct perf_event *event)
*/
armv8pmu_enable_event_counter(event);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ armv8pmu_unlock(&events->pmu_lock, flags);
}
static void armv8pmu_disable_event(struct perf_event *event)
@@ -623,7 +640,7 @@ static void armv8pmu_disable_event(struct perf_event *event)
/*
* Disable counter and interrupt
*/
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ armv8pmu_lock(&events->pmu_lock, flags);
/*
* Disable counter
@@ -635,7 +652,7 @@ static void armv8pmu_disable_event(struct perf_event *event)
*/
armv8pmu_disable_event_irq(event);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ armv8pmu_unlock(&events->pmu_lock, flags);
}
static void armv8pmu_start(struct arm_pmu *cpu_pmu)
@@ -643,10 +660,10 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu)
unsigned long flags;
struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ armv8pmu_lock(&events->pmu_lock, flags);
/* Enable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ armv8pmu_unlock(&events->pmu_lock, flags);
}
static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
@@ -654,10 +671,10 @@ static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
unsigned long flags;
struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ armv8pmu_lock(&events->pmu_lock, flags);
/* Disable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ armv8pmu_unlock(&events->pmu_lock, flags);
}
static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
@@ -722,7 +739,8 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
* platforms that can have the PMU interrupts raised as an NMI, this
* will not work.
*/
- irq_work_run();
+        if (!armpmu_support_nmi())
+                irq_work_run();
return IRQ_HANDLED;
}
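
Note on the hunk above (illustrative, not part of the patch):
skipping irq_work_run() is safe because irq_work_queue() is NMI-safe;
queued work raises a separate irq_work IPI and therefore still runs
once the CPU has left NMI context. A minimal sketch of that pattern,
with made-up names:

  #include <linux/irq_work.h>

  static void overflow_work_fn(struct irq_work *work)
  {
          /* Runs later from the irq_work interrupt, outside the NMI. */
  }

  static DEFINE_IRQ_WORK(overflow_work, overflow_work_fn);

  static void from_nmi_path(void)
  {
          /* NMI-safe: defers the callback instead of running it here. */
          irq_work_queue(&overflow_work);
  }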
--
2.25.1