[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-Id: <20230522114922.1052421-1-arnd@kernel.org>
Date: Mon, 22 May 2023 13:48:19 +0200
From: Arnd Bergmann <arnd@...nel.org>
To: Andrew Morton <akpm@...ux-foundation.org>,
Douglas Anderson <dianders@...omium.org>
Cc: Arnd Bergmann <arnd@...db.de>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Pingfan Liu <kernelfans@...il.com>,
Sumit Garg <sumit.garg@...aro.org>,
Lecopzer Chen <lecopzer.chen@...iatek.com>,
Petr Mladek <pmladek@...e.com>,
Anshuman Khandual <anshuman.khandual@....com>,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: [PATCH] arm64: watchdog_hld: provide arm_pmu_irq_is_nmi stub
From: Arnd Bergmann <arnd@...db.de>
The newly added arch_perf_nmi_is_available() function fails to build
when CONFIG_ARM_PMU is disabled:
arch/arm64/kernel/watchdog_hld.c: In function 'arch_perf_nmi_is_available':
arch/arm64/kernel/watchdog_hld.c:35:16: error: implicit declaration of function 'arm_pmu_irq_is_nmi' [-Werror=implicit-function-declaration]
35 | return arm_pmu_irq_is_nmi();
As it turns out, that function has only one caller anyway — in the
same file as the __weak definition — and it can only be called
when CONFIG_ARM_PMU is also enabled.
I tried a number of variants, but everything ended up with more
complexity from having both the __weak function and one or more
added #ifdefs. Keeping it in watchdog_perf.c is a small layering
violation, but otherwise this is the most robust approach.
Fixes: 7e61b33831bc ("arm64: enable perf events based hard lockup detector")
Signed-off-by: Arnd Bergmann <arnd@...db.de>
---
---
arch/arm64/kernel/watchdog_hld.c | 10 ----------
include/linux/nmi.h | 1 -
include/linux/perf/arm_pmu.h | 7 ++++---
kernel/watchdog_perf.c | 11 ++++++++++-
4 files changed, 14 insertions(+), 15 deletions(-)
diff --git a/arch/arm64/kernel/watchdog_hld.c b/arch/arm64/kernel/watchdog_hld.c
index dcd25322127c..3d948e5c1c1e 100644
--- a/arch/arm64/kernel/watchdog_hld.c
+++ b/arch/arm64/kernel/watchdog_hld.c
@@ -24,13 +24,3 @@ u64 hw_nmi_get_sample_period(int watchdog_thresh)
return (u64)max_cpu_freq * watchdog_thresh;
}
-
-bool __init arch_perf_nmi_is_available(void)
-{
- /*
- * hardlockup_detector_perf_init() will success even if Pseudo-NMI turns off,
- * however, the pmu interrupts will act like a normal interrupt instead of
- * NMI and the hardlockup detector would be broken.
- */
- return arm_pmu_irq_is_nmi();
-}
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index d23902a2fd49..1fabf8c35d27 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -212,7 +212,6 @@ static inline bool trigger_single_cpu_backtrace(int cpu)
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh);
-bool arch_perf_nmi_is_available(void);
#endif
#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 5b00f5cb4cf9..cbdd3533d843 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -12,10 +12,11 @@
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>
-#include <asm/cputype.h>
#ifdef CONFIG_ARM_PMU
+#include <asm/cputype.h>
+
/*
* The ARMv7 CPU PMU supports up to 32 event counters.
*/
@@ -171,8 +172,6 @@ void kvm_host_pmu_init(struct arm_pmu *pmu);
#define kvm_host_pmu_init(x) do { } while(0)
#endif
-bool arm_pmu_irq_is_nmi(void);
-
/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
void armpmu_free(struct arm_pmu *pmu);
@@ -184,6 +183,8 @@ void armpmu_free_irq(int irq, int cpu);
#endif /* CONFIG_ARM_PMU */
+bool arm_pmu_irq_is_nmi(void);
+
#define ARMV8_SPE_PDEV_NAME "arm,spe-v1"
#endif /* __ARM_PMU_H__ */
diff --git a/kernel/watchdog_perf.c b/kernel/watchdog_perf.c
index 8ea00c4a24b2..ee7d3dcfdda2 100644
--- a/kernel/watchdog_perf.c
+++ b/kernel/watchdog_perf.c
@@ -19,6 +19,7 @@
#include <asm/irq_regs.h>
#include <linux/perf_event.h>
+#include <linux/perf/arm_pmu.h>
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
static DEFINE_PER_CPU(struct perf_event *, dead_event);
@@ -234,8 +235,16 @@ void __init hardlockup_detector_perf_restart(void)
}
}
-bool __weak __init arch_perf_nmi_is_available(void)
+static bool __init arch_perf_nmi_is_available(void)
{
+ /*
+ * hardlockup_detector_perf_init() will succeed even if Pseudo-NMI is turned
+ * off; however, the PMU interrupts will then act like normal interrupts
+ * instead of NMIs, and the hardlockup detector would be broken.
+ */
+ if (IS_ENABLED(CONFIG_ARM_PMU))
+ return arm_pmu_irq_is_nmi();
+
return true;
}
--
2.39.2
Powered by blists - more mailing lists