Message-Id: <20211014024155.15253-4-kernelfans@gmail.com>
Date: Thu, 14 Oct 2021 10:41:54 +0800
From: Pingfan Liu <kernelfans@...il.com>
To: linux-kernel@...r.kernel.org
Cc: Pingfan Liu <kernelfans@...il.com>,
Sumit Garg <sumit.garg@...aro.org>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>, Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...hat.com>,
Namhyung Kim <namhyung@...nel.org>,
Marc Zyngier <maz@...nel.org>,
Kees Cook <keescook@...omium.org>,
Masahiro Yamada <masahiroy@...nel.org>,
Sami Tolvanen <samitolvanen@...gle.com>,
Petr Mladek <pmladek@...e.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Wang Qing <wangqing@...o.com>,
"Peter Zijlstra (Intel)" <peterz@...radead.org>,
Santosh Sivaraj <santosh@...six.org>,
linux-arm-kernel@...ts.infradead.org
Subject: [PATCHv3 3/4] kernel/watchdog: Adapt the watchdog_hld interface for async model
When lockup_detector_init() calls watchdog_nmi_probe(), the PMU may not be
ready yet. E.g. on arm64, the PMU is not ready until
device_initcall(armv8_pmu_driver_init), and since it is deeply integrated
with the driver model and cpuhp, it is hard to move that initialization
before smp_init().

Instead, take the opposite approach and let watchdog_hld pick up the PMU
capability asynchronously. The async model is achieved by extending
watchdog_nmi_probe() with an -EBUSY return code, plus a re-initializing
work_struct that waits on a wait_queue_head.
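For illustration, the arch side could consume this roughly as follows once
its PMU driver has finished initializing (a minimal sketch; the initcall
name is hypothetical and not part of this patch):

#include <linux/init.h>
#include <linux/nmi.h>
#include <linux/wait.h>

/* Hypothetical arch hook, run after the PMU driver's device_initcall(). */
static int __init my_arch_hld_ready(void)
{
	if (detector_delay_init_state == DELAY_INIT_WAIT) {
		/* Let the queued lockup_detector_delay_init() work retry the probe. */
		detector_delay_init_state = DELAY_INIT_READY;
		wake_up(&hld_detector_wait);
	}
	return 0;
}
device_initcall_sync(my_arch_hld_ready);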
Signed-off-by: Pingfan Liu <kernelfans@...il.com>
Cc: Sumit Garg <sumit.garg@...aro.org>
Cc: Catalin Marinas <catalin.marinas@....com>
Cc: Will Deacon <will@...nel.org>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Arnaldo Carvalho de Melo <acme@...nel.org>
Cc: Mark Rutland <mark.rutland@....com>
Cc: Alexander Shishkin <alexander.shishkin@...ux.intel.com>
Cc: Jiri Olsa <jolsa@...hat.com>
Cc: Namhyung Kim <namhyung@...nel.org>
Cc: Marc Zyngier <maz@...nel.org>
Cc: Kees Cook <keescook@...omium.org>
Cc: Masahiro Yamada <masahiroy@...nel.org>
Cc: Sami Tolvanen <samitolvanen@...gle.com>
Cc: Petr Mladek <pmladek@...e.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: Wang Qing <wangqing@...o.com>
Cc: "Peter Zijlstra (Intel)" <peterz@...radead.org>
Cc: Santosh Sivaraj <santosh@...six.org>
Cc: linux-arm-kernel@...ts.infradead.org
To: linux-kernel@...r.kernel.org
---
include/linux/nmi.h | 9 +++++++
kernel/watchdog.c | 57 +++++++++++++++++++++++++++++++++++++++++++--
2 files changed, 64 insertions(+), 2 deletions(-)
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index b7bcd63c36b4..9def85c00bd8 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -118,6 +118,15 @@ static inline int hardlockup_detector_perf_init(void) { return 0; }
void watchdog_nmi_stop(void);
void watchdog_nmi_start(void);
+
+enum hld_detector_state {
+ DELAY_INIT_NOP,
+ DELAY_INIT_WAIT,
+ DELAY_INIT_READY
+};
+
+extern enum hld_detector_state detector_delay_init_state;
+extern struct wait_queue_head hld_detector_wait;
int watchdog_nmi_probe(void);
void watchdog_nmi_enable(unsigned int cpu);
void watchdog_nmi_disable(unsigned int cpu);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 6e6dd5f0bc3e..2f267d21a7a1 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -103,7 +103,11 @@ void __weak watchdog_nmi_disable(unsigned int cpu)
hardlockup_detector_perf_disable();
}
-/* Return 0, if a NMI watchdog is available. Error code otherwise */
+/*
+ * Arch specific API. Return 0 if an NMI watchdog is available, -EBUSY if it
+ * is not ready yet (arch code should wake up hld_detector_wait once it is),
+ * or another negative value if it is not supported.
+ */
int __weak __init watchdog_nmi_probe(void)
{
return hardlockup_detector_perf_init();
@@ -739,15 +743,64 @@ int proc_watchdog_cpumask(struct ctl_table *table, int write,
}
#endif /* CONFIG_SYSCTL */
+static void lockup_detector_delay_init(struct work_struct *work);
+enum hld_detector_state detector_delay_init_state __initdata;
+
+struct wait_queue_head hld_detector_wait __initdata =
+ __WAIT_QUEUE_HEAD_INITIALIZER(hld_detector_wait);
+
+static struct work_struct detector_work __initdata =
+ __WORK_INITIALIZER(detector_work, lockup_detector_delay_init);
+
+static void __init lockup_detector_delay_init(struct work_struct *work)
+{
+ int ret;
+
+ wait_event(hld_detector_wait,
+ detector_delay_init_state == DELAY_INIT_READY);
+ ret = watchdog_nmi_probe();
+ if (!ret) {
+ nmi_watchdog_available = true;
+ lockup_detector_setup();
+ } else {
+ WARN_ON(ret == -EBUSY);
+ pr_info("Perf NMI watchdog permanently disabled\n");
+ }
+}
+
+/* Ensure the check runs after the PMU driver has been initialized */
+static int __init lockup_detector_check(void)
+{
+ if (detector_delay_init_state < DELAY_INIT_WAIT)
+ return 0;
+
+ if (WARN_ON(detector_delay_init_state == DELAY_INIT_WAIT)) {
+ detector_delay_init_state = DELAY_INIT_READY;
+ wake_up(&hld_detector_wait);
+ }
+ flush_work(&detector_work);
+ return 0;
+}
+late_initcall_sync(lockup_detector_check);
+
+
void __init lockup_detector_init(void)
{
+ int ret;
+
if (tick_nohz_full_enabled())
pr_info("Disabling watchdog on nohz_full cores by default\n");
cpumask_copy(&watchdog_cpumask,
housekeeping_cpumask(HK_FLAG_TIMER));
- if (!watchdog_nmi_probe())
+ ret = watchdog_nmi_probe();
+ if (!ret)
nmi_watchdog_available = true;
+ else if (ret == -EBUSY) {
+ detector_delay_init_state = DELAY_INIT_WAIT;
+ queue_work_on(smp_processor_id(), system_wq, &detector_work);
+ }
+
lockup_detector_setup();
}
--
2.31.1