Message-ID: <f6bb6dd3-7183-42db-8e7e-c9103c0bd5ff@paulmck-laptop>
Date: Wed, 5 Mar 2025 09:43:10 -0800
From: "Paul E. McKenney" <paulmck@...nel.org>
To: linux-kernel@...r.kernel.org
Cc: jstultz@...gle.com, tglx@...utronix.de, sboyd@...nel.org,
christian@...sel.eu, kernel-team@...a.com
Subject: [PATCH clocksource] Defer marking clocksources unstable to kthread

The clocksource watchdog marks clocksources unstable from within a timer
handler. On x86, this marking involves an on_each_cpu_cond_mask(),
which in turn invokes smp_call_function_many_cond(), which may not be
invoked from a timer handler. Doing so results in:

  WARNING: CPU: 3 PID: 0 at kernel/smp.c:815 smp_call_function_many_cond+0x46b/0x4c0

Fix this by deferring the marking to the clocksource watchdog kthread.
Note that the processing of clocksources already marked unstable is
deferred to this kthread, so deferring the marking itself a bit more
should be just fine.
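
As background for reviewers, the pattern relied on here boils down to
"record a flag in timer context, wake the kthread, do the heavy lifting
in process context". A minimal stand-alone sketch of that pattern
(hypothetical names throughout, not the clocksource code itself) might
look like this:

  #include <linux/bitops.h>
  #include <linux/kthread.h>
  #include <linux/sched.h>
  #include <linux/timer.h>

  /* Hypothetical flag word, kthread, and work function. */
  static unsigned long my_flags;
  static struct task_struct *my_kthread;    /* from kthread_run() elsewhere */
  static void do_deferred_work(void);       /* the expensive part */
  #define MY_WORK_PENDING 0

  /*
   * Timer handler: too restricted a context for cross-CPU calls, so
   * only record the request and kick the kthread.
   */
  static void my_timer_fn(struct timer_list *unused)
  {
          set_bit(MY_WORK_PENDING, &my_flags);
          wake_up_process(my_kthread);
  }

  /*
   * Kthread: process context, where smp_call_function_many_cond()
   * and friends are permitted.
   */
  static int my_kthread_fn(void *unused)
  {
          while (!kthread_should_stop()) {
                  set_current_state(TASK_INTERRUPTIBLE);
                  if (!test_bit(MY_WORK_PENDING, &my_flags) &&
                      !kthread_should_stop())
                          schedule();
                  __set_current_state(TASK_RUNNING);
                  if (test_and_clear_bit(MY_WORK_PENDING, &my_flags))
                          do_deferred_work();
          }
          return 0;
  }

The actual patch does not add a new kthread; it reuses the existing
clocksource watchdog kthread and adds a CLOCK_SOURCE_DEFERRED_UNSTABLE
flag that __clocksource_watchdog_kthread() acts on.
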
Reported-by: Christian Heusel <christian@...sel.eu>
Closes: https://lore.kernel.org/all/46a200b4-1293-413c-8b73-37f7b50abd1a@heusel.eu/
Signed-off-by: Paul E. McKenney <paulmck@...nel.org>
Cc: John Stultz <jstultz@...gle.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Stephen Boyd <sboyd@...nel.org>
---
 include/linux/clocksource.h |  1 +
 kernel/time/clocksource.c   | 16 ++++++++++++----
 2 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 65b7c41471c39..a5317a4c07990 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -149,6 +149,7 @@ struct clocksource {
#define CLOCK_SOURCE_SUSPEND_NONSTOP 0x80
#define CLOCK_SOURCE_RESELECT 0x100
#define CLOCK_SOURCE_VERIFY_PERCPU 0x200
+#define CLOCK_SOURCE_DEFERRED_UNSTABLE 0x400
/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 7304d7cf47f2d..60ace6f64c761 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -201,7 +201,8 @@ static void clocksource_change_rating(struct clocksource *cs, int rating)
static void __clocksource_unstable(struct clocksource *cs)
{
- cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
+ cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG |
+ CLOCK_SOURCE_DEFERRED_UNSTABLE);
cs->flags |= CLOCK_SOURCE_UNSTABLE;
/*
@@ -215,6 +216,11 @@ static void __clocksource_unstable(struct clocksource *cs)
if (cs->mark_unstable)
cs->mark_unstable(cs);
+}
+
+static void __clocksource_deferred_unstable(struct clocksource *cs)
+{
+ cs->flags |= CLOCK_SOURCE_DEFERRED_UNSTABLE;
/* kick clocksource_watchdog_kthread() */
if (finished_booting)
@@ -236,7 +242,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
list_add(&cs->wd_list, &watchdog_list);
- __clocksource_unstable(cs);
+ __clocksource_deferred_unstable(cs);
}
spin_unlock_irqrestore(&watchdog_lock, flags);
}
@@ -453,7 +459,7 @@ static void clocksource_watchdog(struct timer_list *unused)
if (read_ret == WD_READ_UNSTABLE) {
/* Clock readout unreliable, so give it up. */
- __clocksource_unstable(cs);
+ __clocksource_deferred_unstable(cs);
continue;
}
@@ -538,7 +544,7 @@ static void clocksource_watchdog(struct timer_list *unused)
pr_warn(" '%s' (not '%s') is current clocksource.\n", curr_clocksource->name, cs->name);
else
pr_warn(" No current clocksource.\n");
- __clocksource_unstable(cs);
+ __clocksource_deferred_unstable(cs);
continue;
}
@@ -703,6 +709,8 @@ static int __clocksource_watchdog_kthread(void)
spin_lock_irqsave(&watchdog_lock, flags);
list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
+ if (cs->flags & CLOCK_SOURCE_DEFERRED_UNSTABLE)
+ __clocksource_unstable(cs);
if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
list_del_init(&cs->wd_list);
clocksource_change_rating(cs, 0);
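
For completeness, a sketch (hypothetical caller and clocksource, for
illustration only) of what a timer-context caller looks like after this
change: clocksource_mark_unstable() now only sets
CLOCK_SOURCE_DEFERRED_UNSTABLE and kicks the watchdog kthread, and the
actual __clocksource_unstable() work happens later in
__clocksource_watchdog_kthread(), in process context:

  #include <linux/clocksource.h>
  #include <linux/timer.h>

  /* Hypothetical clocksource, registered elsewhere. */
  static struct clocksource my_clocksource;

  /* Runs in timer (softirq) context. */
  static void my_watchdog_timer_fn(struct timer_list *unused)
  {
          /*
           * Safe here with this patch applied: only the deferred flag
           * is set and the kthread is woken; the cross-CPU work done
           * when actually marking the clocksource unstable runs later
           * in process context.
           */
          clocksource_mark_unstable(&my_clocksource);
  }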