From 1821356596bdd28dfc69f708a6929394da351001 Mon Sep 17 00:00:00 2001
From: Yong Zhang
Date: Sat, 26 Sep 2009 10:37:14 +0800
Subject: [PATCH] softlockup: introduce touch_softlockup_watchdog_sync()

If the system has been halted for a long time, the time is synced up
on the first incoming tick. But if the softlockup timestamp is touched
at the same time, softlockup_tick() can still see an old timestamp,
which leads to a spurious softlockup warning. So defer syncing the
softlockup timestamp to the next tick.

Signed-off-by: Jason Wessel
Signed-off-by: Dongdong Deng
Cc: Ingo Molnar
Cc: Peter Zijlstra
LKML-Reference:
Signed-off-by: Yong Zhang
---
 include/linux/sched.h |    4 ++++
 kernel/softlockup.c   |   18 ++++++++++++++++++
 2 files changed, 22 insertions(+), 0 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 75e6e60..1aa4574 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -307,6 +307,7 @@ extern void sched_show_task(struct task_struct *p);
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 extern void softlockup_tick(void);
 extern void touch_softlockup_watchdog(void);
+extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
 extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
 				    void __user *buffer,
@@ -320,6 +321,9 @@ static inline void softlockup_tick(void)
 static inline void touch_softlockup_watchdog(void)
 {
 }
+static inline void touch_softlockup_watchdog_sync(void)
+{
+}
 static inline void touch_all_softlockup_watchdogs(void)
 {
 }
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 81324d1..628d531 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -25,6 +25,7 @@ static DEFINE_SPINLOCK(print_lock);
 static DEFINE_PER_CPU(unsigned long, touch_timestamp);
 static DEFINE_PER_CPU(unsigned long, print_timestamp);
 static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
+static DEFINE_PER_CPU(bool, softlock_touch_sync);
 
 static int __read_mostly did_panic;
 int __read_mostly softlockup_thresh = 60;
@@ -79,6 +80,13 @@ void touch_softlockup_watchdog(void)
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
+void touch_softlockup_watchdog_sync(void)
+{
+	__raw_get_cpu_var(touch_timestamp) = 0;
+	__raw_get_cpu_var(softlock_touch_sync) = true;
+}
+EXPORT_SYMBOL(touch_softlockup_watchdog_sync);
+
 void touch_all_softlockup_watchdogs(void)
 {
 	int cpu;
@@ -117,6 +125,15 @@ void softlockup_tick(void)
 		return;
 	}
+	/*
+	 * If the scheduler tick is not up to date, skip this check
+	 * and wait for the timestamp to be synced on the next tick.
+	 */
+	if (unlikely(per_cpu(softlock_touch_sync, this_cpu))) {
+		per_cpu(softlock_touch_sync, this_cpu) = false;
+		return;
+	}
+
 	if (touch_timestamp == 0) {
 		__touch_softlockup_watchdog();
 		return;
 	}
@@ -216,6 +233,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 			return NOTIFY_BAD;
 		}
 		per_cpu(touch_timestamp, hotcpu) = 0;
+		per_cpu(softlock_touch_sync, hotcpu) = false;
 		per_cpu(watchdog_task, hotcpu) = p;
 		kthread_bind(p, hotcpu);
 		break;
-- 
1.6.0.4
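
A minimal usage sketch for the new helper (the debugger_resume()
wrapper below is hypothetical; the intended caller is code such as a
kernel debugger that halts the machine for a long stretch and resumes
before the time has been resynced):

	#include <linux/sched.h>

	/* Hypothetical resume path, run right after a long halt. */
	static void debugger_resume(void)
	{
		/*
		 * Don't call touch_softlockup_watchdog() here: time has
		 * not been resynced yet, so it would record a stale
		 * timestamp. Instead, zero the per-cpu timestamp and set
		 * softlock_touch_sync, so softlockup_tick() skips one
		 * check and re-touches the timestamp on the following,
		 * fully synced tick.
		 */
		touch_softlockup_watchdog_sync();
	}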