[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-ID: <20241210062446.2413-1-evepolonium@gmail.com>
Date: Tue, 10 Dec 2024 11:54:18 +0530
From: Atharva Tiwari <evepolonium@...il.com>
To:
Cc: evepolonium@...il.com,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Valentin Schneider <vschneid@...hat.com>,
Lakshmi Sowjanya D <lakshmi.sowjanya.d@...el.com>,
Peter Hilber <peter.hilber@...nsynergy.com>,
"Christopher S. Hall" <christopher.s.hall@...el.com>,
Feng Tang <feng.tang@...el.com>,
Randy Dunlap <rdunlap@...radead.org>,
Marco Elver <elver@...gle.com>,
linux-kernel@...r.kernel.org
Subject: [PATCH] x86/tsc: Avoid system instability on resume from hibernation
System instability is seen during resume from hibernation when the system
is under heavy CPU load. This is caused by stale sched clock data: because
the sched clock data is not updated across hibernation, the scheduler
believes that CPU-hog tasks are owed more CPU time, causing the system to
freeze while tasks are being unfrozen.
Signed-off-by: Atharva Tiwari <evepolonium@...il.com>
---
arch/x86/kernel/tsc.c | 27 +++++++++++++++++++++++++++
include/linux/sched/clock.h | 5 ++++-
kernel/sched/clock.c | 4 ++--
3 files changed, 33 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 67aeaba4ba9c..28a5c3ff2e90 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -15,6 +15,7 @@
#include <linux/timex.h>
#include <linux/static_key.h>
#include <linux/static_call.h>
+#include <linux/suspend.h>
#include <asm/hpet.h>
#include <asm/timer.h>
@@ -1599,3 +1600,29 @@ unsigned long calibrate_delay_is_known(void)
return 0;
}
#endif
+static int tsc_pm_notifier(struct notifier_block *notifier,
+			   unsigned long pm_event, void *unused)
+{
+	switch (pm_event) {
+	case PM_HIBERNATION_PREPARE:
+		clear_sched_clock_stable();
+		break;
+	case PM_POST_HIBERNATION:
+		if (!check_tsc_unstable())
+			set_sched_clock_stable();
+		break;
+	}
+
+	return 0;
+}
+
+static struct notifier_block tsc_pm_notifier_block = {
+	.notifier_call = tsc_pm_notifier,
+};
+
+static int tsc_setup_pm_notifier(void)
+{
+	return register_pm_notifier(&tsc_pm_notifier_block);
+}
+
+subsys_initcall(tsc_setup_pm_notifier);
diff --git a/include/linux/sched/clock.h b/include/linux/sched/clock.h
index 196f0ca351a2..69efc280f14c 100644
--- a/include/linux/sched/clock.h
+++ b/include/linux/sched/clock.h
@@ -40,7 +40,9 @@ static inline void sched_clock_tick(void)
static inline void clear_sched_clock_stable(void)
{
}
-
+static inline void set_sched_clock_stable(void)
+{
+}
static inline void sched_clock_idle_sleep_event(void)
{
}
@@ -65,6 +67,7 @@ static __always_inline u64 local_clock(void)
}
#else
extern int sched_clock_stable(void);
+extern void set_sched_clock_stable(void);
extern void clear_sched_clock_stable(void);
/*
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index a09655b48140..efe8f2b69657 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -114,7 +114,7 @@ notrace static void __scd_stamp(struct sched_clock_data *scd)
scd->tick_raw = sched_clock();
}
-notrace static void __set_sched_clock_stable(void)
+notrace void set_sched_clock_stable(void)
{
struct sched_clock_data *scd;
@@ -234,7 +234,7 @@ static int __init sched_clock_init_late(void)
smp_mb(); /* matches {set,clear}_sched_clock_stable() */
if (__sched_clock_stable_early)
- __set_sched_clock_stable();
+ set_sched_clock_stable();
return 0;
}
--
2.43.0
Powered by blists - more mailing lists