Message-ID: <tip-6a369583178d0b89c2c3919c4456ee22fee0f249@git.kernel.org>
Date:   Thu, 15 Dec 2016 02:52:31 -0800
From:   tip-bot for Thomas Gleixner <tipbot@...or.com>
To:     linux-tip-commits@...r.kernel.org
Cc:     linux-kernel@...r.kernel.org, bruce.schlobohm@...el.com,
        hpa@...or.com, tglx@...utronix.de, bp@...en8.de,
        rscheidegger_lists@...peed.ch, mingo@...nel.org,
        kevin.b.stanton@...el.com, allen_hung@...l.com,
        peterz@...radead.org
Subject: [tip:x86/timers] x86/tsc: Validate TSC_ADJUST after resume

Commit-ID:  6a369583178d0b89c2c3919c4456ee22fee0f249
Gitweb:     http://git.kernel.org/tip/6a369583178d0b89c2c3919c4456ee22fee0f249
Author:     Thomas Gleixner <tglx@...utronix.de>
AuthorDate: Tue, 13 Dec 2016 13:14:17 +0000
Committer:  Thomas Gleixner <tglx@...utronix.de>
CommitDate: Thu, 15 Dec 2016 11:44:29 +0100

x86/tsc: Validate TSC_ADJUST after resume

Some 'feature' BIOSes fiddle with the TSC_ADJUST register during
suspend/resume, which renders the TSC unusable.

Add sanity checks to the resume path and restore the original value
if it was adjusted.
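
For illustration, a minimal user-space sketch of the verify-and-restore
idea follows. The rdmsrl()/wrmsrl() stubs, fake_msr and saved_adjust are
stand-ins invented for this sketch only; the real per-CPU logic is in
tsc_verify_tsc_adjust() in arch/x86/kernel/tsc_sync.c in the diff below.

/*
 * Simplified, user-space sketch of the TSC_ADJUST verify/restore logic.
 * rdmsrl()/wrmsrl() are stand-in stubs here; in the kernel they are real
 * MSR accessors and the recorded value lives in a per-CPU struct
 * tsc_adjust.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define MSR_IA32_TSC_ADJUST	0x3b

static int64_t fake_msr;	/* stands in for the hardware MSR */

static void rdmsrl(uint32_t msr, int64_t *val) { (void)msr; *val = fake_msr; }
static void wrmsrl(uint32_t msr, int64_t val)  { (void)msr; fake_msr = val; }

/* Value recorded at boot; the kernel keeps this per CPU. */
static int64_t saved_adjust;

static void verify_tsc_adjust(bool resume)
{
	int64_t curval;

	rdmsrl(MSR_IA32_TSC_ADJUST, &curval);
	if (curval == saved_adjust)
		return;

	/* Firmware changed TSC_ADJUST: restore the recorded value. */
	wrmsrl(MSR_IA32_TSC_ADJUST, saved_adjust);
	if (resume)
		printf("TSC ADJUST differs: %lld --> %lld. Restoring\n",
		       (long long)curval, (long long)saved_adjust);
}

int main(void)
{
	saved_adjust = 0;
	fake_msr = -125;	/* simulate a BIOS write during resume */
	verify_tsc_adjust(true);
	return 0;
}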

Reported-and-tested-by: Roland Scheidegger <rscheidegger_lists@...peed.ch>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Cc: Bruce Schlobohm <bruce.schlobohm@...el.com>
Cc: Kevin Stanton <kevin.b.stanton@...el.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Allen Hung <allen_hung@...l.com>
Cc: Borislav Petkov <bp@...en8.de>
Link: http://lkml.kernel.org/r/20161213131211.317654500@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>

---
 arch/x86/include/asm/tsc.h | 4 ++--
 arch/x86/kernel/process.c  | 2 +-
 arch/x86/kernel/tsc.c      | 6 ++++++
 arch/x86/kernel/tsc_sync.c | 6 +++---
 arch/x86/power/cpu.c       | 1 +
 5 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index c054eaa..372ad0c 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -47,12 +47,12 @@ extern int tsc_clocksource_reliable;
  */
 #ifdef CONFIG_X86_TSC
 extern bool tsc_store_and_check_tsc_adjust(void);
-extern void tsc_verify_tsc_adjust(void);
+extern void tsc_verify_tsc_adjust(bool resume);
 extern void check_tsc_sync_source(int cpu);
 extern void check_tsc_sync_target(void);
 #else
 static inline bool tsc_store_and_check_tsc_adjust(void) { return false; }
-static inline void tsc_verify_tsc_adjust(void) { }
+static inline void tsc_verify_tsc_adjust(bool resume) { }
 static inline void check_tsc_sync_source(int cpu) { }
 static inline void check_tsc_sync_target(void) { }
 #endif
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 4fe5dc8..a67e0f0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -277,7 +277,7 @@ void exit_idle(void)
 
 void arch_cpu_idle_enter(void)
 {
-	tsc_verify_tsc_adjust();
+	tsc_verify_tsc_adjust(false);
 	local_touch_nmi();
 	enter_idle();
 }
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 2bb8de4..bfb541a 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1080,6 +1080,11 @@ static void detect_art(void)
 
 static struct clocksource clocksource_tsc;
 
+static void tsc_resume(struct clocksource *cs)
+{
+	tsc_verify_tsc_adjust(true);
+}
+
 /*
  * We used to compare the TSC to the cycle_last value in the clocksource
  * structure to avoid a nasty time-warp. This can be observed in a
@@ -1112,6 +1117,7 @@ static struct clocksource clocksource_tsc = {
 	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
 				  CLOCK_SOURCE_MUST_VERIFY,
 	.archdata               = { .vclock_mode = VCLOCK_TSC },
+	.resume			= tsc_resume,
 };
 
 void mark_tsc_unstable(char *reason)
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index a75f696..94f2ce5 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -30,7 +30,7 @@ struct tsc_adjust {
 
 static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
 
-void tsc_verify_tsc_adjust(void)
+void tsc_verify_tsc_adjust(bool resume)
 {
 	struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust);
 	s64 curval;
@@ -39,7 +39,7 @@ void tsc_verify_tsc_adjust(void)
 		return;
 
 	/* Rate limit the MSR check */
-	if (time_before(jiffies, adj->nextcheck))
+	if (!resume && time_before(jiffies, adj->nextcheck))
 		return;
 
 	adj->nextcheck = jiffies + HZ;
@@ -51,7 +51,7 @@ void tsc_verify_tsc_adjust(void)
 	/* Restore the original value */
 	wrmsrl(MSR_IA32_TSC_ADJUST, adj->adjusted);
 
-	if (!adj->warned) {
+	if (!adj->warned || resume) {
 		pr_warn(FW_BUG "TSC ADJUST differs: CPU%u %lld --> %lld. Restoring\n",
 			smp_processor_id(), adj->adjusted, curval);
 		adj->warned = true;
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 53cace2..66ade16 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -252,6 +252,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
 	fix_processor_context();
 
 	do_fpu_end();
+	tsc_verify_tsc_adjust(true);
 	x86_platform.restore_sched_clock_state();
 	mtrr_bp_restore();
 	perf_restore_debug_store();
