Message-ID: <tip-ba75fb64693127c7b6e8a822c68d3480cbf56d6d@git.kernel.org>
Date:   Tue, 29 Nov 2016 08:48:17 -0800
From:   tip-bot for Thomas Gleixner <tipbot@...or.com>
To:     linux-tip-commits@...r.kernel.org
Cc:     tglx@...utronix.de, peterz@...radead.org, yinghai@...nel.org,
        mingo@...nel.org, linux-kernel@...r.kernel.org, bp@...en8.de,
        hpa@...or.com
Subject: [tip:x86/timers] x86/tsc: Sync test only for the first cpu in a
 package

Commit-ID:  ba75fb64693127c7b6e8a822c68d3480cbf56d6d
Gitweb:     http://git.kernel.org/tip/ba75fb64693127c7b6e8a822c68d3480cbf56d6d
Author:     Thomas Gleixner <tglx@...utronix.de>
AuthorDate: Sat, 19 Nov 2016 13:47:39 +0000
Committer:  Thomas Gleixner <tglx@...utronix.de>
CommitDate: Tue, 29 Nov 2016 17:29:47 +0100

x86/tsc: Sync test only for the first cpu in a package

If the TSC_ADJUST MSR is available, all CPUs in a package are forced to the
same value, so the TSCs cannot be out of sync once the first CPU in the
package has been brought into sync.

That allows the sync test to be skipped for all CPUs except the first CPU
which comes online in a package.
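
The per-package invariant can be observed from user space. The following is
a minimal sketch (not part of this patch; it assumes the msr driver is
loaded via "modprobe msr" and root privileges) which dumps IA32_TSC_ADJUST
(MSR 0x3b) for every present CPU:

  #include <fcntl.h>
  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <unistd.h>

  #define MSR_IA32_TSC_ADJUST	0x3b

  int main(void)
  {
  	char path[64];
  	uint64_t val;
  	int cpu, fd;

  	for (cpu = 0; ; cpu++) {
  		/* One msr device node per present CPU */
  		snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
  		fd = open(path, O_RDONLY);
  		if (fd < 0)
  			break;	/* No more CPUs (or msr driver missing) */
  		/* The MSR index is the file offset, reads are 8 bytes */
  		if (pread(fd, &val, sizeof(val), MSR_IA32_TSC_ADJUST) ==
  		    (ssize_t) sizeof(val))
  			printf("CPU%d: TSC_ADJUST: %" PRId64 "\n",
  			       cpu, (int64_t) val);
  		close(fd);
  	}
  	return 0;
  }

On a machine where the BIOS left the MSR alone, all CPUs of one package
should report the same (usually zero) value.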

Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Reviewed-by: Ingo Molnar <mingo@...nel.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Yinghai Lu <yinghai@...nel.org>
Cc: Borislav Petkov <bp@...en8.de>
Link: http://lkml.kernel.org/r/20161119134017.809901363@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>

---
 arch/x86/include/asm/tsc.h |  4 ++--
 arch/x86/kernel/tsc_sync.c | 37 ++++++++++++++++++++++++++++---------
 2 files changed, 30 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index b896e9e..04721d5 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -49,10 +49,10 @@ extern void check_tsc_sync_source(int cpu);
 extern void check_tsc_sync_target(void);
 
 #ifdef CONFIG_X86_TSC
-extern void tsc_store_and_check_tsc_adjust(void);
+extern bool tsc_store_and_check_tsc_adjust(void);
 extern void tsc_verify_tsc_adjust(void);
 #else
-static inline void tsc_store_and_check_tsc_adjust(void) { }
+static inline bool tsc_store_and_check_tsc_adjust(void) { return false; }
 static inline void tsc_verify_tsc_adjust(void) { }
 #endif
 
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index f9c291e..1770f60 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -59,19 +59,20 @@ void tsc_verify_tsc_adjust(void)
 }
 
 #ifndef CONFIG_SMP
-void __init tsc_store_and_check_tsc_adjust(void)
+bool __init tsc_store_and_check_tsc_adjust(void)
 {
 	struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
 	s64 bootval;
 
 	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
-		return;
+		return false;
 
 	rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
 	cur->bootval = bootval;
 	cur->adjusted = bootval;
 	cur->nextcheck = jiffies + HZ;
 	pr_info("TSC ADJUST: Boot CPU0: %lld\n", bootval);
+	return false;
 }
 
 #else /* !CONFIG_SMP */
@@ -79,14 +80,14 @@ void __init tsc_store_and_check_tsc_adjust(void)
 /*
  * Store and check the TSC ADJUST MSR if available
  */
-void tsc_store_and_check_tsc_adjust(void)
+bool tsc_store_and_check_tsc_adjust(void)
 {
 	struct tsc_adjust *ref, *cur = this_cpu_ptr(&tsc_adjust);
 	unsigned int refcpu, cpu = smp_processor_id();
 	s64 bootval;
 
 	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
-		return;
+		return false;
 
 	rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
 	cur->bootval = bootval;
@@ -110,7 +111,7 @@ void tsc_store_and_check_tsc_adjust(void)
 		 */
 		cur->adjusted = bootval;
 		pr_info_once("TSC ADJUST: Boot CPU%u: %lld\n", cpu, bootval);
-		return;
+		return false;
 	}
 
 	ref = per_cpu_ptr(&tsc_adjust, refcpu);
@@ -134,6 +135,11 @@ void tsc_store_and_check_tsc_adjust(void)
 		cur->adjusted = ref->adjusted;
 		wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted);
 	}
+	/*
+	 * We have the TSCs forced to be in sync on this package. Skip sync
+	 * test:
+	 */
+	return true;
 }
 
 /*
@@ -142,6 +148,7 @@ void tsc_store_and_check_tsc_adjust(void)
  */
 static atomic_t start_count;
 static atomic_t stop_count;
+static atomic_t skip_test;
 
 /*
  * We use a raw spinlock in this exceptional case, because
@@ -265,10 +272,16 @@ void check_tsc_sync_source(int cpu)
 	atomic_set(&stop_count, 0);
 
 	/*
-	 * Wait for the target to arrive:
+	 * Wait for the target to start or to skip the test:
 	 */
-	while (atomic_read(&start_count) != cpus-1)
+	while (atomic_read(&start_count) != cpus - 1) {
+		if (atomic_read(&skip_test) > 0) {
+			atomic_set(&skip_test, 0);
+			return;
+		}
 		cpu_relax();
+	}
+
 	/*
 	 * Trigger the target to continue into the measurement too:
 	 */
@@ -318,8 +331,14 @@ void check_tsc_sync_target(void)
 	if (unsynchronized_tsc() || tsc_clocksource_reliable)
 		return;
 
-	/* Store and check the TSC ADJUST MSR */
-	tsc_store_and_check_tsc_adjust();
+	/*
+	 * Store, verify and sanitize the TSC adjust register. If
+	 * successful, skip the test.
+	 */
+	if (tsc_store_and_check_tsc_adjust()) {
+		atomic_inc(&skip_test);
+		return;
+	}
 
 	/*
 	 * Register this CPU's participation and wait for the
