Message-Id: <20171102172644.28555-7-pasha.tatashin@oracle.com>
Date:   Thu,  2 Nov 2017 13:26:44 -0400
From:   Pavel Tatashin <pasha.tatashin@...cle.com>
To:     steven.sistare@...cle.com, daniel.m.jordan@...cle.com,
        linux@...linux.org.uk, schwidefsky@...ibm.com,
        heiko.carstens@...ibm.com, john.stultz@...aro.org,
        sboyd@...eaurora.org, x86@...nel.org, linux-kernel@...r.kernel.org,
        mingo@...hat.com, tglx@...utronix.de, hpa@...or.com,
        douly.fnst@...fujitsu.com
Subject: [PATCH v7 6/6] x86/tsc: use tsc early

tsc_early_init():
Determines the offset, shift, and multiplier for the early clock based on
the TSC frequency.
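
For reference, the conversion being set up is ns = (tsc * mul) >> shift,
later adjusted by an offset. A minimal user-space sketch of that math,
mirroring the kernel's mul_u64_u32_shr() (the frequency, mult, and shift
values below are illustrative, not taken from this patch):

	#include <stdint.h>
	#include <stdio.h>

	/* ns = (cycles * mul) >> shift, computed in 128-bit precision */
	static uint64_t cycles_to_ns(uint64_t cycles, uint32_t mul, uint32_t shift)
	{
		return (uint64_t)(((unsigned __int128)cycles * mul) >> shift);
	}

	int main(void)
	{
		/* Hypothetical 2.5 GHz TSC: 0.4 ns per cycle.
		 * With shift = 22, mul = 0.4 * 2^22 ~= 1677722. */
		uint64_t one_second = 2500000000ULL;	/* cycles */

		printf("%llu ns\n",
		       (unsigned long long)cycles_to_ns(one_second, 1677722, 22));
		return 0;
	}

This prints roughly 1000000238 ns, i.e. one second to within the rounding
error of the mult/shift pair.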

tsc_early_fini():
Implements the teardown part of the early TSC feature and prints a message
about the offset, which can be useful for finding out how much time was
spent in POST and the boot manager (if the TSC starts from 0 during boot).
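
With the pr_info() format used in the hunk below, the resulting log line
looks like this (the value shown is hypothetical):

	sched clock early is finished, offset [2.000562560s]

meaning roughly two seconds elapsed between the TSC starting to count and
tsc_early_init() running.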

sched_clock_early():
TSC-based implementation of the early clock.
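
Note that tsc_early_init() sets cyc2ns_offset to -sched_clock_early() while
the offset is still zero, so the early clock reads approximately zero right
after initialization and counts nanoseconds from there. In sketch form,
where raw denotes (rdtsc() * cyc2ns_mul) >> cyc2ns_shift:

	/* at tsc_early_init() time: */
	offset = -raw;			/* raw = ns since TSC reset */

	/* any later sched_clock_early() call: */
	ns = raw_now + offset;		/* = raw_now - raw_init, ns since init */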

Call tsc_early_init() to initialize the early boot timestamp functionality
on supported x86 platforms, and call tsc_early_fini() to retire this
feature after the permanent clock has been initialized.
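
The resulting call order, condensed from the hunks below (unrelated setup
code elided):

	/* arch/x86/kernel/setup.c */
	setup_arch()
	    early_clock_calibration()
	        tsc_early_init(tsc_khz);    /* early timestamps usable from here */

	/* arch/x86/kernel/time.c */
	x86_late_time_init()
	    tsc_init();                     /* permanent clock takes over */
	    tsc_early_fini();               /* early clock retired, offset printed */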

Signed-off-by: Pavel Tatashin <pasha.tatashin@...cle.com>
---
 arch/x86/include/asm/tsc.h |  4 +++
 arch/x86/kernel/setup.c    | 10 ++++--
 arch/x86/kernel/time.c     |  1 +
 arch/x86/kernel/tsc.c      | 81 ++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 94 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index f5e6f1c417df..6dc9618b24e3 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -50,11 +50,15 @@ extern bool tsc_store_and_check_tsc_adjust(bool bootcpu);
 extern void tsc_verify_tsc_adjust(bool resume);
 extern void check_tsc_sync_source(int cpu);
 extern void check_tsc_sync_target(void);
+void tsc_early_init(unsigned int khz);
+void tsc_early_fini(void);
 #else
 static inline bool tsc_store_and_check_tsc_adjust(bool bootcpu) { return false; }
 static inline void tsc_verify_tsc_adjust(bool resume) { }
 static inline void check_tsc_sync_source(int cpu) { }
 static inline void check_tsc_sync_target(void) { }
+static inline void tsc_early_init(unsigned int khz) { }
+static inline void tsc_early_fini(void) { }
 #endif
 
 extern int notsc_setup(char *);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 0957dd73d127..3df8be642b80 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -822,7 +822,11 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
 	return 0;
 }
 
-static void __init simple_udelay_calibration(void)
+/*
+ * Initialize the early TSC to provide early boot timestamps, and also set
+ * up loops_per_jiffy for udelay()
+ */
+static void __init early_clock_calibration(void)
 {
 	unsigned int tsc_khz, cpu_khz;
 	unsigned long lpj;
@@ -837,6 +841,8 @@ static void __init simple_udelay_calibration(void)
 	if (!tsc_khz)
 		return;
 
+	tsc_early_init(tsc_khz);
+
 	lpj = tsc_khz * 1000;
 	do_div(lpj, HZ);
 	loops_per_jiffy = lpj;
@@ -1049,7 +1055,7 @@ void __init setup_arch(char **cmdline_p)
 	 */
 	init_hypervisor_platform();
 
-	simple_udelay_calibration();
+	early_clock_calibration();
 
 	x86_init.resources.probe_roms();
 
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index 3104c5304529..838c5980cae4 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -86,6 +86,7 @@ static __init void x86_late_time_init(void)
 {
 	x86_init.timers.timer_init();
 	tsc_init();
+	tsc_early_fini();
 }
 
 /*
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 1c4502a2b7b2..edacd0aa55f5 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -181,6 +181,80 @@ static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_
 	local_irq_restore(flags);
 }
 
+#ifdef CONFIG_X86_TSC
+static struct cyc2ns_data  cyc2ns_early;
+
+static u64 sched_clock_early(void)
+{
+	u64 ns = mul_u64_u32_shr(rdtsc(), cyc2ns_early.cyc2ns_mul,
+				 cyc2ns_early.cyc2ns_shift);
+	return ns + cyc2ns_early.cyc2ns_offset;
+}
+
+#ifdef CONFIG_PARAVIRT
+static inline void __init tsc_early_enable(void)
+{
+	pv_time_ops.active_sched_clock = sched_clock_early;
+}
+
+static inline void __init tsc_early_disable(void)
+{
+	pv_time_ops.active_sched_clock = pv_time_ops.sched_clock;
+}
+#else /* CONFIG_PARAVIRT */
+/*
+ * For the native clock we use two switches, one static and one dynamic. The
+ * static switch starts out true, so initially we check the dynamic switch,
+ * which starts out false. Later, when the early clock is disabled, we flip
+ * the static switch too, avoiding the branch check on every sched_clock() call.
+ */
+static bool __tsc_early;
+static DEFINE_STATIC_KEY_TRUE(__tsc_early_static);
+
+static inline void __init tsc_early_enable(void)
+{
+	__tsc_early = true;
+}
+
+static inline void __init tsc_early_disable(void)
+{
+	__tsc_early = false;
+	static_branch_disable(&__tsc_early_static);
+}
+#endif /* CONFIG_PARAVIRT */
+
+/*
+ * Initialize clock for early time stamps
+ */
+void __init tsc_early_init(unsigned int khz)
+{
+	clocks_calc_mult_shift(&cyc2ns_early.cyc2ns_mul,
+			       &cyc2ns_early.cyc2ns_shift,
+			       khz, NSEC_PER_MSEC, 0);
+	cyc2ns_early.cyc2ns_offset = -sched_clock_early();
+	tsc_early_enable();
+}
+
+void __init tsc_early_fini(void)
+{
+	unsigned long long t;
+	unsigned long r;
+
+	/* There was no early sched clock if the multiplier is 0 */
+	if (cyc2ns_early.cyc2ns_mul == 0) {
+		tsc_early_disable();
+		return;
+	}
+
+	t = -cyc2ns_early.cyc2ns_offset;
+	r = do_div(t, NSEC_PER_SEC);
+
+	tsc_early_disable();
+	__sched_clock_offset = sched_clock_early() - sched_clock();
+	pr_info("sched clock early is finished, offset [%lld.%09lds]\n", t, r);
+}
+#endif /* CONFIG_X86_TSC */
+
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
@@ -193,6 +267,13 @@ u64 native_sched_clock(void)
 		return cycles_2_ns(tsc_now);
 	}
 
+#if !defined(CONFIG_PARAVIRT) && defined(CONFIG_X86_TSC)
+	if (static_branch_unlikely(&__tsc_early_static)) {
+		if (__tsc_early)
+			return sched_clock_early();
+	}
+#endif /* !CONFIG_PARAVIRT && CONFIG_X86_TSC */
+
 	/*
 	 * Fall back to jiffies if there's no TSC available:
 	 * ( But note that we still use it if the TSC is marked
-- 
2.15.0
