Date:	Wed, 13 Jan 2016 07:34:25 -0500
From:	Prarit Bhargava <prarit@...hat.com>
To:	linux-kernel@...r.kernel.org
Cc:	Prarit Bhargava <prarit@...hat.com>,
	John Stultz <john.stultz@...aro.org>,
	Thomas Gleixner <tglx@...utronix.de>,
	Ingo Molnar <mingo@...nel.org>,
	Xunlei Pang <pang.xunlei@...aro.org>,
	Peter Zijlstra <peterz@...radead.org>,
	Baolin Wang <baolin.wang@...aro.org>,
	Arnd Bergmann <arnd@...db.de>
Subject: [PATCH 1/2] timekeeping: Add ktime_get_[boot|real|tai]_fast_ns() functions

Introduce NMI-safe time functions, ktime_get_[boot|real|tai]_fast_ns(), for
use by the printk() timestamping code. They are built on the existing
NMI-safe fast accessors for the monotonic clock.
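
A minimal usage sketch (not part of this patch): it assumes only the
accessors declared below and shows how a logging path might take an
NMI-safe timestamp; the example_log_timestamp() helper is illustrative
only.

#include <linux/timekeeping.h>
#include <linux/types.h>

/*
 * Illustrative only: take an NMI-safe, boot-based timestamp.
 * ktime_get_boot_fast_ns() returns 0 until timekeeping_init() has run,
 * so early-boot callers must tolerate a zero timestamp.
 */
static u64 example_log_timestamp(void)
{
	/* Monotonic time plus boot offset, in nanoseconds, NMI safe. */
	return ktime_get_boot_fast_ns();
}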

Cc: John Stultz <john.stultz@...aro.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...nel.org>
Cc: Xunlei Pang <pang.xunlei@...aro.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Baolin Wang <baolin.wang@...aro.org>
Cc: Arnd Bergmann <arnd@...db.de>
Signed-off-by: Prarit Bhargava <prarit@...hat.com>
---
 include/linux/timekeeping.h |    3 +++
 kernel/time/timekeeping.c   |   52 ++++++++++++++++++++++++++++++++++---------
 2 files changed, 45 insertions(+), 10 deletions(-)

diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index ec89d84..c3f10f5 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -233,6 +233,9 @@ static inline u64 ktime_get_raw_ns(void)
 
 extern u64 ktime_get_mono_fast_ns(void);
 extern u64 ktime_get_raw_fast_ns(void);
+extern u64 ktime_get_boot_fast_ns(void);
+extern u64 ktime_get_real_fast_ns(void);
+extern u64 ktime_get_tai_fast_ns(void);
 
 /*
  * Timespec interfaces utilizing the ktime based ones
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index d563c19..5449a5cc 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -44,6 +44,14 @@ static struct {
 static DEFINE_RAW_SPINLOCK(timekeeper_lock);
 static struct timekeeper shadow_timekeeper;
 
+static ktime_t *offsets[TK_OFFS_MAX] = {
+	[TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
+	[TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
+	[TK_OFFS_TAI]	= &tk_core.timekeeper.offs_tai,
+};
+
+static int timekeeping_initialized;
+
 /**
  * struct tk_fast - NMI safe timekeeper
  * @seq:	Sequence counter for protecting updates. The lowest bit
@@ -375,16 +383,22 @@ static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf
  * of the following timestamps. Callers need to be aware of that and
  * deal with it.
  */
-static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
+static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf,
+					       ktime_t *offset)
 {
 	struct tk_read_base *tkr;
 	unsigned int seq;
 	u64 now;
+	ktime_t base;
+
+	if (unlikely(!timekeeping_initialized))
+		return 0;
 
 	do {
 		seq = raw_read_seqcount_latch(&tkf->seq);
 		tkr = tkf->base + (seq & 0x01);
-		now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
+		base = ktime_add(tkr->base, *offset);
+		now = ktime_to_ns(base) + timekeeping_get_ns(tkr);
 	} while (read_seqcount_retry(&tkf->seq, seq));
 
 	return now;
@@ -392,16 +406,38 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 
 u64 ktime_get_mono_fast_ns(void)
 {
-	return __ktime_get_fast_ns(&tk_fast_mono);
+	ktime_t zero = ktime_set(0, 0);
+
+	return __ktime_get_fast_ns(&tk_fast_mono, &zero);
 }
 EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
 
 u64 ktime_get_raw_fast_ns(void)
 {
-	return __ktime_get_fast_ns(&tk_fast_raw);
+	ktime_t zero = ktime_set(0, 0);
+
+	return __ktime_get_fast_ns(&tk_fast_raw, &zero);
 }
 EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
 
+u64 ktime_get_boot_fast_ns(void)
+{
+	return __ktime_get_fast_ns(&tk_fast_mono, offsets[TK_OFFS_BOOT]);
+}
+EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
+
+u64 ktime_get_real_fast_ns(void)
+{
+	return __ktime_get_fast_ns(&tk_fast_mono, offsets[TK_OFFS_REAL]);
+}
+EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
+
+u64 ktime_get_tai_fast_ns(void)
+{
+	return __ktime_get_fast_ns(&tk_fast_mono, offsets[TK_OFFS_TAI]);
+}
+EXPORT_SYMBOL_GPL(ktime_get_tai_fast_ns);
+
 /* Suspend-time cycles value for halted fast timekeeper. */
 static cycle_t cycles_at_suspend;
 
@@ -699,12 +735,6 @@ u32 ktime_get_resolution_ns(void)
 }
 EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);
 
-static ktime_t *offsets[TK_OFFS_MAX] = {
-	[TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
-	[TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
-	[TK_OFFS_TAI]	= &tk_core.timekeeper.offs_tai,
-};
-
 ktime_t ktime_get_with_offset(enum tk_offsets offs)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
@@ -1255,6 +1285,8 @@ void __init timekeeping_init(void)
 
 	write_seqcount_end(&tk_core.seq);
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+
+	timekeeping_initialized = 1;
 }
 
 /* time in seconds when suspend began for persistent clock */
-- 
1.7.9.3
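
For reference, the NMI-safe read path in __ktime_get_fast_ns() above follows
the kernel's seqcount latch pattern: two copies of the data are kept, readers
pick one based on the low bit of the sequence and retry if a writer raced with
them. A minimal sketch of that pattern over a hypothetical two-copy structure
(latch_example and its helpers are illustrative, not kernel APIs):

#include <linux/seqlock.h>
#include <linux/types.h>

/* Illustration of the seqcount latch pattern used by tk_fast_mono. */
struct latch_example {
	seqcount_t	seq;
	u64		data[2];	/* two copies, selected by seq & 0x01 */
};

/* Writer: update the copy readers are not currently directed at. */
static void latch_example_update(struct latch_example *le, u64 val)
{
	raw_write_seqcount_latch(&le->seq);	/* readers now use data[1] */
	le->data[0] = val;
	raw_write_seqcount_latch(&le->seq);	/* readers now use data[0] */
	le->data[1] = val;
}

/* Reader: NMI safe, lockless; mirrors the loop in __ktime_get_fast_ns(). */
static u64 latch_example_read(struct latch_example *le)
{
	unsigned int seq;
	u64 val;

	do {
		seq = raw_read_seqcount_latch(&le->seq);
		val = le->data[seq & 0x01];
	} while (read_seqcount_retry(&le->seq, seq));

	return val;
}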
