Date:	Thu,  1 Mar 2012 23:12:44 -0800
From:	John Stultz <john.stultz@...aro.org>
To:	lkml <linux-kernel@...r.kernel.org>
Cc:	John Stultz <john.stultz@...aro.org>, Ingo Molnar <mingo@...e.hu>,
	Thomas Gleixner <tglx@...utronix.de>,
	Eric Dumazet <eric.dumazet@...il.com>,
	Richard Cochran <richardcochran@...il.com>
Subject: [PATCH 5/9] time: Shadow cycle_last in timekeeper structure

The clocksource cycle_last value is problematic for working on
shadow copies of the timekeeper, because the clocksource is global.

Since it is mostly used for timekeeping, move cycle_last into
the timekeeper. Unfortunately there are some uses of cycle_last
outside of timekeeping (such as tsc_read, which makes sure we
haven't been scheduled onto a core whose TSC is behind the last
read), so we keep the clocksource cycle_last updated as well.
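
For illustration only (not part of the patch): a minimal userspace
sketch of the rule above, with made-up types and helpers loosely named
after the kernel ones. A shadow timekeeper advances only its own
cycle_last; the clocksource's global copy is refreshed only when the
shadow is committed back.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t cycle_t;

struct clocksource {
	cycle_t cycle_last;	/* global copy, still read outside timekeeping */
};

struct timekeeper {
	struct clocksource *clock;
	cycle_t cycle_last;	/* private copy, safe to advance in a shadow */
	cycle_t cycle_interval;
};

static struct clocksource cs;
static struct timekeeper timekeeper = { .clock = &cs };

/* Accumulate one interval on whichever timekeeper we're handed. */
static void accumulate(struct timekeeper *tk)
{
	tk->cycle_last += tk->cycle_interval;	/* only tk's copy moves */
}

/* Commit: publish the shadow and resync the clocksource's copy. */
static void commit(struct timekeeper *shadow)
{
	timekeeper = *shadow;
	timekeeper.clock->cycle_last = timekeeper.cycle_last;
}

int main(void)
{
	struct timekeeper shadow;

	timekeeper.cycle_interval = 1000;

	shadow = timekeeper;	/* work on a shadow copy ... */
	accumulate(&shadow);	/* ... cs.cycle_last is not touched here */
	commit(&shadow);	/* now both copies agree again */

	printf("tk.cycle_last=%llu cs.cycle_last=%llu\n",
	       (unsigned long long)timekeeper.cycle_last,
	       (unsigned long long)cs.cycle_last);
	return 0;
}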

CC: Ingo Molnar <mingo@...e.hu>
CC: Thomas Gleixner <tglx@...utronix.de>
CC: Eric Dumazet <eric.dumazet@...il.com>
CC: Richard Cochran <richardcochran@...il.com>
Signed-off-by: John Stultz <john.stultz@...aro.org>
---
 kernel/time/timekeeping.c |   23 ++++++++++++++---------
 1 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 09460c1..ed8cb51 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -29,7 +29,8 @@ struct timekeeper {
 	u32	mult;
 	/* The shift value of the current clocksource. */
 	int	shift;
-
+	/* cycle value at last accumulation point */
+	cycle_t cycle_last;
 	/* Number of clock cycles in one NTP interval. */
 	cycle_t cycle_interval;
 	/* Number of clock shifted nano seconds in one NTP interval. */
@@ -142,7 +143,8 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 
 	old_clock = tk->clock;
 	tk->clock = clock;
-	clock->cycle_last = clock->read(clock);
+	tk->cycle_last = clock->read(clock);
+	clock->cycle_last = tk->cycle_last;
 
 	/* Do the ns -> cycle conversion first, using original mult */
 	tmp = NTP_INTERVAL_LENGTH;
@@ -195,7 +197,7 @@ static inline s64 timekeeping_get_ns(struct timekeeper *tk)
 	cycle_now = clock->read(clock);
 
 	/* calculate the delta since the last update_wall_time: */
-	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+	cycle_delta = (cycle_now - tk->cycle_last) & clock->mask;
 
 	nsec = cycle_delta * tk->mult + tk->xtime_nsec;
 	return nsec >> tk->shift;
@@ -211,7 +213,7 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
 	cycle_now = clock->read(clock);
 
 	/* calculate the delta since the last update_wall_time: */
-	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+	cycle_delta = (cycle_now - tk->cycle_last) & clock->mask;
 
 	/* return delta convert to nanoseconds. */
 	return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
@@ -259,8 +261,9 @@ static void timekeeping_forward_now(struct timekeeper *tk)
 
 	clock = tk->clock;
 	cycle_now = clock->read(clock);
-	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
-	clock->cycle_last = cycle_now;
+	cycle_delta = (cycle_now - tk->cycle_last) & clock->mask;
+	tk->cycle_last = cycle_now;
+	tk->clock->cycle_last = cycle_now;
 
 	tk->xtime_nsec += cycle_delta * tk->mult;
 
@@ -760,7 +763,8 @@ static void timekeeping_resume(void)
 		__timekeeping_inject_sleeptime(&ts);
 	}
 	/* re-base the last cycle value */
-	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
+	timekeeper.cycle_last = timekeeper.clock->read(timekeeper.clock);
+	timekeeper.clock->cycle_last = timekeeper.cycle_last;
 	timekeeper.ntp_error = 0;
 	timekeeping_suspended = 0;
 
@@ -1026,7 +1030,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
 
 	/* Accumulate one shifted interval */
 	offset -= tk->cycle_interval << shift;
-	tk->clock->cycle_last += tk->cycle_interval << shift;
+	tk->cycle_last += tk->cycle_interval << shift;
 
 	tk->xtime_nsec += tk->xtime_interval << shift;
 	while (tk->xtime_nsec >= nsecps) {
@@ -1079,7 +1083,7 @@ static void update_wall_time(void)
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
 	offset = shadow_tk.cycle_interval;
 #else
-	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
+	offset = (clock->read(clock) - shadow_tk.cycle_last) & clock->mask;
 #endif
 
 	/*
@@ -1153,6 +1157,7 @@ static void update_wall_time(void)
 
 
 	timekeeper = shadow_tk;
+	timekeeper.clock->cycle_last = timekeeper.cycle_last;
 	timekeeping_update(&timekeeper, false);
 
 out:
-- 
1.7.3.2.146.gca209
