Date:	Mon, 27 Feb 2012 16:29:32 -0800
From:	John Stultz <john.stultz@...aro.org>
To:	lkml <linux-kernel@...r.kernel.org>
Cc:	John Stultz <john.stultz@...aro.org>,
	Thomas Gleixner <tglx@...utronix.de>,
	Eric Dumazet <eric.dumazet@...il.com>,
	Richard Cochran <richardcochran@...il.com>
Subject: [PATCH 5/7] time: Shadow cycle_last in timekeeper structure

The clocksource cycle_last value is problematic for working on
shadow copies of the timekeeper, because the clocksource is global.

Since cycle_last is mostly used only for timekeeping, move it into
the timekeeper structure. Unfortunately there are some users of
cycle_last outside of timekeeping (such as tsc_read, which makes
sure we haven't migrated to a core whose TSC is behind the last
value read), so keep the clocksource cycle_last updated as well.
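
[Illustration only, not part of the patch: a minimal, self-contained C
sketch of the pattern this change establishes, using simplified stand-in
types and a hypothetical forward_now() helper modeled on
timekeeping_forward_now() in the diff below. The timekeeper's own
cycle_last drives the masked delta computation, while the clocksource
copy is merely mirrored for outside readers such as tsc_read.]

#include <stdint.h>

typedef uint64_t cycle_t;

struct clocksource {
	cycle_t (*read)(struct clocksource *cs);
	cycle_t cycle_last;	/* kept in sync only for outside users */
	cycle_t mask;
};

struct timekeeper {
	struct clocksource *clock;
	cycle_t cycle_last;	/* authoritative copy used by timekeeping */
	uint32_t mult;
	int shift;
};

/* Advance using the timekeeper's shadow copy, then mirror it back. */
static uint64_t forward_now(struct timekeeper *tk)
{
	struct clocksource *clock = tk->clock;
	cycle_t cycle_now = clock->read(clock);

	/* masked subtraction handles counter wrap-around */
	cycle_t cycle_delta = (cycle_now - tk->cycle_last) & clock->mask;

	tk->cycle_last = cycle_now;	/* timekeeping's own copy */
	clock->cycle_last = cycle_now;	/* mirrored for non-timekeeping readers */

	/* cycles -> nanoseconds, as in timekeeping_get_ns() */
	return (cycle_delta * tk->mult) >> tk->shift;
}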

CC: Thomas Gleixner <tglx@...utronix.de>
CC: Eric Dumazet <eric.dumazet@...il.com>
CC: Richard Cochran <richardcochran@...il.com>
Signed-off-by: John Stultz <john.stultz@...aro.org>
---
 kernel/time/timekeeping.c |   23 ++++++++++++++---------
 1 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 6c36d19..ebfb037 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -29,7 +29,8 @@ struct timekeeper {
 	u32	mult;
 	/* The shift value of the current clocksource. */
 	int	shift;
-
+	/* cycle value at last accumulation point */
+	cycle_t cycle_last;
 	/* Number of clock cycles in one NTP interval. */
 	cycle_t cycle_interval;
 	/* Number of clock shifted nano seconds in one NTP interval. */
@@ -138,7 +139,8 @@ static void timekeeper_setup_internals(struct clocksource *clock)
 	u64 tmp, ntpinterval;
 
 	timekeeper.clock = clock;
-	clock->cycle_last = clock->read(clock);
+	timekeeper.cycle_last = clock->read(clock);
+	clock->cycle_last = timekeeper.cycle_last;
 
 	/* Do the ns -> cycle conversion first, using original mult */
 	tmp = NTP_INTERVAL_LENGTH;
@@ -184,7 +186,7 @@ static inline s64 timekeeping_get_ns(void)
 	cycle_now = clock->read(clock);
 
 	/* calculate the delta since the last update_wall_time: */
-	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+	cycle_delta = (cycle_now - timekeeper.cycle_last) & clock->mask;
 
 	nsec = cycle_delta * timekeeper.mult + timekeeper.xtime_nsec;
 	return nsec >> timekeeper.shift;
@@ -200,7 +202,7 @@ static inline s64 timekeeping_get_ns_raw(void)
 	cycle_now = clock->read(clock);
 
 	/* calculate the delta since the last update_wall_time: */
-	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+	cycle_delta = (cycle_now - timekeeper.cycle_last) & clock->mask;
 
 	/* return delta convert to nanoseconds. */
 	return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
@@ -248,8 +250,9 @@ static void timekeeping_forward_now(void)
 
 	clock = timekeeper.clock;
 	cycle_now = clock->read(clock);
-	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
-	clock->cycle_last = cycle_now;
+	cycle_delta = (cycle_now - timekeeper.cycle_last) & clock->mask;
+	timekeeper.cycle_last = cycle_now;
+	timekeeper.clock->cycle_last = cycle_now;
 
 	timekeeper.xtime_nsec += cycle_delta * timekeeper.mult;
 
@@ -749,7 +752,8 @@ static void timekeeping_resume(void)
 		__timekeeping_inject_sleeptime(&ts);
 	}
 	/* re-base the last cycle value */
-	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
+	timekeeper.cycle_last = timekeeper.clock->read(timekeeper.clock);
+	timekeeper.clock->cycle_last = timekeeper.cycle_last;
 	timekeeper.ntp_error = 0;
 	timekeeping_suspended = 0;
 
@@ -1016,7 +1020,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
 
 	/* Accumulate one shifted interval */
 	offset -= tk->cycle_interval << shift;
-	tk->clock->cycle_last += tk->cycle_interval << shift;
+	tk->cycle_last += tk->cycle_interval << shift;
 
 	tk->xtime_nsec += tk->xtime_interval << shift;
 	while (tk->xtime_nsec >= nsecps) {
@@ -1070,7 +1074,7 @@ static void update_wall_time(void)
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
 	offset = tk.cycle_interval;
 #else
-	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
+	offset = (clock->read(clock) - tk.cycle_last) & clock->mask;
 #endif
 
 	/*
@@ -1143,6 +1147,7 @@ static void update_wall_time(void)
 
 
 	timekeeper = tk;
+	timekeeper.clock->cycle_last = timekeeper.cycle_last;
 	timekeeping_update(&timekeeper, false);
 
 out:
-- 
1.7.3.2.146.gca209

