Message-ID: <tip-3d88d56c5873f6eebe23e05c3da701960146b801@git.kernel.org>
Date: Tue, 20 Jun 2017 01:46:57 -0700
From: tip-bot for John Stultz <tipbot@...or.com>
To: linux-tip-commits@...r.kernel.org
Cc: stephen.boyd@...aro.org, mingo@...nel.org, stable@...r.kernel.org,
tglx@...utronix.de, linux-kernel@...r.kernel.org, hpa@...or.com,
richardcochran@...il.com, kevin.brodsky@....com,
will.deacon@....com, john.stultz@...aro.org,
danielmentz@...gle.com, mlichvar@...hat.com, prarit@...hat.com
Subject: [tip:timers/urgent] time: Fix CLOCK_MONOTONIC_RAW sub-nanosecond
accounting
Commit-ID: 3d88d56c5873f6eebe23e05c3da701960146b801
Gitweb: http://git.kernel.org/tip/3d88d56c5873f6eebe23e05c3da701960146b801
Author: John Stultz <john.stultz@...aro.org>
AuthorDate: Thu, 8 Jun 2017 16:44:21 -0700
Committer: Thomas Gleixner <tglx@...utronix.de>
CommitDate: Tue, 20 Jun 2017 10:41:50 +0200
time: Fix CLOCK_MONOTONIC_RAW sub-nanosecond accounting
Due to how the MONOTONIC_RAW accumulation logic was handled,
there is the potential for a 1ns discontinuity when we do
accumulations. This small discontinuity has mostly gone
unnoticed, but since ARM64 enabled CLOCK_MONOTONIC_RAW in its
vDSO clock_gettime implementation, we've seen failures with
the inconsistency-check test in kselftest.
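
To illustrate the mechanism: raw_interval was truncated to whole
nanoseconds once at clocksource setup, while readers convert the full
cycle delta at full precision, so the two views can disagree by up to
1ns at an accumulation boundary. The following standalone sketch (not
kernel code; the mult/shift/interval values are made-up assumptions
chosen to make the rounding loss visible) reproduces the backwards step:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t mult = 3;       /* hypothetical clocksource mult */
        uint32_t cshift = 1;     /* hypothetical clocksource shift */
        uint64_t interval = 5;   /* cycles per NTP interval */
        uint32_t accshift = 1;   /* logarithmic accumulation shift */
        uint64_t base = 0;       /* accumulated raw nanoseconds */

        /* Reader just before accumulation: full-precision conversion
         * of the whole cycle delta (interval << accshift = 10 cycles).
         */
        uint64_t before = base + (((interval << accshift) * mult) >> cshift);

        /* Old accumulation path: raw_interval was pre-truncated to
         * whole nanoseconds, then scaled up by the accumulation
         * shift, so the discarded fraction is scaled up as well.
         */
        uint64_t raw_interval = (interval * mult) >> cshift; /* 7, not 7.5 */
        base += raw_interval << accshift;                    /* += 14 */

        /* Reader just after accumulation (cycle delta is now 0). */
        uint64_t after = base;

        /* Prints before=15 after=14: a 1ns backwards step. */
        printf("before=%llu after=%llu\n",
               (unsigned long long)before, (unsigned long long)after);
        return 0;
    }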
This patch fixes the problem by using the same sub-ns
accumulation handling that CLOCK_MONOTONIC uses, which avoids
the discontinuity for in-kernel users.
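
A minimal sketch of that technique, again with made-up values rather
than the real timekeeper state: time is kept in clocksource-shifted
(sub-nanosecond) units and only shifted down at readout, so no fraction
is ever discarded between accumulations.

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
        uint64_t mult = 3, interval = 5;   /* same made-up values */
        uint32_t cshift = 1, accshift = 1;
        uint64_t xtime_nsec = 0;           /* shifted (sub-ns) units */
        uint64_t tv_sec = 0;
        uint64_t snsec_per_sec = NSEC_PER_SEC << cshift;

        /* Accumulate the un-truncated, clocksource-shifted interval. */
        xtime_nsec += (interval * mult) << accshift; /* 30 sub-ns units */

        while (xtime_nsec >= snsec_per_sec) {        /* carry seconds */
            xtime_nsec -= snsec_per_sec;
            tv_sec++;
        }

        /* Shift down only at readout: prints "0 sec + 15 ns",
         * matching the reader's full-precision view above.
         */
        printf("%llu sec + %llu ns\n",
               (unsigned long long)tv_sec,
               (unsigned long long)(xtime_nsec >> cshift));
        return 0;
    }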
Since the ARM64 vDSO implementation has its own clock_gettime
calculation logic, this patch only reduces the frequency of
errors there; failures are still seen. The ARM64 vDSO will need
to be updated to include the sub-nanosecond xtime_nsec values
in its calculation before this issue is completely fixed.
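
Conceptually, the missing vDSO-side piece is to fold the
kernel-exported sub-nanosecond remainder into the conversion before
the final shift. A hedged sketch of that readout formula follows; the
function name and signature are illustrative, not the actual ARM64
vDSO code:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: not the actual ARM64 vDSO code. */
    static uint64_t raw_nsec(uint64_t cycle_delta,
                             uint64_t xtime_nsec, /* shifted units */
                             uint64_t mult, uint32_t shift)
    {
        /* Fold the sub-ns remainder in before the final shift,
         * instead of shifting the cycle delta down separately.
         */
        return (xtime_nsec + cycle_delta * mult) >> shift;
    }

    int main(void)
    {
        /* With the made-up values from the sketches above. */
        printf("%llu\n",
               (unsigned long long)raw_nsec(10, 0, 3, 1)); /* 15 */
        return 0;
    }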
Signed-off-by: John Stultz <john.stultz@...aro.org>
Tested-by: Daniel Mentz <danielmentz@...gle.com>
Cc: Prarit Bhargava <prarit@...hat.com>
Cc: Kevin Brodsky <kevin.brodsky@....com>
Cc: Richard Cochran <richardcochran@...il.com>
Cc: Stephen Boyd <stephen.boyd@...aro.org>
Cc: Will Deacon <will.deacon@....com>
Cc: "stable #4 . 8+" <stable@...r.kernel.org>
Cc: Miroslav Lichvar <mlichvar@...hat.com>
Link: http://lkml.kernel.org/r/1496965462-20003-3-git-send-email-john.stultz@linaro.org
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
include/linux/timekeeper_internal.h | 4 ++--
kernel/time/timekeeping.c | 19 ++++++++++---------
2 files changed, 12 insertions(+), 11 deletions(-)
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index e9834ad..f7043cc 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -57,7 +57,7 @@ struct tk_read_base {
* interval.
* @xtime_remainder: Shifted nano seconds left over when rounding
* @cycle_interval
- * @raw_interval: Raw nano seconds accumulated per NTP interval.
+ * @raw_interval: Shifted raw nano seconds accumulated per NTP interval.
* @ntp_error: Difference between accumulated time and NTP time in ntp
* shifted nano seconds.
* @ntp_error_shift: Shift conversion between clock shifted nano seconds and
@@ -99,7 +99,7 @@ struct timekeeper {
u64 cycle_interval;
u64 xtime_interval;
s64 xtime_remainder;
- u32 raw_interval;
+ u64 raw_interval;
/* The ntp_tick_length() value currently being used.
* This cached copy ensures we consistently apply the tick
* length for an entire tick, as ntp_tick_length may change
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index eff94cb..b602c48 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -280,7 +280,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
/* Go back from cycles -> shifted ns */
tk->xtime_interval = interval * clock->mult;
tk->xtime_remainder = ntpinterval - tk->xtime_interval;
- tk->raw_interval = (interval * clock->mult) >> clock->shift;
+ tk->raw_interval = interval * clock->mult;
/* if changing clocks, convert xtime_nsec shift units */
if (old_clock) {
@@ -1996,7 +1996,7 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
u32 shift, unsigned int *clock_set)
{
u64 interval = tk->cycle_interval << shift;
- u64 raw_nsecs;
+ u64 snsec_per_sec;
/* If the offset is smaller than a shifted interval, do nothing */
if (offset < interval)
@@ -2011,14 +2011,15 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
*clock_set |= accumulate_nsecs_to_secs(tk);
/* Accumulate raw time */
- raw_nsecs = (u64)tk->raw_interval << shift;
- raw_nsecs += tk->raw_time.tv_nsec;
- if (raw_nsecs >= NSEC_PER_SEC) {
- u64 raw_secs = raw_nsecs;
- raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
- tk->raw_time.tv_sec += raw_secs;
+ tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
+ tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
+ snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
+ while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
+ tk->tkr_raw.xtime_nsec -= snsec_per_sec;
+ tk->raw_time.tv_sec++;
}
- tk->raw_time.tv_nsec = raw_nsecs;
+ tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift;
+ tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
/* Accumulate error between NTP and clock interval */
tk->ntp_error += tk->ntp_tick << shift;