Message-Id: <1427397806-20889-4-git-send-email-john.stultz@linaro.org>
Date: Thu, 26 Mar 2015 12:23:24 -0700
From: John Stultz <john.stultz@...aro.org>
To: lkml <linux-kernel@...r.kernel.org>
Cc: Daniel Thompson <daniel.thompson@...aro.org>,
Russell King <linux@....linux.org.uk>,
Will Deacon <will.deacon@....com>,
Catalin Marinas <catalin.marinas@....com>,
Thomas Gleixner <tglx@...utronix.de>,
Stephen Boyd <sboyd@...eaurora.org>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>,
John Stultz <john.stultz@...aro.org>
Subject: [PATCH 3/5] sched_clock: Remove suspend from clock_read_data
From: Daniel Thompson <daniel.thompson@...aro.org>
Currently cd.read_data.suspended is read by the hotpath function
sched_clock(). This variable need not be accessed on the hotpath. In
fact, once it is removed, we can remove the conditional branches from
sched_clock() and install a dummy read_sched_clock function to suspend
the clock.

A new master copy of the function pointer (actual_read_sched_clock) is
introduced and is used for all reads of the clock hardware except those
within sched_clock() itself.
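
For illustration only, not part of the patch: below is a minimal
user-space sketch of the pointer-swap technique described above. The
demo_* names and fake_counter are made up for this example, the
seqcount (cd.seq) that the real code uses to make the swap safe
against concurrent readers is omitted, and the epoch_ns accumulation
that keeps sched_clock() monotonic across suspend is left out for
brevity.

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_counter;		/* stands in for the clock hardware */

struct demo_read_data {
	uint64_t epoch_cyc;		/* counter value at the last update */
	uint64_t (*read)(void);		/* current read function (hot path) */
};

static uint64_t demo_hw_read(void)	/* normal case: read the "hardware" */
{
	return fake_counter;
}

static struct demo_read_data rd = { .epoch_cyc = 0, .read = demo_hw_read };

static uint64_t demo_suspended_read(void)	/* suspended: appear frozen */
{
	return rd.epoch_cyc;
}

static uint64_t demo_sched_clock(void)	/* hot path: no suspended flag test */
{
	return rd.read() - rd.epoch_cyc;	/* delta since the last epoch */
}

int main(void)
{
	fake_counter = 100;
	printf("running:   %llu\n", (unsigned long long)demo_sched_clock());

	/* suspend: freeze the clock by swapping the read function */
	rd.epoch_cyc = demo_hw_read();
	rd.read = demo_suspended_read;
	fake_counter = 500;		/* hardware keeps counting ... */
	printf("suspended: %llu\n", (unsigned long long)demo_sched_clock());
	fake_counter = 900;		/* ... but the reading stays frozen */
	printf("suspended: %llu\n", (unsigned long long)demo_sched_clock());

	/* resume: rebase the epoch, then restore the real read function */
	rd.epoch_cyc = demo_hw_read();
	rd.read = demo_hw_read;
	fake_counter = 950;
	printf("resumed:   %llu\n", (unsigned long long)demo_sched_clock());
	return 0;
}

The sketch prints a delta that advances while running, stays frozen
while suspended even though the counter keeps moving, and advances
again from the new epoch after resume; swapping the function pointer
gives the same behaviour the old suspended flag provided, without a
conditional branch on every read.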
Cc: Russell King <linux@....linux.org.uk>
Cc: Will Deacon <will.deacon@....com>
Cc: Catalin Marinas <catalin.marinas@....com>
Cc: Daniel Thompson <daniel.thompson@...aro.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Stephen Boyd <sboyd@...eaurora.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Ingo Molnar <mingo@...nel.org>
Suggested-by: Thomas Gleixner <tglx@...utronix.de>
Reviewed-by: Stephen Boyd <sboyd@...eaurora.org>
Acked-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Signed-off-by: Daniel Thompson <daniel.thompson@...aro.org>
Signed-off-by: John Stultz <john.stultz@...aro.org>
---
kernel/time/sched_clock.c | 40 +++++++++++++++++++++++++---------------
1 file changed, 25 insertions(+), 15 deletions(-)
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 872e068..52ea5d9 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -28,10 +28,9 @@
* @read_sched_clock: Current clock source (or dummy source when suspended)
* @mult: Multipler for scaled math conversion
* @shift: Shift value for scaled math conversion
- * @suspended: Flag to indicate if the clock is suspended (stopped)
*
* Care must be taken when updating this structure; it is read by
- * some very hot code paths. It occupies <=48 bytes and, when combined
+ * some very hot code paths. It occupies <=40 bytes and, when combined
* with the seqcount used to synchronize access, comfortably fits into
* a 64 byte cache line.
*/
@@ -42,7 +41,6 @@ struct clock_read_data {
u64 (*read_sched_clock)(void);
u32 mult;
u32 shift;
- bool suspended;
};
/**
@@ -64,6 +62,7 @@ struct clock_data {
struct clock_read_data read_data;
ktime_t wrap_kt;
unsigned long rate;
+ u64 (*actual_read_sched_clock)(void);
};
static struct hrtimer sched_clock_timer;
@@ -83,6 +82,8 @@ static u64 notrace jiffy_sched_clock_read(void)
static struct clock_data cd ____cacheline_aligned = {
.read_data = { .mult = NSEC_PER_SEC / HZ,
.read_sched_clock = jiffy_sched_clock_read, },
+ .actual_read_sched_clock = jiffy_sched_clock_read,
+
};
static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
@@ -99,12 +100,9 @@ unsigned long long notrace sched_clock(void)
do {
seq = raw_read_seqcount_begin(&cd.seq);
- res = rd->epoch_ns;
- if (!rd->suspended) {
- cyc = rd->read_sched_clock();
- cyc = (cyc - rd->epoch_cyc) & rd->sched_clock_mask;
- res += cyc_to_ns(cyc, rd->mult, rd->shift);
- }
+ cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
+ rd->sched_clock_mask;
+ res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
} while (read_seqcount_retry(&cd.seq, seq));
return res;
@@ -120,7 +118,7 @@ static void notrace update_sched_clock(void)
u64 ns;
struct clock_read_data *rd = &cd.read_data;
- cyc = rd->read_sched_clock();
+ cyc = cd.actual_read_sched_clock();
ns = rd->epoch_ns +
cyc_to_ns((cyc - rd->epoch_cyc) & rd->sched_clock_mask,
rd->mult, rd->shift);
@@ -166,10 +164,11 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
/* update epoch for new counter and update epoch_ns from old counter*/
new_epoch = read();
- cyc = rd->read_sched_clock();
+ cyc = cd.actual_read_sched_clock();
ns = rd->epoch_ns +
cyc_to_ns((cyc - rd->epoch_cyc) & rd->sched_clock_mask,
rd->mult, rd->shift);
+ cd.actual_read_sched_clock = read;
raw_write_seqcount_begin(&cd.seq);
rd->read_sched_clock = read;
@@ -209,7 +208,7 @@ void __init sched_clock_postinit(void)
* If no sched_clock function has been provided at that point,
* make it the final one one.
*/
- if (cd.read_data.read_sched_clock == jiffy_sched_clock_read)
+ if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);
update_sched_clock();
@@ -223,13 +222,24 @@ void __init sched_clock_postinit(void)
hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}
+/*
+ * Clock read function for use when the clock is suspended.
+ *
+ * This function makes it appear to sched_clock() as if the clock
+ * stopped counting at its last update.
+ */
+static u64 notrace suspended_sched_clock_read(void)
+{
+ return cd.read_data.epoch_cyc;
+}
+
static int sched_clock_suspend(void)
{
struct clock_read_data *rd = &cd.read_data;
update_sched_clock();
hrtimer_cancel(&sched_clock_timer);
- rd->suspended = true;
+ rd->read_sched_clock = suspended_sched_clock_read;
return 0;
}
@@ -237,9 +247,9 @@ static void sched_clock_resume(void)
{
struct clock_read_data *rd = &cd.read_data;
- rd->epoch_cyc = rd->read_sched_clock();
+ rd->epoch_cyc = cd.actual_read_sched_clock();
hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
- rd->suspended = false;
+ rd->read_sched_clock = cd.actual_read_sched_clock;
}
static struct syscore_ops sched_clock_ops = {
--
1.9.1