Message-ID: <20150205011407.GB30372@codeaurora.org>
Date: Wed, 4 Feb 2015 17:14:07 -0800
From: Stephen Boyd <sboyd@...eaurora.org>
To: Daniel Thompson <daniel.thompson@...aro.org>
Cc: Thomas Gleixner <tglx@...utronix.de>,
John Stultz <john.stultz@...aro.org>,
linux-kernel@...r.kernel.org, patches@...aro.org,
linaro-kernel@...ts.linaro.org,
Sumit Semwal <sumit.semwal@...aro.org>,
Steven Rostedt <rostedt@...dmis.org>,
Russell King <linux@....linux.org.uk>,
Will Deacon <will.deacon@....com>,
Catalin Marinas <catalin.marinas@....com>
Subject: Re: [PATCH v3 2/4] sched_clock: Optimize cache line usage
On 01/30, Daniel Thompson wrote:
> diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
> index 3d21a8719444..cb69a47dfee4 100644
> --- a/kernel/time/sched_clock.c
> +++ b/kernel/time/sched_clock.c
> @@ -18,28 +18,44 @@
> #include <linux/seqlock.h>
> #include <linux/bitops.h>
>
> -struct clock_data {
> - ktime_t wrap_kt;
> +/**
> + * struct clock_read_data - data required to read from sched_clock
> + *
Nitpick: Won't kernel-doc complain that members aren't
documented?
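For reference, kernel-doc just wants an @member line per field;
something along these lines would quiet it (descriptions are mine,
adjust as you see fit):

	/**
	 * struct clock_read_data - data required to read from sched_clock
	 *
	 * @epoch_ns:		sched_clock value at last update
	 * @epoch_cyc:		Clock cycle value at last update
	 * @sched_clock_mask:	Bitmask for two's complement subtraction
	 *			of non-64-bit clocks
	 * @read_sched_clock:	Current clock source
	 * @mult:		Multiplier for scaled math conversion
	 * @shift:		Shift value for scaled math conversion
	 * @suspended:		Flag to indicate if the clock is suspended
	 */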
> + * Care must be taken when updating this structure; it is read by
> + * some very hot code paths. It occupies <=48 bytes and, when combined
> + * with the seqcount used to synchronize access, comfortably fits into
> + * a 64 byte cache line.
> + */
> +struct clock_read_data {
> u64 epoch_ns;
> u64 epoch_cyc;
> - seqcount_t seq;
> - unsigned long rate;
> + u64 sched_clock_mask;
> + u64 (*read_sched_clock)(void);
> u32 mult;
> u32 shift;
> bool suspended;
> };
>
> +/**
> + * struct clock_data - all data needed for sched_clock (including
> + * registration of a new clock source)
> + *
Same comment.
> + * The ordering of this structure has been chosen to optimize cache
> + * performance. In particular seq and read_data (combined) should fit
> + * into a single 64 byte cache line.
> + */
> +struct clock_data {
> + seqcount_t seq;
> + struct clock_read_data read_data;
> + ktime_t wrap_kt;
> + unsigned long rate;
> +};
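Tangent: since the comment makes a hard claim about seq + read_data
fitting in one 64 byte cache line, maybe enforce it at build time
too? Untested sketch, dropped into sched_clock_register() (or
anywhere with function scope, which BUILD_BUG_ON needs):

	/* seq + read_data are expected to share one 64 byte cache line */
	BUILD_BUG_ON(offsetof(struct clock_data, read_data) +
		     sizeof(struct clock_read_data) > 64);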
> @@ -60,15 +79,16 @@ unsigned long long notrace sched_clock(void)
> {
> u64 cyc, res;
> unsigned long seq;
> + struct clock_read_data *rd = &cd.read_data;
>
> do {
> seq = raw_read_seqcount_begin(&cd.seq);
>
> - res = cd.epoch_ns;
> - if (!cd.suspended) {
> - cyc = read_sched_clock();
> - cyc = (cyc - cd.epoch_cyc) & sched_clock_mask;
> - res += cyc_to_ns(cyc, cd.mult, cd.shift);
> + res = rd->epoch_ns;
> + if (!rd->suspended) {
Should this have likely() treatment? It would be really nice if
we could use static branches here to avoid any branch penalty at
all. I guess that would need some sort of special-cased
stop_machine() though. Or I wonder if we could replace
rd->read_sched_clock() with a dumb function that returns
rd->epoch_cyc so that the math turns out to be 0?
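i.e. at suspend time swap in a reader that makes the cycle delta
collapse to zero, so sched_clock() just returns rd->epoch_ns.
Untested sketch against this patch (reusing update_sched_clock()
and sched_clock_timer from this file):

	static u64 notrace suspended_sched_clock_read(void)
	{
		return cd.read_data.epoch_cyc;
	}

	static int sched_clock_suspend(void)
	{
		struct clock_read_data *rd = &cd.read_data;

		update_sched_clock();
		hrtimer_cancel(&sched_clock_timer);
		rd->read_sched_clock = suspended_sched_clock_read;

		return 0;
	}

Then the suspended flag (and this branch) could go away entirely.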
> + cyc = rd->read_sched_clock();
> + cyc = (cyc - rd->epoch_cyc) & rd->sched_clock_mask;
> + res += cyc_to_ns(cyc, rd->mult, rd->shift);
> }
> } while (read_seqcount_retry(&cd.seq, seq));
>
--
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum,
a Linux Foundation Collaborative Project