Message-ID: <20160106162558.GC731@pathway.suse.cz>
Date: Wed, 6 Jan 2016 17:25:58 +0100
From: Petr Mladek <pmladek@...e.com>
To: Prarit Bhargava <prarit@...hat.com>
Cc: linux-kernel@...r.kernel.org, John Stultz <john.stultz@...aro.org>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...nel.org>,
Xunlei Pang <pang.xunlei@...aro.org>,
Peter Zijlstra <peterz@...radead.org>,
Baolin Wang <baolin.wang@...aro.org>,
Arnd Bergmann <arnd@...db.de>
Subject: Re: [PATCH 1/2] kernel, timekeeping, add trylock option to
ktime_get_with_offset()
On Wed 2016-01-06 08:00:33, Prarit Bhargava wrote:
> This is a timekeeping staging patch for the printk() timestamp
> functionality that adds a trylock option for the timekeeping_lock() to
> ktime_get_with_offset(). When trylock is 1, calls to
> ktime_get_with_offset() will return a ktime of 0 if the
> timekeeping_lock is locked.
If I get it correctly, it returns 0 only when timekeeping is not yet
initialized, but it returns TIME_MAX_NS when the lock is taken (where
TIME_MAX_NS is defined in the 2nd patch). So the description above does
not quite match the code.
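Also, the printk() side would then have to handle two different sentinel
values. A rough sketch of what I mean (the helper name is made up, it is
not from the patches):

	/* hypothetical printk-side helper, just to illustrate the two cases */
	static u64 printk_try_get_ts(void)
	{
		ktime_t kt = ktime_try_real();

		if (!ktime_to_ns(kt))			/* timekeeping not initialized */
			return 0;

		if (ktime_to_ns(kt) == KTIME_MAX)	/* timekeeper_lock was contended */
			return local_clock();		/* keep the current behavior */

		return ktime_to_ns(kt);
	}

It would be easier to have a single "no timestamp available" value, see
below.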
> This patch adds ktime_try_real(), ktime_try_boot(), and ktime_try_tai() as
> wrapper functions around ktime_get_with_offset() with trylock = 1, and
> modifies other callers to call ktime_get_with_offset() with trylock = 0.
>
> ---
> include/linux/timekeeping.h | 50 +++++++++++++++++++++++++++++++++++++++----
> kernel/time/timekeeping.c | 15 ++++++++++++-
> 2 files changed, 60 insertions(+), 5 deletions(-)
>
> diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
> index ec89d84..4f47352 100644
> --- a/include/linux/timekeeping.h
> +++ b/include/linux/timekeeping.h
> @@ -166,7 +166,7 @@ enum tk_offsets {
> };
>
> extern ktime_t ktime_get(void);
> -extern ktime_t ktime_get_with_offset(enum tk_offsets offs);
> +extern ktime_t ktime_get_with_offset(enum tk_offsets offs, int trylock);
> extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
> extern ktime_t ktime_get_raw(void);
> extern u32 ktime_get_resolution_ns(void);
> @@ -176,7 +176,16 @@ extern u32 ktime_get_resolution_ns(void);
> */
>  static inline ktime_t ktime_get_real(void)
>  {
> -	return ktime_get_with_offset(TK_OFFS_REAL);
> +	return ktime_get_with_offset(TK_OFFS_REAL, 0);
> +}
> +
> +/**
> + * ktime_try_real - same as ktime_get_real, except return 0 if timekeeping is
> + * locked.
> + */
> +static inline ktime_t ktime_try_real(void)
I would call this ktime_try_get_real() to make it clear.
> +{
> +	return ktime_get_with_offset(TK_OFFS_REAL, 1);
> }
>
[...]
>  static inline u64 ktime_get_raw_ns(void)
>  {
>  	return ktime_to_ns(ktime_get_raw());
> diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
> index d563c19..6e2cbeb 100644
> --- a/kernel/time/timekeeping.c
> +++ b/kernel/time/timekeeping.c
> @@ -44,6 +44,8 @@ static struct {
> static DEFINE_RAW_SPINLOCK(timekeeper_lock);
> static struct timekeeper shadow_timekeeper;
>
> +/* printk may call ktime_get_with_offset() before timekeeping is initialized. */
> +static int timekeeping_initialized;
> /**
> * struct tk_fast - NMI safe timekeeper
> * @seq: Sequence counter for protecting updates. The lowest bit
> @@ -705,15 +707,22 @@ static ktime_t *offsets[TK_OFFS_MAX] = {
>  	[TK_OFFS_TAI] = &tk_core.timekeeper.offs_tai,
>  };
>
> -ktime_t ktime_get_with_offset(enum tk_offsets offs)
> +ktime_t ktime_get_with_offset(enum tk_offsets offs, int trylock)
>  {
>  	struct timekeeper *tk = &tk_core.timekeeper;
>  	unsigned int seq;
>  	ktime_t base, *offset = offsets[offs];
>  	s64 nsecs;
> +	unsigned long flags = 0;
> +
> +	if (unlikely(!timekeeping_initialized))
> +		return ktime_set(0, 0);
>  	WARN_ON(timekeeping_suspended);
> 
> +	if (trylock && !raw_spin_trylock_irqsave(&timekeeper_lock, flags))
> +		return ktime_set(KTIME_MAX, 0);
> +
I guess that you want to avoid a deadlock with this. I mean that you
want to survive when you call, for example, ktime_try_tai_ns() from
inside timekeeping_set_tai_offset(). Am I right?
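I mean something like this call chain (hand-written, just to check that
I understand the motivation):

	timekeeping_set_tai_offset()
	  raw_spin_lock_irqsave(&timekeeper_lock, flags)
	    ...
	    printk()	/* with the ktime-based timestamp from the 2nd patch */
	      ktime_try_tai()
	        ktime_get_with_offset(TK_OFFS_TAI, 1)
	          raw_spin_trylock_irqsave() fails
	          => returns immediately instead of deadlocking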
One problem is that it will fail even when the lock is taken on another
CPU and the deadlock is not real. It is probably not a big issue for
printk() because the currently used local_clock() is far from perfect,
but...
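For example, even a regular update on another CPU would cause a failure:

	CPU0					CPU1
	update_wall_time()
	  raw_spin_lock_irqsave(&timekeeper_lock)
						printk()
						  ktime_try_real()
						    raw_spin_trylock_irqsave() fails
						    => TIME_MAX_NS even though there
						       is no deadlock in sight
	  raw_spin_unlock_irqrestore(&timekeeper_lock)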
Another problem is that taking the lock will block writers. This might
be solved by doing only one iteration of the seqcount loop instead of
taking the lock. I mean something like:
ktime_t ktime_get_with_offset(enum tk_offsets offs, int try_once)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	int retry;
	ktime_t base, *offset = offsets[offs];
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = ktime_add(tk->tkr_mono.base, *offset);
		nsecs = timekeeping_get_ns(&tk->tkr_mono);
		retry = read_seqcount_retry(&tk_core.seq, seq);
	} while (retry && !try_once);

	if (try_once && retry)
		return ktime_set(KTIME_MAX, 0);

	return ktime_add_ns(base, nsecs);
}
Another question is whether you really need to distinguish between the
non-initialized and the locked state. You might always return a zero
time when the value is not available. It would make things easier.
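I mean that both failure paths could then return the same thing, for
example:

	/*
	 * Zero means "no usable timestamp", either because timekeeping is
	 * not initialized yet or because an update is in progress.
	 */
	return ktime_set(0, 0);

and the printk() code would need to handle only one special value.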
>  	do {
>  		seq = read_seqcount_begin(&tk_core.seq);
>  		base = ktime_add(tk->tkr_mono.base, *offset);
> @@ -721,6 +730,9 @@ ktime_t ktime_get_with_offset(enum tk_offsets offs)
> 
>  	} while (read_seqcount_retry(&tk_core.seq, seq));
> 
> +	if (trylock)
> +		raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
> +
>  	return ktime_add_ns(base, nsecs);
>
Best Regards,
Petr