Message-ID: <87a51q1uhg.ffs@tglx>
Date: Thu, 16 Oct 2025 22:07:07 +0200
From: Thomas Gleixner <tglx@...utronix.de>
To: Steve Wahl <steve.wahl@....com>,
	Anna-Maria Behnsen <anna-maria@...utronix.de>,
	Frederic Weisbecker <frederic@...nel.org>,
	Ingo Molnar <mingo@...nel.org>, linux-kernel@...r.kernel.org
Cc: Russ Anderson <rja@....com>, Dimitri Sivanich <sivanich@....com>,
	Kyle Meyer <kyle.meyer@....com>
Subject: Re: [PATCH] tick/sched: Use trylock for jiffies updates by
non-timekeeper CPUs

On Mon, Oct 13 2025 at 10:09, Steve Wahl wrote:
> -static void tick_do_update_jiffies64(ktime_t now)
> +static bool _tick_do_update_jiffies64(ktime_t now, bool trylock)
>  {
>  	unsigned long ticks = 1;
>  	ktime_t delta, nextp;
> @@ -70,7 +70,7 @@ static void tick_do_update_jiffies64(ktime_t now)
>  	 */
>  	if (IS_ENABLED(CONFIG_64BIT)) {
>  		if (ktime_before(now, smp_load_acquire(&tick_next_period)))
> -			return;
> +			return true;
>  	} else {
>  		unsigned int seq;
> 
> @@ -84,18 +84,24 @@ static void tick_do_update_jiffies64(ktime_t now)
>  		} while (read_seqcount_retry(&jiffies_seq, seq));
> 
>  		if (ktime_before(now, nextp))
> -			return;
> +			return true;
>  	}
> 
>  	/* Quick check failed, i.e. update is required. */
> -	raw_spin_lock(&jiffies_lock);
> +	if (trylock) {
> +		/* The cpu holding the lock will do the update. */
> +		if (!raw_spin_trylock(&jiffies_lock))
> +			return false;
> +	} else {
> +		raw_spin_lock(&jiffies_lock);
> +	}

Why inflict this horrible conditional locking scheme on the main path?
This can be solved without all this churn, completely independently of
this function.

Something like the uncompiled sketch below. You get the idea.

Thanks,

        tglx

---
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -203,6 +203,23 @@ static inline void tick_sched_flag_clear
 
 #define MAX_STALLED_JIFFIES 5
 
+static bool tick_try_update_jiffies64(struct tick_sched *ts, ktime_t now)
+{
+	static atomic_t in_progress;
+	int inp;
+
+	/* Let only one CPU attempt the update, everybody else backs off */
+	inp = atomic_read(&in_progress);
+	if (inp || !atomic_try_cmpxchg(&in_progress, &inp, 1))
+		return false;
+
+	/* Update only if jiffies is still stale, i.e. nobody beat us to it */
+	if (ts->last_tick_jiffies == jiffies)
+		tick_do_update_jiffies64(now);
+	atomic_set(&in_progress, 0);
+	return true;
+}
+
 static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
 {
 	int tick_cpu, cpu = smp_processor_id();
@@ -239,10 +256,11 @@ static void tick_sched_do_timer(struct t
 		ts->stalled_jiffies = 0;
 		ts->last_tick_jiffies = READ_ONCE(jiffies);
 	} else {
-		if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) {
-			tick_do_update_jiffies64(now);
-			ts->stalled_jiffies = 0;
-			ts->last_tick_jiffies = READ_ONCE(jiffies);
+		if (++ts->stalled_jiffies >= MAX_STALLED_JIFFIES) {
+			if (tick_try_update_jiffies64(ts, now)) {
+				ts->stalled_jiffies = 0;
+				ts->last_tick_jiffies = READ_ONCE(jiffies);
+			}
 		}
 	}
 
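
For illustration, tick_try_update_jiffies64() boils down to a
test-and-test-and-set gate. Here is a minimal userspace sketch of the
same pattern, assuming C11 atomics as a stand-in for the kernel's
atomic_t API; all names below are hypothetical stand-ins, not kernel
code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int in_progress;
static unsigned long jiffies_sim;		/* stands in for jiffies */
static unsigned long last_tick_jiffies_sim;	/* stands in for ts->last_tick_jiffies */

static bool try_update_sim(void)
{
	int inp = atomic_load(&in_progress);

	/* Read-only check first, then try to claim the update */
	if (inp || !atomic_compare_exchange_strong(&in_progress, &inp, 1))
		return false;

	/* Update only if nobody advanced the counter meanwhile */
	if (last_tick_jiffies_sim == jiffies_sim)
		jiffies_sim++;	/* stands in for tick_do_update_jiffies64() */
	atomic_store(&in_progress, 0);
	return true;
}

int main(void)
{
	if (try_update_sim())
		printf("updated: jiffies_sim=%lu\n", jiffies_sim);
	return 0;
}

The read-only load in front of the cmpxchg is the point of the pattern:
contending CPUs fail out after a shared read instead of each issuing a
read-modify-write on the same cache line.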