Message-Id: <20240221090548.36600-13-anna-maria@linutronix.de>
Date: Wed, 21 Feb 2024 10:05:40 +0100
From: Anna-Maria Behnsen <anna-maria@...utronix.de>
To: linux-kernel@...r.kernel.org
Cc: Peter Zijlstra <peterz@...radead.org>,
John Stultz <jstultz@...gle.com>,
Thomas Gleixner <tglx@...utronix.de>,
Eric Dumazet <edumazet@...gle.com>,
"Rafael J . Wysocki" <rafael.j.wysocki@...el.com>,
Arjan van de Ven <arjan@...radead.org>,
"Paul E . McKenney" <paulmck@...nel.org>,
Frederic Weisbecker <frederic@...nel.org>,
Rik van Riel <riel@...riel.com>,
Steven Rostedt <rostedt@...dmis.org>,
Sebastian Siewior <bigeasy@...utronix.de>,
Giovanni Gherdovich <ggherdovich@...e.cz>,
Lukasz Luba <lukasz.luba@....com>,
"Gautham R . Shenoy" <gautham.shenoy@....com>,
Srinivas Pandruvada <srinivas.pandruvada@...el.com>,
K Prateek Nayak <kprateek.nayak@....com>,
Christian Loehle <christian.loehle@....com>,
Anna-Maria Behnsen <anna-maria@...utronix.de>
Subject: [PATCH v11 12/20] timers: Split out "get next timer interrupt" functionality

The functionality for getting the next timer interrupt in
get_next_timer_interrupt() is split out into a separate function,
fetch_next_timer_interrupt(), so that it can be used by other call
sites as well.

This is preparatory work for converting the NOHZ timer placement to a
pull-at-expiry-time model. No functional change.
Signed-off-by: Anna-Maria Behnsen <anna-maria@...utronix.de>
Reviewed-by: Frederic Weisbecker <frederic@...nel.org>
---
v10: Update was required (change of preceding patches)
v9: Update was required (change of preceding patches)
v6: s/splitted/split
v5: Update commit message
v4: Fix typo in comment
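
Not part of the change itself, but as a quick orientation for review: a
condensed sketch of the call pattern after this patch (simplified from
the diff below, not meant to build stand-alone). The caller owns both
timer base locks; fetch_next_timer_interrupt() only computes the next
event and fills the timer_events struct:

	struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };
	struct timer_base *base_local, *base_global;
	unsigned long nextevt;

	base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
	base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);

	/* Lock order: local base first, then global base (lockdep nesting) */
	raw_spin_lock(&base_local->lock);
	raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);

	nextevt = fetch_next_timer_interrupt(basej, basem, base_local,
					     base_global, &tevt);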
---
kernel/time/timer.c | 64 +++++++++++++++++++++++++++------------------
1 file changed, 38 insertions(+), 26 deletions(-)
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index f119b44e44e0..9fa759dd80f5 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1988,30 +1988,13 @@ static unsigned long next_timer_interrupt(struct timer_base *base,
 	return base->next_expiry;
 }
 
-static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
-					     bool *idle)
+static unsigned long fetch_next_timer_interrupt(unsigned long basej, u64 basem,
+						struct timer_base *base_local,
+						struct timer_base *base_global,
+						struct timer_events *tevt)
 {
-	struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };
 	unsigned long nextevt, nextevt_local, nextevt_global;
-	struct timer_base *base_local, *base_global;
 	bool local_first;
-	u64 expires;
-
-	/*
-	 * Pretend that there is no timer pending if the cpu is offline.
-	 * Possible pending timers will be migrated later to an active cpu.
-	 */
-	if (cpu_is_offline(smp_processor_id())) {
-		if (idle)
-			*idle = true;
-		return tevt.local;
-	}
-
-	base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
-	base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);
-
-	raw_spin_lock(&base_local->lock);
-	raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
 
 	nextevt_local = next_timer_interrupt(base_local, basej);
 	nextevt_global = next_timer_interrupt(base_global, basej);
@@ -2029,8 +2012,8 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
 		/* If we missed a tick already, force 0 delta */
 		if (time_before(nextevt, basej))
 			nextevt = basej;
-		tevt.local = basem + (u64)(nextevt - basej) * TICK_NSEC;
-		goto forward;
+		tevt->local = basem + (u64)(nextevt - basej) * TICK_NSEC;
+		return nextevt;
 	}
 
 	/*
@@ -2040,12 +2023,41 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
 	 * ignored. If the global queue is empty, nothing to do either.
 	 */
 	if (!local_first && base_global->timers_pending)
-		tevt.global = basem + (u64)(nextevt_global - basej) * TICK_NSEC;
+		tevt->global = basem + (u64)(nextevt_global - basej) * TICK_NSEC;
 
 	if (base_local->timers_pending)
-		tevt.local = basem + (u64)(nextevt_local - basej) * TICK_NSEC;
+		tevt->local = basem + (u64)(nextevt_local - basej) * TICK_NSEC;
+
+	return nextevt;
+}
+
+static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
+					     bool *idle)
+{
+	struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };
+	struct timer_base *base_local, *base_global;
+	unsigned long nextevt;
+	u64 expires;
+
+	/*
+	 * Pretend that there is no timer pending if the cpu is offline.
+	 * Possible pending timers will be migrated later to an active cpu.
+	 */
+	if (cpu_is_offline(smp_processor_id())) {
+		if (idle)
+			*idle = true;
+		return tevt.local;
+	}
+
+	base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
+	base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);
+
+	raw_spin_lock(&base_local->lock);
+	raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
+
+	nextevt = fetch_next_timer_interrupt(basej, basem, base_local,
+					     base_global, &tevt);
 
-forward:
 	/*
 	 * We have a fresh next event. Check whether we can forward the
 	 * base.
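
A side note on the expiry conversion that stays unchanged above: the
tevt fields translate a jiffies delta into an absolute time in
nanoseconds. A worked example (illustrative values, assuming HZ=1000
and therefore TICK_NSEC == 1000000):

	/*
	 * Next timer fires five ticks after the base jiffies value,
	 * i.e. nextevt == basej + 5:
	 *
	 *   tevt->local = basem + (u64)(nextevt - basej) * TICK_NSEC
	 *               = basem + 5 * 1000000 ns
	 *               = basem + 5 ms after base time
	 */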
--
2.39.2