[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <20260204135813.GL2995752@noisy.programming.kicks-ass.net>
Date: Wed, 4 Feb 2026 14:58:13 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: Thomas Gleixner <tglx@...nel.org>
Cc: arnd@...db.de, anna-maria@...utronix.de, frederic@...nel.org,
luto@...nel.org, mingo@...hat.com, juri.lelli@...hat.com,
vincent.guittot@...aro.org, dietmar.eggemann@....com,
rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de,
vschneid@...hat.com, linux-kernel@...r.kernel.org,
oliver.sang@...el.com
Subject: Re: [PATCH v2 5/6] entry,hrtimer: Push reprogramming timers into the
interrupt return path
On Tue, Feb 03, 2026 at 12:28:13AM +0100, Thomas Gleixner wrote:
> But looking at this there is already a problem without interrupt
> nesting:
>
> irq_enter_rcu();
> timer_interrupt()
> hrtimer_interrupt()
> delay_rearm();
> irq_exit_rcu()
> __irq_exit_rcu()
> invoke_softirq() <- Here
>
> Soft interrupts can run for quite some time, which means this can
> already cause timers to be delayed for way too long. I think in
> __irq_exit_rcu() you want to do:
>
> if (!in_interrupt() && local_softirq_pending()) {
> hrtimer_rearm();
> invoke_softirq();
> }
Right, and we can do the same on (nested) IRQ entry. Something like so:
---
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -63,6 +63,8 @@ static __always_inline unsigned long __e
if (ti_work & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)) {
if (!rseq_grant_slice_extension(ti_work & TIF_SLICE_EXT_DENY))
schedule();
+ else
+ hrtimer_rearm();
}
if (ti_work & _TIF_UPROBE)
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -663,6 +663,13 @@ void irq_enter_rcu(void)
{
__irq_enter_raw();
+ /*
+ * If this is a nested IRQ that hits the exit_to_user_mode_loop
+ * where it has enabled IRQs but before it has hit schedule()
+ * we could have hrtimers in an undefined state. Fix it up here.
+ */
+ hrtimer_rearm();
+
if (tick_nohz_full_cpu(smp_processor_id()) ||
(is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
tick_irq_enter();
@@ -719,8 +726,14 @@ static inline void __irq_exit_rcu(void)
#endif
account_hardirq_exit(current);
preempt_count_sub(HARDIRQ_OFFSET);
- if (!in_interrupt() && local_softirq_pending())
+ if (!in_interrupt() && local_softirq_pending()) {
+ /*
+ * If we left hrtimers unarmed, make sure to arm them now,
+ * before enabling interrupts to run SoftIRQ.
+ */
+ hrtimer_rearm();
invoke_softirq();
+ }
if (IS_ENABLED(CONFIG_IRQ_FORCED_THREADING) && force_irqthreads() &&
local_timers_pending_force_th() && !(in_nmi() | in_hardirq()))
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1279,8 +1279,8 @@ static int __hrtimer_start_range_ns(stru
if (timer->is_fuzzy) {
/*
- * XXX fuzzy implies pinned! not sure how to deal with
- * retrigger_next_event() for the !local case.
+ * Fuzzy requires pinned as the lazy programming only works
+ * for CPU local timers.
*/
WARN_ON_ONCE(!(mode & HRTIMER_MODE_PINNED));
/*
@@ -1898,7 +1898,7 @@ static __latent_entropy void hrtimer_run
/*
* Very similar to hrtimer_force_reprogram(), except it deals with
- * in_hrirq and hang_detected.
+ * in_hrtirq and hang_detected.
*/
static void __hrtimer_rearm(struct hrtimer_cpu_base *cpu_base,
ktime_t now, ktime_t expires_next)
Powered by blists - more mailing lists