Message-ID: <20260116145208.87445-15-frederic@kernel.org>
Date: Fri, 16 Jan 2026 15:52:07 +0100
From: Frederic Weisbecker <frederic@...nel.org>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Frederic Weisbecker <frederic@...nel.org>,
"Christophe Leroy (CS GROUP)" <chleroy@...nel.org>,
"Rafael J. Wysocki" <rafael@...nel.org>,
Alexander Gordeev <agordeev@...ux.ibm.com>,
Anna-Maria Behnsen <anna-maria@...utronix.de>,
Ben Segall <bsegall@...gle.com>,
Boqun Feng <boqun.feng@...il.com>,
Christian Borntraeger <borntraeger@...ux.ibm.com>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Heiko Carstens <hca@...ux.ibm.com>,
Ingo Molnar <mingo@...hat.com>,
Jan Kiszka <jan.kiszka@...mens.com>,
Joel Fernandes <joelagnelf@...dia.com>,
Juri Lelli <juri.lelli@...hat.com>,
Kieran Bingham <kbingham@...nel.org>,
Madhavan Srinivasan <maddy@...ux.ibm.com>,
Mel Gorman <mgorman@...e.de>,
Michael Ellerman <mpe@...erman.id.au>,
Neeraj Upadhyay <neeraj.upadhyay@...nel.org>,
Nicholas Piggin <npiggin@...il.com>,
"Paul E . McKenney" <paulmck@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Steven Rostedt <rostedt@...dmis.org>,
Sven Schnelle <svens@...ux.ibm.com>,
Thomas Gleixner <tglx@...utronix.de>,
Uladzislau Rezki <urezki@...il.com>,
Valentin Schneider <vschneid@...hat.com>,
Vasily Gorbik <gor@...ux.ibm.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Viresh Kumar <viresh.kumar@...aro.org>,
Xin Zhao <jackzxcui1989@....com>,
linux-pm@...r.kernel.org,
linux-s390@...r.kernel.org,
linuxppc-dev@...ts.ozlabs.org
Subject: [PATCH 14/15] sched/cputime: Handle idle irqtime gracefully
The dyntick-idle cputime accounting always assumes that IRQ time
accounting is enabled and consequently stops accumulating idle time
while dyntick-idle IRQs execute.

This doesn't mix well with disabled IRQ time accounting because idle
IRQs then become a cputime blind spot. Moreover, IRQ time accounting is
disabled on most configurations, in which case the overhead of pausing
dyntick-idle accounting during idle IRQs can be avoided altogether.

Fix this by pausing dyntick-idle accounting during idle IRQs only when
either native vtime (which does its own IRQ time accounting) or generic
IRQ time accounting is enabled.

Also make sure that the IRQ time accumulated during dyntick-idle
periods is not accidentally subtracted from later tick-based
accounting.
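
For illustration, here is a minimal user-space sketch of the intended
tick_delta bookkeeping. This is not kernel code: struct irqtime_model
and account_delta() are hypothetical stand-ins for struct irqtime and
irqtime_account_delta(). While idle_dyntick is set, IRQ time still
accumulates in total but stays out of tick_delta, so the same time is
not subtracted again from later tick accounting:

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified model of struct irqtime (kernel/sched/sched.h) */
struct irqtime_model {
	bool idle_dyntick;	/* set while dyntick-idle accounting runs */
	uint64_t total;		/* all IRQ time ever accounted */
	uint64_t tick_delta;	/* IRQ time to subtract from the next tick */
};

/*
 * Models irqtime_account_delta(): total always grows, but tick_delta
 * is skipped while dyntick-idle accounting already covers the elapsed
 * time, so those nanoseconds are not subtracted a second time.
 */
static void account_delta(struct irqtime_model *it, uint64_t delta)
{
	it->total += delta;
	if (!it->idle_dyntick)
		it->tick_delta += delta;
}

int main(void)
{
	struct irqtime_model it = { 0 };

	account_delta(&it, 100);	/* busy IRQ: feeds tick_delta */
	it.idle_dyntick = true;		/* irqtime_dyntick_start() */
	account_delta(&it, 50);		/* idle IRQ: total only */
	it.idle_dyntick = false;	/* irqtime_dyntick_stop() */

	printf("total=%" PRIu64 " tick_delta=%" PRIu64 "\n",
	       it.total, it.tick_delta);
	return 0;
}

The sketch prints total=150 tick_delta=100: the 50 units of idle IRQ
time remain visible in total but are never subtracted from tick time.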
Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
---
kernel/sched/cputime.c | 24 +++++++++++++++++++++---
kernel/sched/sched.h | 1 +
2 files changed, 22 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index f0620b429698..3dadfaa92b27 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -45,7 +45,8 @@ static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
u64_stats_update_begin(&irqtime->sync);
cpustat[idx] += delta;
irqtime->total += delta;
- irqtime->tick_delta += delta;
+ if (!irqtime->idle_dyntick)
+ irqtime->tick_delta += delta;
u64_stats_update_end(&irqtime->sync);
}
@@ -80,6 +81,16 @@ void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
}
+static inline void irqtime_dyntick_start(void)
+{
+ __this_cpu_write(cpu_irqtime.idle_dyntick, true);
+}
+
+static inline void irqtime_dyntick_stop(void)
+{
+ __this_cpu_write(cpu_irqtime.idle_dyntick, false);
+}
+
static u64 irqtime_tick_accounted(u64 maxtime)
{
struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
@@ -93,6 +104,9 @@ static u64 irqtime_tick_accounted(u64 maxtime)
#else /* !CONFIG_IRQ_TIME_ACCOUNTING: */
+static inline void irqtime_dyntick_start(void) { }
+static inline void irqtime_dyntick_stop(void) { }
+
static u64 irqtime_tick_accounted(u64 dummy)
{
return 0;
@@ -443,6 +457,7 @@ void kcpustat_dyntick_stop(ktime_t now)
WARN_ON_ONCE(!kc->idle_dyntick);
kcpustat_idle_stop(kc, now);
kc->idle_dyntick = false;
+ irqtime_dyntick_stop();
vtime_dyntick_stop();
steal_account_process_time(ULONG_MAX);
}
@@ -454,6 +469,7 @@ void kcpustat_dyntick_start(ktime_t now)
if (!vtime_generic_enabled_this_cpu()) {
vtime_dyntick_start();
+ irqtime_dyntick_start();
kc->idle_dyntick = true;
kcpustat_idle_start(kc, now);
}
@@ -463,7 +479,8 @@ void kcpustat_irq_enter(ktime_t now)
{
struct kernel_cpustat *kc = kcpustat_this_cpu;
- if (!vtime_generic_enabled_this_cpu())
+ if (!vtime_generic_enabled_this_cpu() &&
+ (irqtime_enabled() || vtime_accounting_enabled_this_cpu()))
kcpustat_idle_stop(kc, now);
}
@@ -471,7 +488,8 @@ void kcpustat_irq_exit(ktime_t now)
{
struct kernel_cpustat *kc = kcpustat_this_cpu;
- if (!vtime_generic_enabled_this_cpu())
+ if (!vtime_generic_enabled_this_cpu() &&
+ (irqtime_enabled() || vtime_accounting_enabled_this_cpu()))
kcpustat_idle_start(kc, now);
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d30cca6870f5..cf677ff12b10 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3307,6 +3307,7 @@ static inline void sched_core_tick(struct rq *rq) { }
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
struct irqtime {
+ bool idle_dyntick;
u64 total;
u64 tick_delta;
u64 irq_start_time;
--
2.51.1