[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20260116145208.87445-4-frederic@kernel.org>
Date: Fri, 16 Jan 2026 15:51:56 +0100
From: Frederic Weisbecker <frederic@...nel.org>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Frederic Weisbecker <frederic@...nel.org>,
"Christophe Leroy (CS GROUP)" <chleroy@...nel.org>,
"Rafael J. Wysocki" <rafael@...nel.org>,
Alexander Gordeev <agordeev@...ux.ibm.com>,
Anna-Maria Behnsen <anna-maria@...utronix.de>,
Ben Segall <bsegall@...gle.com>,
Boqun Feng <boqun.feng@...il.com>,
Christian Borntraeger <borntraeger@...ux.ibm.com>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Heiko Carstens <hca@...ux.ibm.com>,
Ingo Molnar <mingo@...hat.com>,
Jan Kiszka <jan.kiszka@...mens.com>,
Joel Fernandes <joelagnelf@...dia.com>,
Juri Lelli <juri.lelli@...hat.com>,
Kieran Bingham <kbingham@...nel.org>,
Madhavan Srinivasan <maddy@...ux.ibm.com>,
Mel Gorman <mgorman@...e.de>,
Michael Ellerman <mpe@...erman.id.au>,
Neeraj Upadhyay <neeraj.upadhyay@...nel.org>,
Nicholas Piggin <npiggin@...il.com>,
"Paul E . McKenney" <paulmck@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Steven Rostedt <rostedt@...dmis.org>,
Sven Schnelle <svens@...ux.ibm.com>,
Thomas Gleixner <tglx@...utronix.de>,
Uladzislau Rezki <urezki@...il.com>,
Valentin Schneider <vschneid@...hat.com>,
Vasily Gorbik <gor@...ux.ibm.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Viresh Kumar <viresh.kumar@...aro.org>,
Xin Zhao <jackzxcui1989@....com>,
linux-pm@...r.kernel.org,
linux-s390@...r.kernel.org,
linuxppc-dev@...ts.ozlabs.org
Subject: [PATCH 03/15] sched/cputime: Correctly support generic vtime idle time
Currently, whether generic vtime is running or not, the idle cputime is
fetched from the nohz accounting.
However, generic vtime already does its own idle cputime accounting; only
the kernel stat accessors are not plugged in to make use of it.
Read the idle cputime from generic vtime when it is running; this will
later allow a cleaner split between nohz and vtime cputime accounting.
Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
---
fs/proc/stat.c | 8 ++++----
include/linux/vtime.h | 7 ++++++-
kernel/sched/cputime.c | 38 +++++++++++++++++++++++++++++++-------
kernel/time/tick-sched.c | 2 +-
4 files changed, 42 insertions(+), 13 deletions(-)
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 8b444e862319..6ac2a13b8be5 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -30,8 +30,8 @@ u64 get_idle_time(struct kernel_cpustat *kcs, int cpu)
idle_usecs = get_cpu_idle_time_us(cpu, NULL);
if (idle_usecs == -1ULL)
- /* !NO_HZ or cpu offline so we can rely on cpustat.idle */
- idle = kcs->cpustat[CPUTIME_IDLE];
+ /* !NO_HZ or cpu offline or vtime so we can rely on cpustat.idle */
+ idle = kcpustat_field(CPUTIME_IDLE, cpu);
else
idle = idle_usecs * NSEC_PER_USEC;
@@ -46,8 +46,8 @@ static u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu)
iowait_usecs = get_cpu_iowait_time_us(cpu, NULL);
if (iowait_usecs == -1ULL)
- /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
- iowait = kcs->cpustat[CPUTIME_IOWAIT];
+ /* !NO_HZ or cpu offline or vtime so we can rely on cpustat.iowait */
+ iowait = kcpustat_field(CPUTIME_IOWAIT, cpu);
else
iowait = iowait_usecs * NSEC_PER_USEC;
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index 29dd5b91dd7d..737930f66c3e 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -27,6 +27,11 @@ static inline void vtime_guest_exit(struct task_struct *tsk) { }
static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
#endif
+static inline bool vtime_generic_enabled_cpu(int cpu)
+{
+ return context_tracking_enabled_cpu(cpu);
+}
+
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
extern void vtime_account_irq(struct task_struct *tsk, unsigned int offset);
extern void vtime_account_softirq(struct task_struct *tsk);
@@ -74,7 +79,7 @@ static inline bool vtime_accounting_enabled(void)
static inline bool vtime_accounting_enabled_cpu(int cpu)
{
- return context_tracking_enabled_cpu(cpu);
+ return vtime_generic_enabled_cpu(cpu);
}
static inline bool vtime_accounting_enabled_this_cpu(void)
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 5dcb0f2e01bc..f32c169da11a 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -761,7 +761,11 @@ EXPORT_SYMBOL_GPL(vtime_guest_exit);
void vtime_account_idle(struct task_struct *tsk)
{
- account_idle_time(get_vtime_delta(&tsk->vtime));
+ struct vtime *vtime = &tsk->vtime;
+
+ write_seqcount_begin(&vtime->seqcount);
+ account_idle_time(get_vtime_delta(vtime));
+ write_seqcount_end(&vtime->seqcount);
}
void vtime_task_switch_generic(struct task_struct *prev)
@@ -912,6 +916,7 @@ static int kcpustat_field_vtime(u64 *cpustat,
int cpu, u64 *val)
{
struct vtime *vtime = &tsk->vtime;
+ struct rq *rq = cpu_rq(cpu);
unsigned int seq;
do {
@@ -953,6 +958,14 @@ static int kcpustat_field_vtime(u64 *cpustat,
if (state == VTIME_GUEST && task_nice(tsk) > 0)
*val += vtime->gtime + vtime_delta(vtime);
break;
+ case CPUTIME_IDLE:
+ if (state == VTIME_IDLE && !atomic_read(&rq->nr_iowait))
+ *val += vtime_delta(vtime);
+ break;
+ case CPUTIME_IOWAIT:
+ if (state == VTIME_IDLE && atomic_read(&rq->nr_iowait) > 0)
+ *val += vtime_delta(vtime);
+ break;
default:
break;
}
@@ -1015,8 +1028,8 @@ static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst,
*dst = *src;
cpustat = dst->cpustat;
- /* Task is sleeping, dead or idle, nothing to add */
- if (state < VTIME_SYS)
+ /* Task is sleeping or dead, nothing to add */
+ if (state < VTIME_IDLE)
continue;
delta = vtime_delta(vtime);
@@ -1025,15 +1038,17 @@ static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst,
* Task runs either in user (including guest) or kernel space,
* add pending nohz time to the right place.
*/
- if (state == VTIME_SYS) {
+ switch (vtime->state) {
+ case VTIME_SYS:
cpustat[CPUTIME_SYSTEM] += vtime->stime + delta;
- } else if (state == VTIME_USER) {
+ break;
+ case VTIME_USER:
if (task_nice(tsk) > 0)
cpustat[CPUTIME_NICE] += vtime->utime + delta;
else
cpustat[CPUTIME_USER] += vtime->utime + delta;
- } else {
- WARN_ON_ONCE(state != VTIME_GUEST);
+ break;
+ case VTIME_GUEST:
if (task_nice(tsk) > 0) {
cpustat[CPUTIME_GUEST_NICE] += vtime->gtime + delta;
cpustat[CPUTIME_NICE] += vtime->gtime + delta;
@@ -1041,6 +1056,15 @@ static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst,
cpustat[CPUTIME_GUEST] += vtime->gtime + delta;
cpustat[CPUTIME_USER] += vtime->gtime + delta;
}
+ break;
+ case VTIME_IDLE:
+ if (atomic_read(&cpu_rq(cpu)->nr_iowait) > 0)
+ cpustat[CPUTIME_IOWAIT] += delta;
+ else
+ cpustat[CPUTIME_IDLE] += delta;
+ break;
+ default:
+ WARN_ON_ONCE(1);
}
} while (read_seqcount_retry(&vtime->seqcount, seq));
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 8ddf74e705d3..f1d07a0276a5 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -780,7 +780,7 @@ static u64 get_cpu_sleep_time_us(struct tick_sched *ts, ktime_t *sleeptime,
ktime_t now, idle;
unsigned int seq;
- if (!tick_nohz_active)
+ if (!tick_nohz_active || vtime_generic_enabled_cpu(cpu))
return -1;
now = ktime_get();
--
2.51.1
Powered by blists - more mailing lists