Message-ID: <20240819154259.215504-2-axboe@kernel.dk>
Date: Mon, 19 Aug 2024 09:39:46 -0600
From: Jens Axboe <axboe@...nel.dk>
To: linux-kernel@...r.kernel.org
Cc: peterz@...radead.org,
tglx@...utronix.de,
Jens Axboe <axboe@...nel.dk>
Subject: [PATCH 1/4] sched/core: add helpers for iowait handling
Add helpers to increment and decrement the runqueue iowait count based on
the task, and use them in the spots where the count is manipulated.

Add an rq_iowait() helper to abstract out how the per-rq stat is read.

No functional changes in this patch, just preparation for switching the
type of 'nr_iowait'.
Signed-off-by: Jens Axboe <axboe@...nel.dk>
---
kernel/sched/core.c | 23 +++++++++++++++++++----
kernel/sched/cputime.c | 3 +--
kernel/sched/sched.h | 2 ++
3 files changed, 22 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ab50100363ca..9bf1b67818d0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3541,6 +3541,21 @@ static inline bool rq_has_pinned_tasks(struct rq *rq)
#endif /* !CONFIG_SMP */
+static void task_iowait_inc(struct task_struct *p)
+{
+ atomic_inc(&task_rq(p)->nr_iowait);
+}
+
+static void task_iowait_dec(struct task_struct *p)
+{
+ atomic_dec(&task_rq(p)->nr_iowait);
+}
+
+int rq_iowait(struct rq *rq)
+{
+ return atomic_read(&rq->nr_iowait);
+}
+
static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
@@ -3607,7 +3622,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
#endif
if (p->in_iowait) {
delayacct_blkio_end(p);
- atomic_dec(&task_rq(p)->nr_iowait);
+ task_iowait_dec(p);
}
activate_task(rq, p, en_flags);
@@ -4184,7 +4199,7 @@ int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
if (task_cpu(p) != cpu) {
if (p->in_iowait) {
delayacct_blkio_end(p);
- atomic_dec(&task_rq(p)->nr_iowait);
+ task_iowait_dec(p);
}
wake_flags |= WF_MIGRATED;
@@ -5282,7 +5297,7 @@ unsigned long long nr_context_switches(void)
unsigned int nr_iowait_cpu(int cpu)
{
- return atomic_read(&cpu_rq(cpu)->nr_iowait);
+ return rq_iowait(cpu_rq(cpu));
}
/*
@@ -6512,7 +6527,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
if (prev->in_iowait) {
- atomic_inc(&rq->nr_iowait);
+ task_iowait_inc(prev);
delayacct_blkio_start();
}
}
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 0bed0fa1acd9..b826267714de 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -222,9 +222,8 @@ void account_steal_time(u64 cputime)
void account_idle_time(u64 cputime)
{
u64 *cpustat = kcpustat_this_cpu->cpustat;
- struct rq *rq = this_rq();
- if (atomic_read(&rq->nr_iowait) > 0)
+ if (rq_iowait(this_rq()) > 0)
cpustat[CPUTIME_IOWAIT] += cputime;
else
cpustat[CPUTIME_IDLE] += cputime;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1e1d1b467af2..b6b3b565bcb1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3579,6 +3579,8 @@ static inline void init_sched_mm_cid(struct task_struct *t) { }
extern u64 avg_vruntime(struct cfs_rq *cfs_rq);
extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se);
+int rq_iowait(struct rq *rq);
+
#ifdef CONFIG_RT_MUTEXES
static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
--
2.43.0
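
[Note added in editing, not part of the patch: a minimal sketch of how the new
rq_iowait() accessor can be used instead of reading rq->nr_iowait directly.
nr_iowait_sum() is a hypothetical name used only for illustration; the in-tree
nr_iowait() already computes this sum via nr_iowait_cpu(). cpu_rq() and
for_each_possible_cpu() are the existing kernel helpers.]

	/*
	 * Illustrative sketch only: sum the number of tasks currently in
	 * iowait across all CPUs through the rq_iowait() accessor rather
	 * than touching rq->nr_iowait directly.
	 */
	unsigned int nr_iowait_sum(void)
	{
		unsigned int i, sum = 0;

		for_each_possible_cpu(i)
			sum += rq_iowait(cpu_rq(i));

		return sum;
	}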