Message-ID: <20251201064647.1851919-5-mingo@kernel.org>
Date: Mon, 1 Dec 2025 07:46:45 +0100
From: Ingo Molnar <mingo@...nel.org>
To: linux-kernel@...r.kernel.org
Cc: Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Valentin Schneider <vschneid@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Shrikanth Hegde <sshegde@...ux.ibm.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Mel Gorman <mgorman@...e.de>,
Steven Rostedt <rostedt@...dmis.org>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...nel.org>
Subject: [PATCH 4/6] sched/fair: Rename avg_vruntime() to cfs_avg_vruntime()
Since the ->avg_vruntime field isn't actually in the same unit as
the avg_vruntime() return value (the field caches the load-scaled
sum \Sum (v_i - v0)*w_i, while the function returns a real vruntime),
reduce confusion and rename the latter to the cfs_*() nomenclature
common to the globally visible functions of the fair scheduler.
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
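A minimal userspace sketch of that unit difference (not part of the
patch; the toy_* names are made-up stand-ins, not the kernel
implementation). The field caches the load-scaled sum
\Sum (v_i - v0)*w_i; the function divides by \Sum w_i and adds the
v0 offset back, which is what makes its result an actual vruntime:

#include <stdio.h>
#include <stdint.h>

struct toy_cfs_rq {
	int64_t  avg_vruntime;	/* \Sum (v_i - v0) * w_i: load-scaled */
	int64_t  avg_load;	/* \Sum w_i */
	uint64_t zero_vruntime;	/* v0 */
};

/* Returns a value in the same unit as se->vruntime. */
static uint64_t toy_cfs_avg_vruntime(const struct toy_cfs_rq *cfs_rq)
{
	int64_t avg = cfs_rq->avg_vruntime;
	int64_t load = cfs_rq->avg_load;

	if (load) {
		/* sign flips effective floor/ceiling: keep a left bias */
		if (avg < 0)
			avg -= (load - 1);
		avg /= load;
	}
	return cfs_rq->zero_vruntime + avg;
}

int main(void)
{
	/* two entities: (v = 1000, w = 2) and (v = 4000, w = 1), v0 = 0 */
	struct toy_cfs_rq rq = {
		.avg_vruntime	= 1000 * 2 + 4000 * 1,	/* 6000: not a vruntime */
		.avg_load	= 3,
		.zero_vruntime	= 0,
	};

	printf("->avg_vruntime field: %lld\n", (long long)rq.avg_vruntime);
	printf("cfs_avg_vruntime():   %llu\n",
	       (unsigned long long)toy_cfs_avg_vruntime(&rq));	/* 2000 */

	return 0;
}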
kernel/sched/debug.c | 2 +-
kernel/sched/fair.c | 10 +++++-----
kernel/sched/sched.h | 2 +-
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 41caa22e0680..a6ceda12bd35 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -829,7 +829,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "zero_vruntime",
SPLIT_NS(zero_vruntime));
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "avg_vruntime",
- SPLIT_NS(avg_vruntime(cfs_rq)));
+ SPLIT_NS(cfs_avg_vruntime(cfs_rq)));
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "right_vruntime",
SPLIT_NS(right_vruntime));
spread = right_vruntime - left_vruntime;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 769d7b7990df..3d6d551168aa 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -651,7 +651,7 @@ void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
* Specifically: avg_runtime() + 0 must result in entity_eligible() := true
* For this to be so, the result of this function must have a left bias.
*/
-u64 avg_vruntime(struct cfs_rq *cfs_rq)
+u64 cfs_avg_vruntime(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
s64 avg = cfs_rq->avg_vruntime;
@@ -696,7 +696,7 @@ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)

WARN_ON_ONCE(!se->on_rq);

- vlag = avg_vruntime(cfs_rq) - se->vruntime;
+ vlag = cfs_avg_vruntime(cfs_rq) - se->vruntime;
limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);

se->vlag = clamp(vlag, -limit, limit);
@@ -716,7 +716,7 @@ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
*
* lag_i >= 0 -> \Sum (v_i - v)*w_i >= (v_i - v)*(\Sum w_i)
*
- * Note: using 'avg_vruntime() > se->vruntime' is inaccurate due
+ * Note: using 'cfs_avg_vruntime() > se->vruntime' is inaccurate due
* to the loss in precision caused by the division.
*/
static int vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime)
@@ -742,7 +742,7 @@ int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)

static void update_zero_vruntime(struct cfs_rq *cfs_rq)
{
- u64 vruntime = avg_vruntime(cfs_rq);
+ u64 vruntime = cfs_avg_vruntime(cfs_rq);
s64 delta = (s64)(vruntime - cfs_rq->zero_vruntime);

avg_vruntime_update(cfs_rq, delta);
@@ -5099,7 +5099,7 @@ void __setparam_fair(struct task_struct *p, const struct sched_attr *attr)
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
- u64 vslice, vruntime = avg_vruntime(cfs_rq);
+ u64 vslice, vruntime = cfs_avg_vruntime(cfs_rq);
s64 lag = 0;

if (!se->custom_slice)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6bfcf52a4840..47f7b6df634c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3956,7 +3956,7 @@ static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { }
static inline void init_sched_mm_cid(struct task_struct *t) { }
#endif /* !CONFIG_SCHED_MM_CID */

-extern u64 avg_vruntime(struct cfs_rq *cfs_rq);
+extern u64 cfs_avg_vruntime(struct cfs_rq *cfs_rq);
extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se);
static inline
void move_queued_task_locked(struct rq *src_rq, struct rq *dst_rq, struct task_struct *task)
--
2.51.0
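
As an aside on the precision note kept in the comment above
("using 'cfs_avg_vruntime() > se->vruntime' is inaccurate due to the
loss in precision caused by the division"), a userspace sketch with
toy values (not kernel code): C integer division truncates toward
zero, so testing eligibility on the divided average can get a
boundary case wrong, which is why vruntime_eligible() compares the
load-scaled sum directly:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t avg  = -1;	/* \Sum (v_j - v0) * w_j over the queue */
	int64_t load = 3;	/* \Sum w_j */
	int64_t key  = 0;	/* v_i - v0 of the entity under test */

	/* divided form: -1/3 truncates to 0, and 0 >= 0 claims eligible */
	printf("divided: %s\n", avg / load >= key ? "eligible" : "not eligible");

	/* exact form: -1 >= 0*3 is false: correctly not eligible */
	printf("exact:   %s\n", avg >= key * load ? "eligible" : "not eligible");

	return 0;
}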