[<prev] [next>] [day] [month] [year] [list]
Message-ID: <3a0c974c-41d4-4203-9ffd-f2ec4da898aa@gmail.com>
Date: Mon, 14 Oct 2024 23:50:31 +0800
From: Benjamin Tang <tangsong8264@...il.com>
To: Peter Zijlstra <peterz@...radead.org>, Ingo Molnar <mingo@...hat.com>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>, linux-kernel@...r.kernel.org,
Benjamin Tang <tangsong8264@...il.com>
Subject: [PATCH RFC] sched/fair: Rename vruntime_gt() to field_gt()
Since commit aef6987d8954 ("sched/eevdf: Propagate min_slice up the cgroup
hierarchy") introduced min_slice, __min_slice_update() can compare min_slice
with the same helper that __min_vruntime_update() uses for min_vruntime.
Rename vruntime_gt() to field_gt() to reflect that it now compares an
arbitrary sched_entity field, and use it to replace entity_before() as well.
Signed-off-by: Benjamin Tang <tangsong8264@...il.com>
---
kernel/sched/fair.c | 24 +++++++-----------------
1 file changed, 7 insertions(+), 17 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b9784e13e6b6..598a7d38be06 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -541,16 +541,6 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
return min_vruntime;
}
-static inline bool entity_before(const struct sched_entity *a,
- const struct sched_entity *b)
-{
- /*
- * Tiebreak on vruntime seems unnecessary since it can
- * hardly happen.
- */
- return (s64)(a->deadline - b->deadline) < 0;
-}
-
static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
return (s64)(se->vruntime - cfs_rq->min_vruntime);
@@ -797,18 +787,18 @@ static inline u64 cfs_rq_min_slice(struct cfs_rq *cfs_rq)
return min_slice;
}
+#define field_gt(field, lse, rse) ({ (s64)((lse)->field - (rse)->field) > 0; })
+
static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
{
- return entity_before(__node_2_se(a), __node_2_se(b));
+ return field_gt(deadline, __node_2_se(b), __node_2_se(a));
}
-#define vruntime_gt(field, lse, rse) ({ (s64)((lse)->field - (rse)->field) > 0; })
-
static inline void __min_vruntime_update(struct sched_entity *se, struct rb_node *node)
{
if (node) {
struct sched_entity *rse = __node_2_se(node);
- if (vruntime_gt(min_vruntime, se, rse))
+ if (field_gt(min_vruntime, se, rse))
se->min_vruntime = rse->min_vruntime;
}
}
@@ -817,7 +807,7 @@ static inline void __min_slice_update(struct sched_entity *se, struct rb_node *n
{
if (node) {
struct sched_entity *rse = __node_2_se(node);
- if (rse->min_slice < se->min_slice)
+ if (field_gt(min_slice, se, rse))
se->min_slice = rse->min_slice;
}
}
@@ -963,7 +953,7 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
node = node->rb_right;
}
found:
- if (!best || (curr && entity_before(curr, best)))
+ if (!best || (curr && field_gt(deadline, best, curr)))
best = curr;
return best;
@@ -1186,7 +1176,7 @@ static inline bool do_preempt_short(struct cfs_rq *cfs_rq,
if (!entity_eligible(cfs_rq, pse))
return false;
- if (entity_before(pse, se))
+ if (field_gt(deadline, se, pse))
return true;
if (!entity_eligible(cfs_rq, se))
--
2.11.0
Powered by blists - more mailing lists