[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20230328110354.780171563@infradead.org>
Date: Tue, 28 Mar 2023 11:26:39 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: mingo@...nel.org, vincent.guittot@...aro.org
Cc: linux-kernel@...r.kernel.org, peterz@...radead.org,
juri.lelli@...hat.com, dietmar.eggemann@....com,
rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de,
bristot@...hat.com, corbet@....net, qyousef@...alina.io,
chris.hyser@...cle.com, patrick.bellasi@...bug.net, pjt@...gle.com,
pavel@....cz, qperret@...gle.com, tim.c.chen@...ux.intel.com,
joshdon@...gle.com, timj@....org, kprateek.nayak@....com,
yu.c.chen@...el.com, youssefesmat@...omium.org,
joel@...lfernandes.org, efault@....de
Subject: [PATCH 17/17] [DEBUG] sched/eevdf: Debug / validation crud
XXX do not merge
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
kernel/sched/fair.c | 95 ++++++++++++++++++++++++++++++++++++++++++++++++
kernel/sched/features.h | 2 +
2 files changed, 97 insertions(+)
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -793,6 +793,92 @@ static inline bool min_deadline_update(s
RB_DECLARE_CALLBACKS(static, min_deadline_cb, struct sched_entity,
run_node, min_deadline, min_deadline_update);
+#ifdef CONFIG_SCHED_DEBUG
+struct validate_data {
+ s64 va; /* avg_vruntime(cfs_rq) snapshot taken before the walk */
+ s64 avg_vruntime; /* sum of weight * (vruntime - min_vruntime) recomputed from the tree */
+ s64 avg_load; /* sum of scale_load_down()'d weights of all tree entities */
+ s64 min_deadline; /* smallest (deadline - min_vruntime) seen during the walk */
+};
+
+static void __print_se(struct cfs_rq *cfs_rq, struct sched_entity *se, int level,
+ struct validate_data *data) /* dumps one entity and folds it into the validation sums */
+{
+ static const char indent[] = " "; /* printed via "%.*s" to indent by tree depth */
+ unsigned long weight = scale_load_down(se->load.weight);
+ struct task_struct *p = NULL;
+
+ s64 v = se->vruntime - cfs_rq->min_vruntime; /* vruntime key, relative to min_vruntime */
+ s64 d = se->deadline - cfs_rq->min_vruntime; /* deadline key, relative to min_vruntime */
+
+ data->avg_vruntime += v * weight; /* accumulate for the avg_vruntime consistency check */
+ data->avg_load += weight;
+
+ data->min_deadline = min(data->min_deadline, d); /* recompute what the augment should hold */
+
+ if (entity_is_task(se)) /* group entities have no pid/comm to report */
+ p = task_of(se);
+
+ trace_printk("%.*s%lx w: %ld ve: %Ld lag: %Ld vd: %Ld vmd: %Ld %s (%d/%s)\n",
+ level*2, indent, (unsigned long)se, /* two indent chars per tree level */
+ weight,
+ v, data->va - se->vruntime, d, /* lag = average vruntime - entity vruntime */
+ se->min_deadline - cfs_rq->min_vruntime, /* subtree-min from the rb augment */
+ entity_eligible(cfs_rq, se) ? "E" : "N", /* EEVDF eligibility at this instant */
+ p ? p->pid : -1,
+ p ? p->comm : "(null)");
+}
+
+static void __print_node(struct cfs_rq *cfs_rq, struct rb_node *node, int level,
+ struct validate_data *data) /* recursively print/validate a timeline subtree */
+{
+ if (!node) /* empty subtree: nothing to account */
+ return;
+
+ __print_se(cfs_rq, __node_2_se(node), level, data); /* pre-order: node before children */
+ __print_node(cfs_rq, node->rb_left, level+1, data);
+ __print_node(cfs_rq, node->rb_right, level+1, data);
+}
+
+static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq); /* defined later in this file */
+
+static void validate_cfs_rq(struct cfs_rq *cfs_rq, bool pick) /* recompute and cross-check the cached EEVDF aggregates */
+{
+ struct sched_entity *curr = cfs_rq->curr;
+ struct rb_node *root = cfs_rq->tasks_timeline.rb_root.rb_node;
+ struct validate_data _data = {
+ .va = avg_vruntime(cfs_rq), /* absolute average vruntime, used for the lag column */
+ .min_deadline = (~0ULL) >> 1, /* S64_MAX: identity for min() */
+ }, *data = &_data;
+
+ trace_printk("---\n"); /* record separator between successive dumps */
+
+ __print_node(cfs_rq, root, 0, data); /* walk the whole tree, accumulating into *data */
+
+ trace_printk("min_deadline: %Ld avg_vruntime: %Ld / %Ld = %Ld\n",
+ data->min_deadline,
+ data->avg_vruntime, data->avg_load,
+ data->avg_load ? div_s64(data->avg_vruntime, data->avg_load) : 0); /* guard div-by-zero on empty tree */
+
+ if (WARN_ON_ONCE(cfs_rq->avg_vruntime != data->avg_vruntime)) /* cached sum drifted: warn once and repair */
+ cfs_rq->avg_vruntime = data->avg_vruntime;
+
+ if (WARN_ON_ONCE(cfs_rq->avg_load != data->avg_load))
+ cfs_rq->avg_load = data->avg_load;
+
+ data->min_deadline += cfs_rq->min_vruntime; /* back to absolute, to compare with the augment */
+ WARN_ON_ONCE(cfs_rq->avg_load && __node_2_se(root)->min_deadline != data->min_deadline); /* NOTE(review): relies on avg_load != 0 implying root != NULL — confirm curr is excluded from avg_load while off-tree */
+
+ if (curr && curr->on_rq) /* curr sits outside the tree; print it after the checks */
+ __print_se(cfs_rq, curr, 0, data);
+
+ if (pick) /* optionally exercise the picker and log its choice */
+ trace_printk("pick: %lx\n", (unsigned long)pick_eevdf(cfs_rq));
+}
+#else
+static inline void validate_cfs_rq(struct cfs_rq *cfs_rq, bool pick) { } /* !CONFIG_SCHED_DEBUG: validation compiled out */
+#endif
+
/*
* Enqueue an entity into the rb-tree:
*/
@@ -802,6 +888,9 @@ static void __enqueue_entity(struct cfs_
se->min_deadline = se->deadline;
rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
__entity_less, &min_deadline_cb);
+
+ if (sched_feat(VALIDATE_QUEUE))
+ validate_cfs_rq(cfs_rq, true);
}
static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -809,6 +898,9 @@ static void __dequeue_entity(struct cfs_
rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
&min_deadline_cb);
avg_vruntime_sub(cfs_rq, se);
+
+ if (sched_feat(VALIDATE_QUEUE))
+ validate_cfs_rq(cfs_rq, true);
}
struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
@@ -894,6 +986,9 @@ static struct sched_entity *pick_eevdf(s
if (unlikely(!best)) {
struct sched_entity *left = __pick_first_entity(cfs_rq);
if (left) {
+ trace_printk("EEVDF scheduling fail, picking leftmost\n");
+ validate_cfs_rq(cfs_rq, false);
+ tracing_off();
pr_err("EEVDF scheduling fail, picking leftmost\n");
return left;
}
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -6,6 +6,8 @@ SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
SCHED_FEAT(MINIMAL_VA, false)
+SCHED_FEAT(VALIDATE_QUEUE, false)
+
/*
* Prefer to schedule the task we woke last (assuming it failed
* wakeup-preemption), since its likely going to consume data we
Powered by blists - more mailing lists