Message-ID: <20250422-b4-eevdf-tests-v1-post-v1-2-5b174f040f55@gianis.ca>
Date: Wed, 23 Apr 2025 00:15:43 +0000
From: "Dhaval Giani (AMD)" <dhaval@...nis.ca>
Cc: linux-kernel@...r.kernel.org, Dhaval Giani <dhaval.giani@....com>, Gautham Shenoy <gautham.shenoy@....com>, K Prateek Nayak <kprateek.nayak@....com>, "Dhaval Giani (AMD)" <dhaval@...nis.ca>
Subject: [PATCH 2/3] sched/fair: Add a test that a task selected to run has positive lag
Lemma 1 from the original EEVDF paper states that any task with
positive lag is eligible to run.

This test checks the converse: any task that is picked to run must have
positive lag, i.e. its vruntime must not be ahead of the queue's
average vruntime.
Signed-off-by: Dhaval Giani (AMD) <dhaval@...nis.ca>
---
 kernel/sched/eevdf-tests.c | 41 +++++++++++++++++++++++++++++++++++++++++
 kernel/sched/fair.c        |  2 ++
 kernel/sched/sched.h       |  2 ++
 3 files changed, 45 insertions(+)
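
Notes (not part of the changelog): below is a minimal user-space sketch of
the invariant the test asserts, assuming the usual EEVDF definition
lag(se) = avg_vruntime - se->vruntime. The names in the sketch
(fake_entity, lag_of, picked_entity_ok) are illustrative only and are not
part of this patch; the in-kernel check lives in test_eevdf_positive_lag()
in the diff below.

/*
 * Minimal user-space sketch (not kernel code) of the relation the test
 * checks: lag is measured against the queue's average vruntime, and a
 * picked entity is expected to satisfy lag >= 0, i.e. its vruntime must
 * not be ahead of the average.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_entity {		/* stand-in for struct sched_entity */
	long long vruntime;
};

/* lag of @se against the queue average: positive means "owed service" */
static long long lag_of(long long avg_vruntime, const struct fake_entity *se)
{
	return avg_vruntime - se->vruntime;
}

/* mirrors the check in test_eevdf_positive_lag(): fail if vruntime > avg */
static bool picked_entity_ok(long long avg_vruntime, const struct fake_entity *se)
{
	return lag_of(avg_vruntime, se) >= 0;
}

int main(void)
{
	struct fake_entity behind = { .vruntime =  90 };	/* lag = +10 -> eligible   */
	struct fake_entity ahead  = { .vruntime = 110 };	/* lag = -10 -> ineligible */
	long long avg = 100;

	printf("behind ok: %d\n", picked_entity_ok(avg, &behind));	/* 1 */
	printf("ahead  ok: %d\n", picked_entity_ok(avg, &ahead));	/* 0 */
	return 0;
}

To exercise the test, enable the debugfs knob added below, e.g.
  echo 1 > /sys/kernel/debug/sched/eevdf-testing/eevdf_positive_lag_test
(the path assumes debugfs is mounted at /sys/kernel/debug and the scheduler
debugfs directory is "sched"), then read the PASS/FAIL lines from the
ftrace buffer, since trace_printk() logs there.
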
diff --git a/kernel/sched/eevdf-tests.c b/kernel/sched/eevdf-tests.c
index 3bc016d3025733e53f586e30fcd31f650156d47e..8532330769bcc93dbf9cd98ebba75c838f62c045 100644
--- a/kernel/sched/eevdf-tests.c
+++ b/kernel/sched/eevdf-tests.c
@@ -18,10 +18,51 @@
#ifdef CONFIG_SCHED_EEVDF_TESTING
+/*
+ * Test parameters
+ */
+bool eevdf_positive_lag_test;
+u8 eevdf_positive_lag_count = 10;
+
static struct dentry *debugfs_eevdf_testing;
void debugfs_eevdf_testing_init(struct dentry *debugfs_sched)
{
debugfs_eevdf_testing = debugfs_create_dir("eevdf-testing", debugfs_sched);
+ debugfs_create_bool("eevdf_positive_lag_test", 0700,
+ debugfs_eevdf_testing, &eevdf_positive_lag_test);
+ debugfs_create_u8("eevdf_positive_lag_test_count", 0600,
+ debugfs_eevdf_testing, &eevdf_positive_lag_count);
+
}
+
+void test_eevdf_positive_lag(struct cfs_rq *cfs, struct sched_entity *se)
+{
+ static int eevdf_positive_lag_test_counter;
+ u64 eevdf_average_vruntime;
+
+ if (!eevdf_positive_lag_test)
+ return;
+
+ if (!se || !cfs)
+ return;
+
+ eevdf_average_vruntime = avg_vruntime(cfs);
+ eevdf_positive_lag_test_counter++;
+
+ if (se->vruntime > eevdf_average_vruntime) {
+ trace_printk("FAIL: Lemma 1 failed - selected task has negative lag\n");
+ eevdf_positive_lag_test = 0;
+ eevdf_positive_lag_test_counter = 0;
+ return;
+ }
+
+ if (eevdf_positive_lag_test_counter > eevdf_positive_lag_count) {
+ eevdf_positive_lag_test = 0;
+ eevdf_positive_lag_test_counter = 0;
+ trace_printk("PASS: At least %u selected tasks had positive lag\n",
+ eevdf_positive_lag_count);
+ }
+}
+
#endif /* CONFIG_SCHED_EEVDF_TESTING */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index eb5a2572b4f8b6b5517befc299312b6ae7476e88..924d9d35c2aa937bc0f4ca9565ba774397b90f77 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -980,6 +980,8 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
if (!best || (curr && entity_before(curr, best)))
best = curr;
+ test_eevdf_positive_lag(cfs_rq, best);
+
return best;
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 09cefe2aa871bbd533a413c76026895e969a58e7..5ad5e033e1c81167b712ab176d4d55e6b5d82d06 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3990,8 +3990,10 @@ void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx);
#ifdef CONFIG_SCHED_EEVDF_TESTING
void debugfs_eevdf_testing_init(struct dentry *debugfs_sched);
+void test_eevdf_positive_lag(struct cfs_rq *cfs, struct sched_entity *se);
#else /* CONFIG_SCHED_EEVDF_TESTING */
static inline void init_eevdf_testing_debugfs(struct dentry *debugfs_sched) {}
+static inline void test_eevdf_positive_lag(struct cfs_rq *cfs, struct sched_entity *se) {}
#endif /* CONFIG_SCHED_EEVDF_TESTING */
#endif /* _KERNEL_SCHED_SCHED_H */
--
2.49.0