Message-ID: <20230531124604.205287511@infradead.org>
Date: Wed, 31 May 2023 13:58:48 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: mingo@...nel.org, vincent.guittot@...aro.org
Cc: linux-kernel@...r.kernel.org, peterz@...radead.org,
juri.lelli@...hat.com, dietmar.eggemann@....com,
rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de,
bristot@...hat.com, corbet@....net, qyousef@...alina.io,
chris.hyser@...cle.com, patrick.bellasi@...bug.net, pjt@...gle.com,
pavel@....cz, qperret@...gle.com, tim.c.chen@...ux.intel.com,
joshdon@...gle.com, timj@....org, kprateek.nayak@....com,
yu.c.chen@...el.com, youssefesmat@...omium.org,
joel@...lfernandes.org, efault@....de, tglx@...utronix.de
Subject: [PATCH 09/15] sched/debug: Rename min_granularity to base_slice

EEVDF uses this tunable as the base request/slice -- make sure the
name reflects this.

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
 kernel/sched/core.c  |  2 +-
 kernel/sched/debug.c |  4 ++--
 kernel/sched/fair.c  | 12 ++++++------
 kernel/sched/sched.h |  2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)
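
For reference, the renamed tunable is the normalized 0.75 msec request from the
fair.c comment below, which update_sysctl() scales by (1 + ilog(ncpus)). A minimal
standalone sketch of that scaling (userspace C; the helper ilog2_u32() and the
hard-coded CPU count are assumptions for illustration, not kernel code):

#include <stdio.h>

/*
 * Illustration only: mirrors the logarithmic scaling described in the
 * fair.c comment, not the kernel's update_sysctl() path.
 */
static unsigned int ilog2_u32(unsigned int x)
{
        unsigned int r = 0;

        while (x >>= 1)
                r++;
        return r;
}

int main(void)
{
        const unsigned int normalized_base_slice = 750000;     /* 0.75 msec in ns */
        unsigned int ncpus = 8;                                 /* example machine */
        unsigned int factor = 1 + ilog2_u32(ncpus);             /* 1 + ilog(ncpus) */

        printf("base_slice_ns = %u\n", factor * normalized_base_slice);
        /* 8 CPUs -> factor 4 -> 3000000 ns (3 msec) */
        return 0;
}
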
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4464,7 +4464,7 @@ static void __sched_fork(unsigned long c
p->se.nr_migrations = 0;
p->se.vruntime = 0;
p->se.vlag = 0;
- p->se.slice = sysctl_sched_min_granularity;
+ p->se.slice = sysctl_sched_base_slice;
INIT_LIST_HEAD(&p->se.group_node);
#ifdef CONFIG_FAIR_GROUP_SCHED
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -347,7 +347,7 @@ static __init int sched_init_debug(void)
debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif
- debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
+ debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);
debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
@@ -862,7 +862,7 @@ static void sched_debug_header(struct se
SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
- PN(sysctl_sched_min_granularity);
+ PN(sysctl_sched_base_slice);
P(sysctl_sched_child_runs_first);
P(sysctl_sched_features);
#undef PN
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -75,8 +75,8 @@ unsigned int sysctl_sched_tunable_scalin
*
* (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
-unsigned int sysctl_sched_min_granularity = 750000ULL;
-static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
+unsigned int sysctl_sched_base_slice = 750000ULL;
+static unsigned int normalized_sysctl_sched_base_slice = 750000ULL;
/*
* After fork, child runs first. If set to 0 (default) then
@@ -237,7 +237,7 @@ static void update_sysctl(void)
#define SET_SYSCTL(name) \
(sysctl_##name = (factor) * normalized_sysctl_##name)
- SET_SYSCTL(sched_min_granularity);
+ SET_SYSCTL(sched_base_slice);
#undef SET_SYSCTL
}
@@ -943,7 +943,7 @@ int sched_update_scaling(void)
#define WRT_SYSCTL(name) \
(normalized_sysctl_##name = sysctl_##name / (factor))
- WRT_SYSCTL(sched_min_granularity);
+ WRT_SYSCTL(sched_base_slice);
#undef WRT_SYSCTL
return 0;
@@ -964,9 +964,9 @@ static void update_deadline(struct cfs_r
/*
* For EEVDF the virtual time slope is determined by w_i (iow.
* nice) while the request time r_i is determined by
- * sysctl_sched_min_granularity.
+ * sysctl_sched_base_slice.
*/
- se->slice = sysctl_sched_min_granularity;
+ se->slice = sysctl_sched_base_slice;
/*
* EEVDF: vd_i = ve_i + r_i / w_i
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2479,7 +2479,7 @@ extern void check_preempt_curr(struct rq
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;
-extern unsigned int sysctl_sched_min_granularity;
+extern unsigned int sysctl_sched_base_slice;
#ifdef CONFIG_SCHED_DEBUG
extern int sysctl_resched_latency_warn_ms;
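
The fair.c comment above gives the EEVDF relation vd_i = ve_i + r_i / w_i, with
the request r_i now coming from sysctl_sched_base_slice. A minimal standalone
sketch of that relation (made-up struct and weight handling for illustration;
not the kernel's update_deadline() or calc_delta_fair()):

#include <stdio.h>

#define NICE_0_WEIGHT   1024ULL         /* reference weight of a nice-0 task */

struct entity {
        unsigned long long vruntime;    /* ve_i, virtual runtime */
        unsigned long long deadline;    /* vd_i, virtual deadline */
        unsigned long long slice;       /* r_i, request size in ns */
        unsigned long long weight;      /* w_i, load weight from nice level */
};

/*
 * vd_i = ve_i + r_i / w_i, with r_i / w_i expressed in virtual time by
 * scaling the slice with NICE_0_WEIGHT / weight.
 */
static void update_deadline_sketch(struct entity *se, unsigned long long base_slice)
{
        se->slice = base_slice;         /* r_i = sysctl_sched_base_slice */
        se->deadline = se->vruntime + se->slice * NICE_0_WEIGHT / se->weight;
}

int main(void)
{
        struct entity se = { .vruntime = 0, .weight = NICE_0_WEIGHT };

        update_deadline_sketch(&se, 3000000ULL);        /* 3 msec scaled slice */
        printf("vd_i = %llu ns\n", se.deadline);        /* nice-0: vruntime + slice */
        return 0;
}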