Message-ID: <20230306141502.449738212@infradead.org>
Date: Mon, 06 Mar 2023 14:25:25 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: mingo@...nel.org, vincent.guittot@...aro.org
Cc: linux-kernel@...r.kernel.org, peterz@...radead.org,
juri.lelli@...hat.com, dietmar.eggemann@....com,
rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de,
bristot@...hat.com, corbet@....net, qyousef@...alina.io,
chris.hyser@...cle.com, patrick.bellasi@...bug.net, pjt@...gle.com,
pavel@....cz, qperret@...gle.com, tim.c.chen@...ux.intel.com,
joshdon@...gle.com, timj@....org, kprateek.nayak@....com,
yu.c.chen@...el.com, youssefesmat@...omium.org,
joel@...lfernandes.org
Subject: [PATCH 04/10] sched/fair: Add latency_offset
From: Vincent Guittot <vincent.guittot@...aro.org>
Instead of storing the raw latency_nice value that the user passed in,
store it as a latency_prio index (0..39, mirroring static_prio) and use
the matching sched_prio_to_weight[] entry to precompute a per-entity
latency_offset in nanoseconds, to be used as the preemption offset.

XXX fold back into previous patches
Murdered-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Signed-off-by: Vincent Guittot <vincent.guittot@...aro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Tested-by: K Prateek Nayak <kprateek.nayak@....com>
---
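
For illustration (not part of the patch): a standalone userspace sketch
of the nice <-> prio mapping and the offset computation below. It
assumes LATENCY_NICE_WIDTH is 40 (defined in an earlier patch of this
series) and hardcodes three endpoints of the kernel's
sched_prio_to_weight[] table; the 0.75ms base is the unscaled
sysctl_sched_min_granularity default, so treat the exact numbers as
illustrative.

#include <stdio.h>
#include <stdint.h>

#define LATENCY_NICE_WIDTH	40	/* assumed, from earlier patch */
#define DEFAULT_LATENCY_NICE	0
#define DEFAULT_LATENCY_PRIO	(DEFAULT_LATENCY_NICE + LATENCY_NICE_WIDTH/2)

#define NICE_TO_LATENCY(nice)	((nice) + DEFAULT_LATENCY_PRIO)
#define LATENCY_TO_NICE(prio)	((prio) - DEFAULT_LATENCY_PRIO)

#define SCHED_FIXEDPOINT_SHIFT	10

/* Three entries of the kernel's sched_prio_to_weight[] table */
static const uint32_t weight[40] = { [0] = 88761, [20] = 1024, [39] = 15 };

/* Mirrors calc_latency_offset(): base << 10, scaled down by the weight */
static long calc_latency_offset(int prio, uint64_t min_gran_ns)
{
	return (min_gran_ns << SCHED_FIXEDPOINT_SHIFT) / weight[prio];
}

int main(void)
{
	const uint64_t min_gran = 750000;	/* 0.75ms default base */
	const int nice[] = { -20, 0, 19 };

	for (int i = 0; i < 3; i++) {
		int prio = NICE_TO_LATENCY(nice[i]);

		printf("latency_nice %3d -> latency_prio %2d -> offset %8ld ns\n",
		       nice[i], prio, calc_latency_offset(prio, min_gran));
	}
	return 0;
}

With those numbers, latency_nice -20 yields an offset of ~8.7us,
0 the full 750000ns, and +19 51.2ms: the more latency sensitive the
task, the smaller its preemption offset.
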
include/linux/sched.h | 4 +++-
include/linux/sched/prio.h | 9 +++++++++
init/init_task.c | 2 +-
kernel/sched/core.c | 21 ++++++++++++++++-----
kernel/sched/debug.c | 2 +-
kernel/sched/fair.c | 8 ++++++++
kernel/sched/sched.h | 2 ++
7 files changed, 40 insertions(+), 8 deletions(-)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -568,6 +568,8 @@ struct sched_entity {
/* cached value of my_q->h_nr_running */
unsigned long runnable_weight;
#endif
+ /* preemption offset in ns */
+ long latency_offset;
#ifdef CONFIG_SMP
/*
@@ -784,7 +786,7 @@ struct task_struct {
int static_prio;
int normal_prio;
unsigned int rt_priority;
- int latency_nice;
+ int latency_prio;
struct sched_entity se;
struct sched_rt_entity rt;
--- a/include/linux/sched/prio.h
+++ b/include/linux/sched/prio.h
@@ -59,5 +59,14 @@ static inline long rlimit_to_nice(long p
* Default tasks should be treated as a task with latency_nice = 0.
*/
#define DEFAULT_LATENCY_NICE 0
+#define DEFAULT_LATENCY_PRIO (DEFAULT_LATENCY_NICE + LATENCY_NICE_WIDTH/2)
+
+/*
+ * Convert user-nice values [ -20 ... 0 ... 19 ]
+ * to static latency [ 0..39 ],
+ * and back.
+ */
+#define NICE_TO_LATENCY(nice) ((nice) + DEFAULT_LATENCY_PRIO)
+#define LATENCY_TO_NICE(prio) ((prio) - DEFAULT_LATENCY_PRIO)
#endif /* _LINUX_SCHED_PRIO_H */
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -78,7 +78,7 @@ struct task_struct init_task
.prio = MAX_PRIO - 20,
.static_prio = MAX_PRIO - 20,
.normal_prio = MAX_PRIO - 20,
- .latency_nice = DEFAULT_LATENCY_NICE,
+ .latency_prio = DEFAULT_LATENCY_PRIO,
.policy = SCHED_NORMAL,
.cpus_ptr = &init_task.cpus_mask,
.user_cpus_ptr = NULL,
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1285,6 +1285,11 @@ static void set_load_weight(struct task_
}
}
+static void set_latency_offset(struct task_struct *p)
+{
+ p->se.latency_offset = calc_latency_offset(p->latency_prio);
+}
+
#ifdef CONFIG_UCLAMP_TASK
/*
* Serializes updates of utilization clamp values
@@ -4433,6 +4438,8 @@ static void __sched_fork(unsigned long c
p->se.vruntime = 0;
INIT_LIST_HEAD(&p->se.group_node);
+ set_latency_offset(p);
+
#ifdef CONFIG_FAIR_GROUP_SCHED
p->se.cfs_rq = NULL;
#endif
@@ -4684,7 +4691,9 @@ int sched_fork(unsigned long clone_flags
p->prio = p->normal_prio = p->static_prio;
set_load_weight(p, false);
- p->latency_nice = DEFAULT_LATENCY_NICE;
+ p->latency_prio = NICE_TO_LATENCY(0);
+ set_latency_offset(p);
+
/*
* We don't need the reset flag anymore after the fork. It has
* fulfilled its duty:
@@ -7456,8 +7465,10 @@ static void __setscheduler_params(struct
static void __setscheduler_latency(struct task_struct *p,
const struct sched_attr *attr)
{
- if (attr->sched_flags & SCHED_FLAG_LATENCY_NICE)
- p->latency_nice = attr->sched_latency_nice;
+ if (attr->sched_flags & SCHED_FLAG_LATENCY_NICE) {
+ p->latency_prio = NICE_TO_LATENCY(attr->sched_latency_nice);
+ set_latency_offset(p);
+ }
}
/*
@@ -7642,7 +7653,7 @@ static int __sched_setscheduler(struct t
if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
goto change;
if (attr->sched_flags & SCHED_FLAG_LATENCY_NICE &&
- attr->sched_latency_nice != p->latency_nice)
+ attr->sched_latency_nice != LATENCY_TO_NICE(p->latency_prio))
goto change;
p->sched_reset_on_fork = reset_on_fork;
@@ -8183,7 +8194,7 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pi
get_params(p, &kattr);
kattr.sched_flags &= SCHED_FLAG_ALL;
- kattr.sched_latency_nice = p->latency_nice;
+ kattr.sched_latency_nice = LATENCY_TO_NICE(p->latency_prio);
#ifdef CONFIG_UCLAMP_TASK
/*
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -1043,7 +1043,7 @@ void proc_sched_show_task(struct task_st
#endif
P(policy);
P(prio);
- P(latency_nice);
+ P(latency_prio);
if (task_has_dl_policy(p)) {
P(dl.runtime);
P(dl.deadline);
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -703,6 +703,14 @@ int sched_update_scaling(void)
}
#endif
+long calc_latency_offset(int prio)
+{
+ u32 weight = sched_prio_to_weight[prio];
+ u64 base = sysctl_sched_min_granularity;
+
+ return div_u64(base << SCHED_FIXEDPOINT_SHIFT, weight);
+}
+
/*
* delta /= w
*/
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2475,6 +2475,8 @@ extern unsigned int sysctl_numa_balancin
extern unsigned int sysctl_numa_balancing_hot_threshold;
#endif
+extern long calc_latency_offset(int prio);
+
#ifdef CONFIG_SCHED_HRTICK
/*
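
Not part of the patch: a hypothetical userspace snippet poking the
sched_setattr() path changed above. The sched_attr layout with the new
sched_latency_nice field and the SCHED_FLAG_LATENCY_NICE value (0x80
here) come from earlier patches in this series, so treat both as
assumptions rather than settled uapi.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
	uint32_t sched_util_min;
	uint32_t sched_util_max;
	int32_t  sched_latency_nice;	/* new in this series */
};

#define SCHED_FLAG_LATENCY_NICE	0x80	/* assumed flag value */

int main(void)
{
	struct sched_attr attr = {
		.size		    = sizeof(attr),
		.sched_policy	    = 0,	/* SCHED_NORMAL */
		.sched_flags	    = SCHED_FLAG_LATENCY_NICE,
		.sched_latency_nice = -10,	/* latency sensitive */
	};

	/* pid 0 == current task; there is no glibc wrapper for this */
	if (syscall(SYS_sched_setattr, 0, &attr, 0))
		perror("sched_setattr");

	return 0;
}

On a kernel with this series applied, sched_getattr() should then
report the same value back via kattr.sched_latency_nice.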