Message-Id: <1500038464-8742-2-git-send-email-josef@toxicpanda.com>
Date: Fri, 14 Jul 2017 13:20:58 +0000
From: Josef Bacik <josef@...icpanda.com>
To: mingo@...hat.com, peterz@...radead.org,
linux-kernel@...r.kernel.org, umgwanakikbuti@...il.com,
tj@...nel.org, kernel-team@...com
Cc: Josef Bacik <jbacik@...com>
Subject: [PATCH 1/7] sched/fair: use reweight_entity to reweight tasks
From: Josef Bacik <jbacik@...com>
reweight_task() only accounts for the load average change in the cfs_rq, but
doesn't account for the runnable average change. We need to do everything
reweight_entity() does, and then just set our inv_weight appropriately.
Signed-off-by: Josef Bacik <jbacik@...com>
---
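
For review context (not part of the commit message): reweight_entity()'s body
only shows up as context in the hunks below. A minimal sketch of its shape at
this point in the series, assuming the helpers visible in this patch plus a
few names taken from mainline fair.c (update_curr(), account_entity_dequeue(),
account_entity_enqueue(), update_load_set()) and an assumed se->runnable_weight
field, would look something like:

    static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
                                unsigned long weight, unsigned long runnable)
    {
            /* Same PELT divider the removed reweight_task() used. */
            u32 divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib;

            if (se->on_rq) {
                    /* Commit outstanding runtime at the old weight. */
                    if (cfs_rq->curr == se)
                            update_curr(cfs_rq);
                    /*
                     * Dequeue the old runnable contribution; this is the
                     * cfs_rq-side runnable accounting the removed
                     * reweight_task() never touched.
                     */
                    account_entity_dequeue(cfs_rq, se);
            }
            /* Back the old load contribution out of the cfs_rq sums. */
            __sub_load_avg(cfs_rq, se);

            /* Install the new weights; update_load_set() zeroes inv_weight. */
            update_load_set(&se->load, weight);
            se->runnable_weight = runnable;     /* assumed field name */

            /* Rescale the entity's own averages to the new weight... */
            se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
            se->avg.runnable_load_avg =
                    div_u64(se_runnable(se) * se->avg.runnable_load_sum, divider);

            /* ...and put the load contribution back at the new weight. */
            __add_load_avg(cfs_rq, se);

            if (se->on_rq)
                    account_entity_enqueue(cfs_rq, se);
    }

Assuming update_load_set() behaves as in mainline (it sets inv_weight to 0 so
it can be recomputed lazily), that is also why the new reweight_task() below
still stores sched_prio_to_wmult[prio] by hand after reweight_entity()
returns: for a task the precomputed inverse is already known.
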
kernel/sched/fair.c | 31 +++++++++++--------------------
1 file changed, 11 insertions(+), 20 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b13b451..326bc55 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2808,26 +2808,6 @@ __sub_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
         sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
 }
 
-void reweight_task(struct task_struct *p, int prio)
-{
-        struct sched_entity *se = &p->se;
-        struct cfs_rq *cfs_rq = cfs_rq_of(se);
-        struct load_weight *load = &p->se.load;
-
-        u32 divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib;
-
-        __sub_load_avg(cfs_rq, se);
-
-        load->weight = scale_load(sched_prio_to_weight[prio]);
-        load->inv_weight = sched_prio_to_wmult[prio];
-
-        se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
-        se->avg.runnable_load_avg =
-                div_u64(se_runnable(se) * se->avg.runnable_load_sum, divider);
-
-        __add_load_avg(cfs_rq, se);
-}
-
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
                             unsigned long weight, unsigned long runnable)
 {
@@ -2857,6 +2837,17 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
         }
 }
 
+void reweight_task(struct task_struct *p, int prio)
+{
+        struct sched_entity *se = &p->se;
+        struct cfs_rq *cfs_rq = cfs_rq_of(se);
+        struct load_weight *load = &se->load;
+        unsigned long weight = scale_load(sched_prio_to_weight[prio]);
+
+        reweight_entity(cfs_rq, se, weight, weight);
+        load->inv_weight = sched_prio_to_wmult[prio];
+}
+
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
 
 /*
--
2.9.3