Message-Id: <1501773219-18774-1-git-send-email-jbacik@fb.com>
Date:   Thu,  3 Aug 2017 11:13:38 -0400
From:   josef@...icpanda.com
To:     riel@...hat.com, kernel-team@...com, mingo@...hat.com,
        peterz@...radead.org, linux-kernel@...r.kernel.org, tj@...nel.org
Cc:     Josef Bacik <jbacik@...com>
Subject: [PATCH 1/2] sched/fair: use reweight_entity to reweight tasks

From: Josef Bacik <jbacik@...com>

reweight_task() only accounts for the load average change in the cfs_rq,
but not for the runnable average change.  Do everything reweight_entity()
does instead, and then simply set inv_weight appropriately.
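
As an illustration of the failure mode, here is a minimal, self-contained
sketch (the struct and helper names are made up for the example and are not
the scheduler's API): if a reweight path detaches and re-attaches only the
load-side contribution, the runnable side keeps values scaled by the stale
weight.  Routing everything through one helper that detaches both sums,
updates the weight, and re-attaches both keeps them consistent:

	#include <stdio.h>

	struct rq_avg {
		unsigned long load_avg;	/* sum of weighted load contributions */
		unsigned long runnable_avg;	/* sum of weighted runnable contributions */
	};

	struct entity {
		unsigned long weight;	/* current load weight */
		unsigned long load_sum;	/* unweighted load history */
		unsigned long runnable_sum;	/* unweighted runnable history */
	};

	static void detach(struct rq_avg *rq, struct entity *se)
	{
		rq->load_avg     -= se->weight * se->load_sum;
		rq->runnable_avg -= se->weight * se->runnable_sum;
	}

	static void attach(struct rq_avg *rq, struct entity *se)
	{
		rq->load_avg     += se->weight * se->load_sum;
		rq->runnable_avg += se->weight * se->runnable_sum;
	}

	static void reweight(struct rq_avg *rq, struct entity *se,
			     unsigned long w)
	{
		detach(rq, se);	/* remove contributions at the old weight */
		se->weight = w;	/* apply the new weight */
		attach(rq, se);	/* re-add contributions at the new weight */
	}

	int main(void)
	{
		struct rq_avg rq = { 0, 0 };
		struct entity se = { .weight = 1024,
				     .load_sum = 10, .runnable_sum = 7 };

		attach(&rq, &se);		/* entity joins the runqueue */
		reweight(&rq, &se, 2048);	/* nice level changes */
		printf("load_avg=%lu runnable_avg=%lu\n",
		       rq.load_avg, rq.runnable_avg);
		return 0;
	}

Doing the weight update inside the single helper is exactly why reusing
reweight_entity() for reweight_task() closes the runnable-side gap.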

Signed-off-by: Josef Bacik <jbacik@...com>
---
 kernel/sched/fair.c | 31 +++++++++++--------------------
 1 file changed, 11 insertions(+), 20 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0cff1b6..c336534 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2809,26 +2809,6 @@ __sub_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
 }
 
-void reweight_task(struct task_struct *p, int prio)
-{
-	struct sched_entity *se = &p->se;
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
-	struct load_weight *load = &p->se.load;
-
-	u32 divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib;
-
-	__sub_load_avg(cfs_rq, se);
-
-	load->weight = scale_load(sched_prio_to_weight[prio]);
-	load->inv_weight = sched_prio_to_wmult[prio];
-
-	se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
-	se->avg.runnable_load_avg =
-		div_u64(se_runnable(se) * se->avg.runnable_load_sum, divider);
-
-	__add_load_avg(cfs_rq, se);
-}
-
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight, unsigned long runnable)
 {
@@ -2858,6 +2838,17 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	}
 }
 
+void reweight_task(struct task_struct *p, int prio)
+{
+	struct sched_entity *se = &p->se;
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	struct load_weight *load = &se->load;
+	unsigned long weight = scale_load(sched_prio_to_weight[prio]);
+
+	reweight_entity(cfs_rq, se, weight, weight);
+	load->inv_weight = sched_prio_to_wmult[prio];
+}
+
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
 
 /*
-- 
2.7.4
