Message-Id: <1428377792-6180-1-git-send-email-wanpeng.li@linux.intel.com>
Date:	Tue,  7 Apr 2015 11:36:32 +0800
From:	Wanpeng Li <wanpeng.li@...ux.intel.com>
To:	Ingo Molnar <mingo@...hat.com>,
	Peter Zijlstra <peterz@...radead.org>
Cc:	Juri Lelli <juri.lelli@....com>, linux-kernel@...r.kernel.org,
	Wanpeng Li <wanpeng.li@...ux.intel.com>
Subject: [PATCH] sched/deadline: fix root domain dl bandwidth overflow after dl task dies

The total used dl bandwidth of each root domain is reset to 0 when the
sched domains are rebuilt after cpu hotplug, since the call path is:

_cpu_down
  cpuset_cpu_inactive() 
    cpuset_update_active_cpus()
      partition_sched_domains()
        build_sched_domains() 
          init_rootdomain() 
            init_dl_bw() 
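
For reference, init_dl_bw() leaves the accounted bandwidth at zero;
a simplified sketch of this kernel's init_dl_bw() (locking around the
global rt limits omitted, the exact body may differ slightly):

	void init_dl_bw(struct dl_bw *dl_b)
	{
		raw_spin_lock_init(&dl_b->lock);
		if (global_rt_runtime() == RUNTIME_INF)
			dl_b->bw = -1;
		else
			dl_b->bw = to_ratio(global_rt_period(),
					    global_rt_runtime());
		dl_b->total_bw = 0;	/* previously accounted bw is gone */
	}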

The bandwidth a dl task occupies is released when the task dies: it is
subtracted from the total used dl bandwidth of its root domain. However,
that total has just been reset to 0, so the subtraction wraps around and
the bandwidth accounting overflows.
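
The release path at task death looks roughly like this (simplified
sketch of task_dead_dl() in kernel/sched/deadline.c; the exact body
may differ slightly):

	static void task_dead_dl(struct task_struct *p)
	{
		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

		raw_spin_lock_irq(&dl_b->lock);
		/* total_bw is 0 after a rebuild, so this wraps */
		dl_b->total_bw -= p->dl.dl_bw;
		raw_spin_unlock_irq(&dl_b->lock);
	}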

This patch fixes it by charging the bandwidth a dl task occupies to the
new root domain when the task is migrated because of cpu hotplug, and by
charging the used dl bandwidth of all queued dl tasks to the new root
domain when the sched domains are rebuilt.

Signed-off-by: Wanpeng Li <wanpeng.li@...ux.intel.com>
---
 kernel/sched/core.c     |  2 ++
 kernel/sched/deadline.c | 26 ++++++++++++++++++++++++++
 kernel/sched/sched.h    |  1 +
 3 files changed, 29 insertions(+)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 28b0d75..c940999 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5586,6 +5586,8 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	rq->rd = rd;
 
 	cpumask_set_cpu(rq->cpu, rd->span);
+	/* rq now belongs to rd: charge its queued dl tasks' bandwidth */
+	attach_dl_bw(rq);
 	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
 		set_rq_online(rq);
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 5e95145..62680d7 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -224,6 +224,7 @@ static void dl_task_offline_migration(struct rq *rq, struct task_struct *p)
 {
 	struct rq *later_rq = NULL;
 	bool fallback = false;
+	struct dl_bw *dl_b;
 
 	later_rq = find_lock_later_rq(p, rq);
 
@@ -258,6 +259,12 @@ static void dl_task_offline_migration(struct rq *rq, struct task_struct *p)
 	set_task_cpu(p, later_rq->cpu);
 	activate_task(later_rq, p, ENQUEUE_REPLENISH);
 
+	dl_b = dl_bw_of(later_rq->cpu);
+	raw_spin_lock(&dl_b->lock);
+	/* account p's bandwidth in the root domain it migrated to */
+	__dl_add(dl_b, p->dl.dl_bw);
+	raw_spin_unlock(&dl_b->lock);
+
 	if (!fallback)
 		resched_curr(later_rq);
 
@@ -1776,6 +1783,25 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
 		switched_to_dl(rq, p);
 }
 
+/*
+ * Charge the bandwidth of all queued dl tasks to rq's root domain.
+ */
+void attach_dl_bw(struct rq *rq)
+{
+	struct rb_node *next_node = rq->dl.rb_leftmost;
+	struct sched_dl_entity *dl_se;
+	struct dl_bw *dl_b;
+
+	dl_b = dl_bw_of(rq->cpu);
+	raw_spin_lock(&dl_b->lock);
+	while (next_node) {
+		dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
+		__dl_add(dl_b, dl_se->dl_bw);
+		next_node = rb_next(next_node);
+	}
+	raw_spin_unlock(&dl_b->lock);
+}
+
 const struct sched_class dl_sched_class = {
 	.next			= &rt_sched_class,
 	.enqueue_task		= enqueue_task_dl,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e0e1299..a7b1a59 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1676,6 +1676,7 @@ extern void init_dl_rq(struct dl_rq *dl_rq);
 
 extern void cfs_bandwidth_usage_inc(void);
 extern void cfs_bandwidth_usage_dec(void);
+extern void attach_dl_bw(struct rq *rq);
 
 #ifdef CONFIG_NO_HZ_COMMON
 enum rq_nohz_flag_bits {
-- 
1.9.1
