Date:	Mon, 5 Oct 2015 18:16:24 +0900
From:	<byungchul.park@....com>
To:	mingo@...nel.org, peterz@...radead.org
CC:	linux-kernel@...r.kernel.org, yuyang.du@...el.com, pjt@...gle.com,
	efault@....de, tglx@...utronix.de,
	Byungchul Park <byungchul.park@....com>
Subject: [PATCH 2/2] sched: make the fair sched class able to handle migration by another class

From: Byungchul Park <byungchul.park@....com>

The fair sched class can currently handle a migration within its own class
via migrate_task_rq_fair(), but it has no way to learn about a migration
that happens while the task belongs to another sched class. This patch
splits the migrate_task_rq() callback into migrate_task_rq_from() and
migrate_task_rq_to(), which set_task_cpu() now invokes for every sched
class around __set_task_cpu(), so the fair sched class can handle a
migration even when it happens in another sched class. The case where the
rq lock is not held is also handled properly.
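
For illustration, the change in dispatch inside set_task_cpu() is roughly
the following (a condensed sketch of the hunks below; debug checks,
tracing and the nr_migrations/perf accounting are omitted):

	/* Before: only the task's current class was notified, so a task
	 * that is presently in another class (e.g. rt) never reached
	 * migrate_task_rq_fair() even though it still carries CFS load.
	 */
	if (p->sched_class->migrate_task_rq)
		p->sched_class->migrate_task_rq(p, new_cpu);

	/* After: every class implementing the new hooks is notified on
	 * both sides of __set_task_cpu(), regardless of which class the
	 * task currently belongs to.
	 */
	if (task_cpu(p) != new_cpu) {
		for_each_class(class)
			if (class->migrate_task_rq_from)
				class->migrate_task_rq_from(p, new_cpu);
		migrated = 1;
	}

	__set_task_cpu(p, new_cpu);

	if (migrated)
		for_each_class(class)
			if (class->migrate_task_rq_to)
				class->migrate_task_rq_to(p, new_cpu);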

Signed-off-by: Byungchul Park <byungchul.park@....com>
---
 include/linux/sched.h |    3 +++
 kernel/sched/core.c   |   17 +++++++++++--
 kernel/sched/fair.c   |   65 ++++++++++++++++++++++++++++++++++++++++---------
 kernel/sched/sched.h  |    3 ++-
 4 files changed, 74 insertions(+), 14 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 699228b..976cd9c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1235,6 +1235,9 @@ struct sched_entity {
 	struct list_head	group_node;
 	unsigned int		on_rq;
 
+	/* for indicating if a migration has happened. */
+	int			migrated;
+
 	u64			exec_start;
 	u64			sum_exec_runtime;
 	u64			vruntime;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 41b735e..d0c3c0b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1264,6 +1264,9 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
+	const struct sched_class *class;
+	int migrated = 0;
+
 #ifdef CONFIG_SCHED_DEBUG
 	/*
 	 * We should never call set_task_cpu() on a blocked task,
@@ -1291,13 +1294,23 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	trace_sched_migrate_task(p, new_cpu);
 
 	if (task_cpu(p) != new_cpu) {
-		if (p->sched_class->migrate_task_rq)
-			p->sched_class->migrate_task_rq(p, new_cpu);
+		for_each_class(class) {
+			if (class->migrate_task_rq_from)
+				class->migrate_task_rq_from(p, new_cpu);
+		}
+		migrated = 1;
 		p->se.nr_migrations++;
 		perf_event_task_migrate(p);
 	}
 
 	__set_task_cpu(p, new_cpu);
+
+	if (migrated) {
+		for_each_class(class) {
+			if (class->migrate_task_rq_to)
+				class->migrate_task_rq_to(p, new_cpu);
+		}
+	}
 }
 
 static void __migrate_swap_task(struct task_struct *p, int cpu)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e8dabc5..31ae787 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2775,9 +2775,9 @@ enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	struct sched_avg *sa = &se->avg;
 	u64 now = cfs_rq_clock_task(cfs_rq);
-	int migrated, decayed;
+	int decayed;
+	int migrated = xchg(&se->migrated, 0);
 
-	migrated = !sa->last_update_time;
 	if (!migrated) {
 		__update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
 			se->on_rq * scale_load_down(se->load.weight),
@@ -2808,11 +2808,8 @@ dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		max_t(s64,  cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
 }
 
-/*
- * Task first catches up with cfs_rq, and then subtract
- * itself from the cfs_rq (task must be off the queue now).
- */
-void remove_entity_load_avg(struct sched_entity *se)
+/* The caller only guarantees p->pi_lock is held. */
+static void __update_entity_load_avg(struct sched_entity *se)
 {
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	u64 last_update_time;
@@ -2830,11 +2827,28 @@ void remove_entity_load_avg(struct sched_entity *se)
 #endif
 
 	__update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
+}
+
+/* The caller only guarantees p->pi_lock is held. */
+static void __remove_entity_load_avg(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
 	atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
 	atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
 }
 
 /*
+ * Task first catches up with cfs_rq, and then subtract
+ * itself from the cfs_rq (task must be off the queue now).
+ */
+void remove_entity_load_avg(struct sched_entity *se)
+{
+	__update_entity_load_avg(se);
+	__remove_entity_load_avg(se);
+}
+
+/*
  * Update the rq's load with the elapsed running time before entering
  * idle. if the last scheduled task is not a CFS task, idle_enter will
  * be the only way to update the runnable statistic.
@@ -5009,7 +5023,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
  * previous cpu.  However, the caller only guarantees p->pi_lock is held; no
  * other assumptions, including the state of rq->lock, should be made.
  */
-static void migrate_task_rq_fair(struct task_struct *p, int next_cpu)
+static void migrate_task_rq_from_fair(struct task_struct *p, int next_cpu)
 {
 	/*
 	 * We are supposed to update the task to "current" time, then its up to date
@@ -5018,15 +5032,43 @@ static void migrate_task_rq_fair(struct task_struct *p, int next_cpu)
 	 * will result in the wakee task is less decayed, but giving the wakee more
 	 * load sounds not bad.
 	 */
-	remove_entity_load_avg(&p->se);
+	__update_entity_load_avg(&p->se);
+	if (p->sched_class == &fair_sched_class)
+		__remove_entity_load_avg(&p->se);
 
 	/* Tell new CPU we are migrated */
-	p->se.avg.last_update_time = 0;
+	p->se.migrated = 1;
 
 	/* We have migrated, no longer consider this task hot */
 	p->se.exec_start = 0;
 }
 
+/*
+ * Called immediately after a task is migrated to a new cpu; task_cpu(p) and
+ * cfs_rq_of(p) references at time of call identify the next cpu.  However,
+ * the caller only guarantees p->pi_lock is held; no other assumptions,
+ * including the state of rq->lock, should be made.
+ */
+static void migrate_task_rq_to_fair(struct task_struct *p, int next_cpu)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
+	u64 last_update_time;
+
+#ifndef CONFIG_64BIT
+	u64 last_update_time_copy;
+
+	do {
+		last_update_time_copy = cfs_rq->load_last_update_time_copy;
+		smp_rmb();
+		last_update_time = cfs_rq->avg.last_update_time;
+	} while (last_update_time != last_update_time_copy);
+#else
+	last_update_time = cfs_rq->avg.last_update_time;
+#endif
+
+	p->se.avg.last_update_time = last_update_time;
+}
+
 static void task_dead_fair(struct task_struct *p)
 {
 	remove_entity_load_avg(&p->se);
@@ -8314,7 +8356,8 @@ const struct sched_class fair_sched_class = {
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_fair,
-	.migrate_task_rq	= migrate_task_rq_fair,
+	.migrate_task_rq_from	= migrate_task_rq_from_fair,
+	.migrate_task_rq_to	= migrate_task_rq_to_fair,
 
 	.rq_online		= rq_online_fair,
 	.rq_offline		= rq_offline_fair,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9432c66..dd19c30 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1187,7 +1187,8 @@ struct sched_class {
 
 #ifdef CONFIG_SMP
 	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
-	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
+	void (*migrate_task_rq_from)(struct task_struct *p, int next_cpu);
+	void (*migrate_task_rq_to)(struct task_struct *p, int next_cpu);
 
 	void (*task_waking) (struct task_struct *task);
 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
-- 
1.7.9.5
