Message-ID: <1444812130-17880-2-git-send-email-byungchul.park@lge.com>
Date:	Wed, 14 Oct 2015 17:42:09 +0900
From:	<byungchul.park@....com>
To:	mingo@...nel.org, peterz@...radead.org
CC:	linux-kernel@...r.kernel.org, yuyang.du@...el.com, pjt@...gle.com,
	efault@....de, tglx@...utronix.de,
	Byungchul Park <byungchul.park@....com>
Subject: [PATCH v2 1/2] sched: make each sched class handle its rq assignment in its own class

From: Byungchul Park <byungchul.park@....com>

set_task_rq(), which is called regardless of sched class, currently assigns
both the fair class's cfs_rq and the rt class's rt_rq to a task. It would be
better for each class-specific assignment to be done by the class it belongs
to, so add a set_task_rq() method to struct sched_class and implement it in
the fair and rt classes. Additionally, this patch moves task_group(),
set_task_rq() and __set_task_cpu() below the sched class declarations so
that set_task_rq() can refer to the for_each_class() macro.
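
For illustration, a minimal user-space sketch of the dispatch pattern
introduced here (not kernel code: the class list, task type and printouts
are simplified stand-ins, and the hand-rolled list walk mimics the
for_each_class() macro):

/* sketch.c - simplified model of per-class set_task_rq() dispatch */
#include <stdio.h>

struct task;	/* stand-in for struct task_struct */

struct class_stub {
	const char *name;
	/* optional hook; NULL when a class has no rq state to assign */
	void (*set_task_rq)(struct task *p, unsigned int cpu);
	const struct class_stub *next;
};

static void set_task_rq_fair_stub(struct task *p, unsigned int cpu)
{
	(void)p;
	printf("fair: p->se.cfs_rq = tg->cfs_rq[%u]\n", cpu);
}

static void set_task_rq_rt_stub(struct task *p, unsigned int cpu)
{
	(void)p;
	printf("rt:   p->rt.rt_rq  = tg->rt_rq[%u]\n", cpu);
}

/* higher-priority class first, as in the kernel's class list */
static const struct class_stub fair_class = {
	"fair", set_task_rq_fair_stub, NULL
};
static const struct class_stub rt_class = {
	"rt", set_task_rq_rt_stub, &fair_class
};

/* the generic side: walk every class, call the hook where it exists */
static void set_task_rq_stub(struct task *p, unsigned int cpu)
{
	const struct class_stub *class;

	for (class = &rt_class; class; class = class->next)
		if (class->set_task_rq)
			class->set_task_rq(p, cpu);
}

int main(void)
{
	set_task_rq_stub(NULL, 3);	/* assign rq pointers for CPU 3 */
	return 0;
}

A class built without group scheduling simply leaves the hook NULL and is
skipped, matching the #ifdef CONFIG_RT_GROUP_SCHED guard around
.set_task_rq in rt_sched_class below.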

Signed-off-by: Byungchul Park <byungchul.park@....com>
---
 kernel/sched/fair.c  |    9 ++++
 kernel/sched/rt.c    |   14 ++++++
 kernel/sched/sched.h |  120 ++++++++++++++++++++++++--------------------------
 3 files changed, 80 insertions(+), 63 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 077076f..07882c2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8088,6 +8088,14 @@ static void task_move_group_fair(struct task_struct *p)
 	attach_task_cfs_rq(p);
 }
 
+static void set_task_rq_fair(struct task_struct *p, unsigned int cpu)
+{
+	struct task_group *tg = task_group(p);
+
+	p->se.cfs_rq = tg->cfs_rq[cpu];
+	p->se.parent = tg->se[cpu];
+}
+
 void free_fair_sched_group(struct task_group *tg)
 {
 	int i;
@@ -8306,6 +8314,7 @@ const struct sched_class fair_sched_class = {
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	.task_move_group	= task_move_group_fair,
+	.set_task_rq		= set_task_rq_fair,
 #endif
 };
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index e3cc163..f1a1320 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2143,6 +2143,16 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 	}
 }
 
+#ifdef CONFIG_RT_GROUP_SCHED
+static void set_task_rq_rt(struct task_struct *p, unsigned int cpu)
+{
+	struct task_group *tg = task_group(p);
+
+	p->rt.rt_rq  = tg->rt_rq[cpu];
+	p->rt.parent = tg->rt_se[cpu];
+}
+#endif
+
 /*
  * Priority of the task has changed. This may cause
  * us to initiate a push or pull.
@@ -2290,6 +2300,10 @@ const struct sched_class rt_sched_class = {
 	.switched_to		= switched_to_rt,
 
 	.update_curr		= update_curr_rt,
+
+#ifdef CONFIG_RT_GROUP_SCHED
+	.set_task_rq		= set_task_rq_rt,
+#endif
 };
 
 #ifdef CONFIG_SCHED_DEBUG
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index af6f252..7f73e89 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -905,69 +905,6 @@ static inline void sched_ttwu_pending(void) { }
 #include "stats.h"
 #include "auto_group.h"
 
-#ifdef CONFIG_CGROUP_SCHED
-
-/*
- * Return the group to which this task belongs.
- *
- * We cannot use task_css() and friends because the cgroup subsystem
- * changes that value before the cgroup_subsys::attach() method is called,
- * therefore we cannot pin it and might observe the wrong value.
- *
- * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
- * core changes this before calling sched_move_task().
- *
- * Instead we use a 'copy' which is updated from sched_move_task() while
- * holding both task_struct::pi_lock and rq::lock.
- */
-static inline struct task_group *task_group(struct task_struct *p)
-{
-	return p->sched_task_group;
-}
-
-/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
-static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
-{
-#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
-	struct task_group *tg = task_group(p);
-#endif
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	p->se.cfs_rq = tg->cfs_rq[cpu];
-	p->se.parent = tg->se[cpu];
-#endif
-
-#ifdef CONFIG_RT_GROUP_SCHED
-	p->rt.rt_rq  = tg->rt_rq[cpu];
-	p->rt.parent = tg->rt_se[cpu];
-#endif
-}
-
-#else /* CONFIG_CGROUP_SCHED */
-
-static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
-static inline struct task_group *task_group(struct task_struct *p)
-{
-	return NULL;
-}
-
-#endif /* CONFIG_CGROUP_SCHED */
-
-static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
-	set_task_rq(p, cpu);
-#ifdef CONFIG_SMP
-	/*
-	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
-	 * successfully executed on another CPU. We must ensure that updates of
-	 * per-task data have been completed by this moment.
-	 */
-	smp_wmb();
-	task_thread_info(p)->cpu = cpu;
-	p->wake_cpu = cpu;
-#endif
-}
-
 /*
  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
  */
@@ -1222,6 +1159,8 @@ struct sched_class {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	void (*task_move_group) (struct task_struct *p);
 #endif
+
+	void (*set_task_rq) (struct task_struct *p, unsigned int cpu);
 };
 
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
@@ -1239,6 +1178,61 @@ extern const struct sched_class rt_sched_class;
 extern const struct sched_class fair_sched_class;
 extern const struct sched_class idle_sched_class;
 
+#ifdef CONFIG_CGROUP_SCHED
+
+/*
+ * Return the group to which this task belongs.
+ *
+ * We cannot use task_css() and friends because the cgroup subsystem
+ * changes that value before the cgroup_subsys::attach() method is called,
+ * therefore we cannot pin it and might observe the wrong value.
+ *
+ * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
+ * core changes this before calling sched_move_task().
+ *
+ * Instead we use a 'copy' which is updated from sched_move_task() while
+ * holding both task_struct::pi_lock and rq::lock.
+ */
+static inline struct task_group *task_group(struct task_struct *p)
+{
+	return p->sched_task_group;
+}
+
+/* Have each class update a task's per-class rq and parent entity when it moves across CPUs/groups */
+static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
+{
+	const struct sched_class *class;
+
+	for_each_class(class) {
+		if (class->set_task_rq)
+			class->set_task_rq(p, cpu);
+	}
+}
+
+#else /* CONFIG_CGROUP_SCHED */
+
+static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
+static inline struct task_group *task_group(struct task_struct *p)
+{
+	return NULL;
+}
+
+#endif /* CONFIG_CGROUP_SCHED */
+
+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+	set_task_rq(p, cpu);
+#ifdef CONFIG_SMP
+	/*
+	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+	 * successfully executed on another CPU. We must ensure that updates of
+	 * per-task data have been completed by this moment.
+	 */
+	smp_wmb();
+	task_thread_info(p)->cpu = cpu;
+	p->wake_cpu = cpu;
+#endif
+}
 
 #ifdef CONFIG_SMP
 
-- 
1.7.9.5
