Message-ID: <1294677746.8345.4.camel@marge.simson.net>
Date:	Mon, 10 Jan 2011 17:42:26 +0100
From:	Mike Galbraith <efault@....de>
To:	Peter Zijlstra <a.p.zijlstra@...llo.nl>
Cc:	bharata@...ux.vnet.ibm.com, Ingo Molnar <mingo@...e.hu>,
	linux-kernel@...r.kernel.org
Subject: Re: [patch] Re: autogroup: sched_setscheduler() fails

On Mon, 2011-01-10 at 11:59 +0100, Peter Zijlstra wrote:
> On Mon, 2011-01-10 at 11:29 +0100, Mike Galbraith wrote:
> >
> > Autogroup does not support realtime task groups, so make selection of
> > SCHED_AUTOGROUP exclude RT_GROUP_SCHED.
> > 

> Uhm, no. The right way is to put tasks back into the root group and then
> perform sched_setscheduler().

I think that would take more lines; does this look like a right way too?
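
For reference, the check that trips in __sched_setscheduler() is (roughly)
the one below, taken from the user == 1 path in kernel/sched.c; handing the
autogroup RUNTIME_INF rt_runtime is what lets it proceed:

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Do not allow realtime tasks into groups that have no runtime
	 * assigned.
	 */
	if (rt_bandwidth_enabled() && rt_policy(policy) &&
			task_group(p)->rt_bandwidth.rt_runtime == 0)
		return -EPERM;
#endif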

sched, autogroup: move tasks to the appropriate runqueue on policy change.

If CONFIG_RT_GROUP_SCHED is set, sched_setscheduler() fails for tasks in an
autogroup because the autogroup's task group has no rt_runtime allocated.
Fool __sched_setscheduler() into proceeding, and move the task to the
appropriate runqueue upon policy change.

Signed-off-by: Mike Galbraith <efault@....de>
Reported-by: Bharata B Rao <bharata@...ux.vnet.ibm.com>

---
 kernel/sched.c           |    9 ++++++---
 kernel/sched_autogroup.c |   18 ++++++++++++++++++
 kernel/sched_autogroup.h |    1 +
 3 files changed, 25 insertions(+), 3 deletions(-)
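
An illustrative reproducer (not part of the patch): run as root on a kernel
with CONFIG_RT_GROUP_SCHED=y and autogroup enabled, and the
sched_setscheduler() call below returns EPERM before this change.

#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

int main(void)
{
	struct sched_param param = { .sched_priority = 1 };

	/*
	 * Switch the calling task (which the kernel has placed in an
	 * autogroup) to SCHED_FIFO.  Without this patch the autogroup's
	 * task group has rt_runtime == 0, so the call fails with EPERM
	 * even for root.
	 */
	if (sched_setscheduler(0, SCHED_FIFO, &param) == -1) {
		printf("sched_setscheduler: %s\n", strerror(errno));
		return 1;
	}

	printf("now running SCHED_FIFO\n");
	return 0;
}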

Index: linux-2.6/kernel/sched.c
===================================================================
--- linux-2.6.orig/kernel/sched.c
+++ linux-2.6/kernel/sched.c
@@ -4571,6 +4571,8 @@ void rt_mutex_setprio(struct task_struct
 
 	p->prio = prio;
 
+	autogroup_setscheduler(p, on_rq);
+
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (on_rq) {
@@ -4738,7 +4740,7 @@ static struct task_struct *find_process_
 
 /* Actually do priority change: must hold rq lock. */
 static void
-__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
+__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio, int on_rq)
 {
 	BUG_ON(p->se.on_rq);
 
@@ -4752,6 +4754,7 @@ __setscheduler(struct rq *rq, struct tas
 	else
 		p->sched_class = &fair_sched_class;
 	set_load_weight(p);
+	autogroup_setscheduler(p, on_rq);
 }
 
 /*
@@ -4900,7 +4903,7 @@ recheck:
 
 	oldprio = p->prio;
 	prev_class = p->sched_class;
-	__setscheduler(rq, p, policy, param->sched_priority);
+	__setscheduler(rq, p, policy, param->sched_priority, on_rq);
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
@@ -8116,7 +8119,7 @@ static void normalize_task(struct rq *rq
 	on_rq = p->se.on_rq;
 	if (on_rq)
 		deactivate_task(rq, p, 0);
-	__setscheduler(rq, p, SCHED_NORMAL, 0);
+	__setscheduler(rq, p, SCHED_NORMAL, 0, on_rq);
 	if (on_rq) {
 		activate_task(rq, p, 0);
 		resched_task(rq->curr);
Index: linux-2.6/kernel/sched_autogroup.c
===================================================================
--- linux-2.6.orig/kernel/sched_autogroup.c
+++ linux-2.6/kernel/sched_autogroup.c
@@ -73,6 +73,15 @@ static inline struct autogroup *autogrou
 	ag->id = atomic_inc_return(&autogroup_seq_nr);
 	ag->tg = tg;
 	tg->autogroup = ag;
+#ifdef CONFIG_RT_GROUP_SCHED
+	/*
+	 * HACK: autogroup RT tasks run in the root task group.
+	 * This fools __sched_setscheduler() into proceeding on
+	 * so we can move the task to the appropriate runqueue
+	 * upon scheduling policy change.
+	 */
+	tg->rt_bandwidth.rt_runtime = RUNTIME_INF;
+#endif
 
 	return ag;
 
@@ -143,6 +152,15 @@ autogroup_move_group(struct task_struct
 	autogroup_kref_put(prev);
 }
 
+static inline void
+autogroup_setscheduler(struct task_struct *p, int on_rq)
+{
+	if (p->sched_class->task_move_group)
+		p->sched_class->task_move_group(p, on_rq);
+	else
+		set_task_rq(p, task_cpu(p));
+}
+
 /* Allocates GFP_KERNEL, cannot be called under any spinlock */
 void sched_autogroup_create_attach(struct task_struct *p)
 {
Index: linux-2.6/kernel/sched_autogroup.h
===================================================================
--- linux-2.6.orig/kernel/sched_autogroup.h
+++ linux-2.6/kernel/sched_autogroup.h
@@ -15,6 +15,7 @@ autogroup_task_group(struct task_struct
 
 static inline void autogroup_init(struct task_struct *init_task) {  }
 static inline void autogroup_free(struct task_group *tg) { }
+static inline void autogroup_setscheduler(struct task_struct *p, int on_rq) { }
 
 static inline struct task_group *
 autogroup_task_group(struct task_struct *p, struct task_group *tg)


