Date:	Tue, 1 Jul 2008 15:32:09 +0200
From:	"Dmitry Adamushko" <dmitry.adamushko@...il.com>
To:	"Gregory Haskins" <ghaskins@...ell.com>
Cc:	"Ingo Molnar" <mingo@...e.hu>,
	"Steven Rostedt" <rostedt@...dmis.org>,
	"Thomas Gleixner" <tglx@...utronix.de>,
	linux-kernel@...r.kernel.org
Subject: [sched-devel, patch-rfc] rework #2 of "prioritize non-migratable tasks over migratable ones"

Hi,


This is a continuation of another thread:
http://www.ussg.iu.edu/hypermail/linux/kernel/0807.0/0134.html


The following patch is just an illustration; it has not been tested, not even compile-tested.


(1) handle in a generic way all cases where a newly woken-up task is
not migratable (not just the corner case where "rt_se->nr_cpus_allowed
== 1");

(2) if current is to be preempted, make sure "p" will actually be
picked up by pick_next_task_rt(), i.e. move the task's group to the
head of its list as well.

Currently, that does not happen with group scheduling, as described
here (see the sketch below):
http://www.ussg.iu.edu/hypermail/linux/kernel/0807.0/0134.html
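
To illustrate the point: the sketch below is a minimal, userspace-only
model of the pick path under group scheduling; it is not the kernel
code, and the two-level layout and names are made up.
pick_next_task_rt() effectively takes the first entity of the
highest-priority queue at each level of the hierarchy, so putting only
the task entity at the head of its own queue is not enough; its group
entity has to be moved to the head of the parent queue as well, which
is what the for_each_sched_rt_entity() loop in requeue_task_rt() does
in the patch.

#include <stdio.h>

struct entity {
	const char	*name;
	struct entity	*child;	/* head of this group's own queue; NULL for a task */
	struct entity	*next;	/* next entity in the same queue (pick always takes the head) */
};

static const char *pick_next(struct entity *queue_head)
{
	struct entity *se = queue_head;

	/* descend the hierarchy, taking the first entity at each level */
	while (se->child)
		se = se->child;
	return se->name;
}

int main(void)
{
	/* two groups at the same priority; "p" has just woken up in group B */
	struct entity p      = { "p",       NULL,   NULL };
	struct entity groupB = { "group B", &p,     NULL };
	struct entity other  = { "other",   NULL,   NULL };
	struct entity groupA = { "group A", &other, &groupB };

	/*
	 * "p" sits at the head of group B's queue, but group B is not at
	 * the head of the top-level queue, so "p" is not picked:
	 */
	printf("picked: %s\n", pick_next(&groupA));	/* -> "other" */

	/*
	 * Move group B to the head of the top-level queue as well (what
	 * requeue_task_rt(..., head = 1) does at every level of the
	 * hierarchy); now "p" is the next task:
	 */
	groupB.next = &groupA;
	groupA.next = NULL;
	printf("picked: %s\n", pick_next(&groupB));	/* -> "p" */

	return 0;
}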


What do you think?


(a non-whitespace-damaged version is attached)


---

--- sched_rt-old.c      2008-07-01 11:42:30.000000000 +0200
+++ sched_rt.c  2008-07-01 15:00:55.000000000 +0200
@@ -599,11 +599,7 @@ static void __enqueue_rt_entity(struct s
        if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
                return;

-       if (rt_se->nr_cpus_allowed == 1)
-               list_add(&rt_se->run_list, queue);
-       else
-               list_add_tail(&rt_se->run_list, queue);
-
+       list_add_tail(&rt_se->run_list, queue);
        __set_bit(rt_se_prio(rt_se), array->bitmap);

        inc_rt_tasks(rt_se, rt_rq);
@@ -689,31 +685,33 @@ static void dequeue_task_rt(struct rq *r
  * followed by enqueue.
  */
 static
-void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
+void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
 {
-       struct rt_prio_array *array = &rt_rq->active;
-
        if (on_rt_rq(rt_se)) {
-               list_del_init(&rt_se->run_list);
-               list_add_tail(&rt_se->run_list,
-                             array->queue + rt_se_prio(rt_se));
+               struct rt_prio_array *array = &rt_rq->active;
+               struct list_head *queue = array->queue + rt_se_prio(rt_se);
+
+               if (head)
+                       list_move(&rt_se->run_list, queue);
+               else
+                       list_move_tail(&rt_se->run_list, queue);
        }
 }

-static void requeue_task_rt(struct rq *rq, struct task_struct *p)
+static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
 {
        struct sched_rt_entity *rt_se = &p->rt;
        struct rt_rq *rt_rq;

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);
-               requeue_rt_entity(rt_rq, rt_se);
+               requeue_rt_entity(rt_rq, rt_se, head);
        }
 }

 static void yield_task_rt(struct rq *rq)
 {
-       requeue_task_rt(rq, rq->curr);
+       requeue_task_rt(rq, rq->curr, 0);
 }

 #ifdef CONFIG_SMP
@@ -753,6 +751,29 @@ static int select_task_rq_rt(struct task
         */
        return task_cpu(p);
 }
+
+static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
+{
+       cpumask_t mask;
+
+       if (rq->curr->rt.nr_cpus_allowed == 1 || p->rt.nr_cpus_allowed == 1)
+               return;
+
+       if (cpupri_find(&rq->rd->cpupri, p, &mask))
+               return;
+
+       if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
+               return;
+
+       /*
+        * There appears to be other cpus that can accept
+        * current and none to run 'p', so let's reschedule
+        * to try and push current away:
+        */
+       requeue_task_rt(rq, p, 1);
+       resched_task(rq->curr);
+}
+
 #endif /* CONFIG_SMP */

 /*
@@ -778,18 +799,8 @@ static void check_preempt_curr_rt(struct
         * to move current somewhere else, making room for our non-migratable
         * task.
         */
-       if((p->prio == rq->curr->prio)
-          && p->rt.nr_cpus_allowed == 1
-          && rq->curr->rt.nr_cpus_allowed != 1) {
-               cpumask_t mask;
-
-               if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
-                       /*
-                        * There appears to be other cpus that can accept
-                        * current, so lets reschedule to try and push it away
-                        */
-                       resched_task(rq->curr);
-       }
+       if (p->prio == rq->curr->prio && !need_resched())
+               check_preempt_equal_prio(rq, p);
 #endif
 }

@@ -1415,7 +1426,7 @@ static void task_tick_rt(struct rq *rq,
         * on the queue:
         */
        if (p->rt.run_list.prev != p->rt.run_list.next) {
-               requeue_task_rt(rq, p);
+               requeue_task_rt(rq, p, 0);
                set_tsk_need_resched(p);
        }
 }

---
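
For clarity, here is a minimal userspace sketch of what the new "head"
argument of requeue_rt_entity()/requeue_task_rt() selects between. It
reimplements just enough of the <linux/list.h> helpers to run
standalone (simplified stand-ins, not the kernel's implementation):
list_move() requeues an entry at the head of its per-priority queue,
so it gets picked next, while list_move_tail() keeps the existing
round-robin behaviour that yield_task_rt() and task_tick_rt() want
(they pass head = 0).

#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *prev, *next;
};

#define LIST_HEAD_INIT(name)	{ &(name), &(name) }

static void __list_add(struct list_head *entry,
		       struct list_head *prev, struct list_head *next)
{
	next->prev = entry;
	entry->next = next;
	entry->prev = prev;
	prev->next = entry;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

/* requeue at the head: the entry will be picked next */
static void list_move(struct list_head *entry, struct list_head *head)
{
	list_del(entry);
	__list_add(entry, head, head->next);
}

/* requeue at the tail: plain round-robin */
static void list_move_tail(struct list_head *entry, struct list_head *head)
{
	list_del(entry);
	__list_add(entry, head->prev, head);
}

struct task {
	const char	 *name;
	struct list_head run_list;
};

static void print_queue(const char *what, struct list_head *queue)
{
	struct list_head *pos;

	printf("%s:", what);
	for (pos = queue->next; pos != queue; pos = pos->next) {
		struct task *t = (struct task *)
			((char *)pos - offsetof(struct task, run_list));
		printf(" %s", t->name);
	}
	printf("\n");
}

int main(void)
{
	struct list_head queue = LIST_HEAD_INIT(queue);
	struct task a = { "A" }, b = { "B" }, p = { "p" };

	/* enqueue A, B and p at the tail, in that order */
	__list_add(&a.run_list, queue.prev, &queue);
	__list_add(&b.run_list, queue.prev, &queue);
	__list_add(&p.run_list, queue.prev, &queue);
	print_queue("initial      ", &queue);		/* A B p */

	list_move(&p.run_list, &queue);			/* head == 1 */
	print_queue("requeued head", &queue);		/* p A B */

	list_move_tail(&p.run_list, &queue);		/* head == 0 */
	print_queue("requeued tail", &queue);		/* A B p */

	return 0;
}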



-- 
Best regards,
Dmitry Adamushko

Attachment: "resched-eqial-prio.patch" (text/x-patch, 3172 bytes)
