Date:	Wed, 23 Sep 2015 00:37:18 +0000
From:	"Meyer, Mike" <Mike.Meyer@...adata.com>
To:	"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
CC:	"mingo@...hat.com" <mingo@...hat.com>,
	"peterz@...radead.org" <peterz@...radead.org>
Subject: [PATCH] sched: fix task and run queue run_delay inconsistencies

During evaluation of some performance data, it was discovered that
thread and run queue run_delay accounting data were inconsistent with
the other accounting data collected.  Further investigation found
that, under certain circumstances, execution time was leaking into
the task and run queue accounting of run_delay.
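
For reference, a task's run_delay (time spent waiting on a runqueue)
is exported to userspace as the second field of /proc/<pid>/schedstat
and aggregated per cpu in /proc/schedstat; see
Documentation/scheduler/sched-stats.txt.  A minimal userspace sketch
for watching the counter on the current task:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long oncpu, run_delay, pcount;
            FILE *f = fopen("/proc/self/schedstat", "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            /* Fields: time on cpu (ns), run_delay (ns), timeslices. */
            if (fscanf(f, "%llu %llu %llu",
                       &oncpu, &run_delay, &pcount) == 3)
                    printf("oncpu=%llu run_delay=%llu slices=%llu\n",
                           oncpu, run_delay, pcount);
            fclose(f);
            return 0;
    }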

Consider the following sequence:

    a. thread is running.
    b. thread moves between cgroups, changes scheduling class or priority.
    c. thread sleeps OR
    d. thread involuntarily gives up cpu.

a. implies:

    thread->sched_info.last_queued = 0

a. and b. result in the following:

    1. dequeue_task(rq, thread)

           sched_info_dequeued(rq, thread)
               delta = 0

               sched_info_reset_dequeued(thread)
                   thread->sched_info.last_queued = 0

               thread->sched_info.run_delay += delta

    2. enqueue_task(rq, thread)

           sched_info_queued(rq, thread)

               /* thread is still on cpu at this point. */
               thread->sched_info.last_queued = task_rq(thread)->clock;
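
    For reference, the helpers involved look roughly like the
    following in kernel/sched/stats.h of this era (a paraphrased
    sketch, sched_info_on() guards omitted).  Note that
    sched_info_queued() only stamps last_queued when it is currently
    0, which is why the reset in 1. makes 2. record the clock even
    though the thread is still on cpu:

        static inline void sched_info_reset_dequeued(struct task_struct *t)
        {
                t->sched_info.last_queued = 0;
        }

        /* Charge clock - last_queued (if set) to run_delay, then reset. */
        static inline void sched_info_dequeued(struct rq *rq,
                                               struct task_struct *t)
        {
                unsigned long long now = rq_clock(rq), delta = 0;

                if (t->sched_info.last_queued)
                        delta = now - t->sched_info.last_queued;
                sched_info_reset_dequeued(t);
                t->sched_info.run_delay += delta;

                rq_sched_info_dequeued(rq, delta);
        }

        /* Stamp last_queued only if it is not already set. */
        static inline void sched_info_queued(struct rq *rq,
                                             struct task_struct *t)
        {
                if (!t->sched_info.last_queued)
                        t->sched_info.last_queued = rq_clock(rq);
        }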

c. results in:

    dequeue_task(rq, thread)

        sched_info_dequeued(rq, thread)

            /* delta is execution time, not run_delay. */
            delta = task_rq(thread)->clock - thread->sched_info.last_queued

        sched_info_reset_dequeued(thread)
            thread->sched_info.last_queued = 0

        thread->sched_info.run_delay += delta

    Since thread was running between enqueue_task(rq, thread) and
    dequeue_task(rq, thread), the delta above is really execution
    time and not run_delay.
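
    To make this concrete, a worked example with made-up clock values
    (purely illustrative):

        t=1000  b. dequeue_task():  delta = 0
                                    last_queued = 0
                   enqueue_task():  last_queued = 1000  (thread on cpu)
        t=5000  c. dequeue_task():  delta = 5000 - 1000 = 4000
                                    run_delay += 4000

    The 4000 units charged to run_delay here are time the thread
    spent executing, not waiting.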

d. results in:

    __sched_info_switch(thread, next_thread)

        sched_info_depart(rq, thread)

            sched_info_queued(rq, thread)

                /* last_queued is not updated because it is already non-zero. */
                return

    Since thread was running between enqueue_task(rq, thread) and
    __sched_info_switch(thread, next_thread), and last_queued was
    never updated over that interval, the execution time between
    enqueue_task(rq, thread) and __sched_info_switch(thread,
    next_thread) will be charged to run_delay, as shown below.
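
    The misaccounting materializes on the thread's next dispatch:
    when thread is later picked to run again, __sched_info_switch()
    calls sched_info_arrive(rq, thread), which (again roughly,
    paraphrased from kernel/sched/stats.h of this era) measures from
    the stale last_queued stamp:

        /* Called for prev as it leaves the cpu.  A still-runnable
         * prev is re-queued for wait accounting, but last_queued is
         * already non-zero here, so the stale stamp survives. */
        static inline void sched_info_depart(struct rq *rq,
                                             struct task_struct *t)
        {
                unsigned long long delta =
                        rq_clock(rq) - t->sched_info.last_arrival;

                rq_sched_info_depart(rq, delta);

                if (t->state == TASK_RUNNING)
                        sched_info_queued(rq, t);
        }

        /* Called for next as it gets the cpu: everything since
         * last_queued is charged to run_delay -- including the
         * execution time accrued after the stamp taken in 2. above. */
        static void sched_info_arrive(struct rq *rq, struct task_struct *t)
        {
                unsigned long long now = rq_clock(rq), delta = 0;

                if (t->sched_info.last_queued)
                        delta = now - t->sched_info.last_queued;
                sched_info_reset_dequeued(t);
                t->sched_info.run_delay += delta;
                t->sched_info.last_arrival = now;
                t->sched_info.pcount++;

                rq_sched_info_arrive(rq, delta);
        }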

The proposed patch addresses the issue by calling
sched_info_reset_dequeued(thread) after the call to
enqueue_task(rq, thread) for running threads, in situations where
thread->sched_info.last_queued should remain 0.

Signed-off-by: Mike Meyer <mike.meyer@...adata.com>
---
 kernel/sched/core.c | 36 ++++++++++++++++++++++++++++++------
 1 file changed, 30 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2f9c928..88bfe43 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1187,8 +1187,12 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (queued)
+	if (queued) {
 		enqueue_task(rq, p, 0);
+
+		if (running)
+			sched_info_reset_dequeued(p);
+	}
 }
 
 /*
@@ -3378,9 +3382,13 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (queued)
+	if (queued) {
 		enqueue_task(rq, p, enqueue_flag);
 
+		if (running)
+			sched_info_reset_dequeued(p);
+	}
+
 	check_class_changed(rq, p, prev_class, oldprio);
 out_unlock:
 	preempt_disable(); /* avoid rq from going away on us */
@@ -3393,7 +3401,7 @@ out_unlock:
 
 void set_user_nice(struct task_struct *p, long nice)
 {
-	int old_prio, delta, queued;
+	int old_prio, delta, queued, running;
 	unsigned long flags;
 	struct rq *rq;
 
@@ -3415,6 +3423,7 @@ void set_user_nice(struct task_struct *p, long nice)
 		goto out_unlock;
 	}
 	queued = task_on_rq_queued(p);
+	running = task_current(rq, p);
 	if (queued)
 		dequeue_task(rq, p, 0);
 
@@ -3426,11 +3435,15 @@ void set_user_nice(struct task_struct *p, long nice)
 
 	if (queued) {
 		enqueue_task(rq, p, 0);
+
+		if (running)
+			sched_info_reset_dequeued(p);
+
 		/*
 		 * If the task increased its priority or is running and
 		 * lowered its priority, then reschedule its CPU:
 		 */
-		if (delta < 0 || (delta > 0 && task_running(rq, p)))
+		if (delta < 0 || (delta > 0 && running))
 			resched_curr(rq);
 	}
 out_unlock:
@@ -3945,6 +3958,9 @@ change:
 		 * increased (user space view).
 		 */
 		enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0);
+
+		if (running)
+			sched_info_reset_dequeued(p);
 	}
 
 	check_class_changed(rq, p, prev_class, oldprio);
@@ -5093,8 +5109,12 @@ void sched_setnuma(struct task_struct *p, int nid)
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (queued)
+	if (queued) {
 		enqueue_task(rq, p, 0);
+
+		if (running)
+			sched_info_reset_dequeued(p);
+	}
 	task_rq_unlock(rq, p, &flags);
 }
 #endif /* CONFIG_NUMA_BALANCING */
@@ -7735,9 +7755,13 @@ void sched_move_task(struct task_struct *tsk)
 
 	if (unlikely(running))
 		tsk->sched_class->set_curr_task(rq);
-	if (queued)
+	if (queued) {
 		enqueue_task(rq, tsk, 0);
 
+		if (unlikely(running))
+			sched_info_reset_dequeued(tsk);
+	}
+
 	task_rq_unlock(rq, tsk, &flags);
 }
 #endif /* CONFIG_CGROUP_SCHED */
-- 
2.1.4


