lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <1377784669-28140-14-git-send-email-huawei.libin@huawei.com>
Date:	Thu, 29 Aug 2013 21:57:48 +0800
From:	Libin <huawei.libin@...wei.com>
To:	<akpm@...ux-foundation.org>, <tj@...nel.org>,
	<viro@...iv.linux.org.uk>, <eparis@...hat.com>,
	<tglx@...utronix.de>, <rusty@...tcorp.com.au>,
	<ebiederm@...ssion.com>, <paulmck@...ux.vnet.ibm.com>,
	<john.stultz@...aro.org>, <mingo@...hat.com>,
	<peterz@...radead.org>, <gregkh@...uxfoundation.org>
CC:	<linux-kernel@...r.kernel.org>, <lizefan@...wei.com>,
	<jovi.zhangwei@...wei.com>, <guohanjun@...wei.com>,
	<zhangdianfang@...wei.com>, <wangyijing@...wei.com>,
	<huawei.libin@...wei.com>
Subject: [PATCH 13/14] workqueue: Fix invalid wakeup in rescuer_thread

If a thread is preempted before calling set_current_state(TASK_INTERRUPTIBLE),
another thread may set the condition and then call wake_up_process. When the
first thread is re-scheduled, it calls set_current_state to put itself into
TASK_INTERRUPTIBLE; if it is preempted again after that and before
__set_current_state(TASK_RUNNING), the wakeup is missed — the invalid wakeup problem.

To solve this problem, use preempt_disable() to bound the operation of
setting the task state together with the validation of the condition (set by the waking thread).

Signed-off-by: Libin <huawei.libin@...wei.com>
---
 kernel/workqueue.c | 92 +++++++++++++++++++++++++++++-------------------------
 1 file changed, 49 insertions(+), 43 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7f5d4be..2dcdd30 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2358,63 +2358,69 @@ static int rescuer_thread(void *__rescuer)
 	 * doesn't participate in concurrency management.
 	 */
 	rescuer->task->flags |= PF_WQ_WORKER;
-repeat:
-	set_current_state(TASK_INTERRUPTIBLE);
+	for (;;) {
 
-	if (kthread_should_stop()) {
+		if (kthread_should_stop()) {
+			rescuer->task->flags &= ~PF_WQ_WORKER;
+			return 0;
+		}
+
+		preempt_disable();
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (list_empty(&wq->maydays)) {
+			preempt_enable();
+			schedule();
+			preempt_disable();
+		}
 		__set_current_state(TASK_RUNNING);
-		rescuer->task->flags &= ~PF_WQ_WORKER;
-		return 0;
-	}
+		preempt_enable();
 
-	/* see whether any pwq is asking for help */
-	spin_lock_irq(&wq_mayday_lock);
+		/* see whether any pwq is asking for help */
+		spin_lock_irq(&wq_mayday_lock);
 
-	while (!list_empty(&wq->maydays)) {
-		struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
+		while (!list_empty(&wq->maydays)) {
+			struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
 					struct pool_workqueue, mayday_node);
-		struct worker_pool *pool = pwq->pool;
-		struct work_struct *work, *n;
+			struct worker_pool *pool = pwq->pool;
+			struct work_struct *work, *n;
 
-		__set_current_state(TASK_RUNNING);
-		list_del_init(&pwq->mayday_node);
+			list_del_init(&pwq->mayday_node);
 
-		spin_unlock_irq(&wq_mayday_lock);
+			spin_unlock_irq(&wq_mayday_lock);
 
-		/* migrate to the target cpu if possible */
-		worker_maybe_bind_and_lock(pool);
-		rescuer->pool = pool;
+			/* migrate to the target cpu if possible */
+			worker_maybe_bind_and_lock(pool);
+			rescuer->pool = pool;
 
-		/*
-		 * Slurp in all works issued via this workqueue and
-		 * process'em.
-		 */
-		WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
-		list_for_each_entry_safe(work, n, &pool->worklist, entry)
-			if (get_work_pwq(work) == pwq)
-				move_linked_works(work, scheduled, &n);
+			/*
+			 * Slurp in all works issued via this workqueue and
+			 * process'em.
+			 */
+			WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
+			list_for_each_entry_safe(work, n, &pool->worklist, entry)
+				if (get_work_pwq(work) == pwq)
+					move_linked_works(work, scheduled, &n);
 
-		process_scheduled_works(rescuer);
+			process_scheduled_works(rescuer);
 
-		/*
-		 * Leave this pool.  If keep_working() is %true, notify a
-		 * regular worker; otherwise, we end up with 0 concurrency
-		 * and stalling the execution.
-		 */
-		if (keep_working(pool))
-			wake_up_worker(pool);
+			/*
+			 * Leave this pool.  If keep_working() is %true, notify a
+			 * regular worker; otherwise, we end up with 0 concurrency
+			 * and stalling the execution.
+			 */
+			if (keep_working(pool))
+				wake_up_worker(pool);
 
-		rescuer->pool = NULL;
-		spin_unlock(&pool->lock);
-		spin_lock(&wq_mayday_lock);
-	}
+			rescuer->pool = NULL;
+			spin_unlock(&pool->lock);
+			spin_lock(&wq_mayday_lock);
+		}
 
-	spin_unlock_irq(&wq_mayday_lock);
+		spin_unlock_irq(&wq_mayday_lock);
 
-	/* rescuers should never participate in concurrency management */
-	WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
-	schedule();
-	goto repeat;
+		/* rescuers should never participate in concurrency management */
+		WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
+	}
 }
 
 struct wq_barrier {
-- 
1.8.2.1


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ