Message-ID: <20200813014555.550173612@goodmis.org>
Date:   Wed, 12 Aug 2020 21:45:36 -0400
From:   Steven Rostedt <rostedt@...dmis.org>
To:     linux-kernel@...r.kernel.org,
        linux-rt-users <linux-rt-users@...r.kernel.org>
Cc:     Thomas Gleixner <tglx@...utronix.de>,
        Carsten Emde <C.Emde@...dl.org>,
        Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
        John Kacur <jkacur@...hat.com>, Daniel Wagner <wagi@...om.org>,
        Tom Zanussi <zanussi@...nel.org>,
        "Srivatsa S. Bhat" <srivatsa@...il.mit.edu>
Subject: [PATCH RT 2/6] workqueue: Sync with upstream

5.4.54-rt33-rc1 stable review patch.
If anyone has any objections, please let me know.

------------------

From: Sebastian Andrzej Siewior <bigeasy@...utronix.de>

This is an all-in-one patch reverting the following commits:
  workqueue: Don't assume that the callback has interrupts disabled
  sched/swait: Add swait_event_lock_irq()
  workqueue: Use swait for wq_manager_wait
  workqueue: Convert the locks to raw type

and introducing the following commits from upstream:
  workqueue: Use rcuwait for wq_manager_wait
  workqueue: Convert the pool::lock and wq_mayday_lock to raw_spinlock_t

as a replacement.
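
For background: the removed swait_event_lock_irq() held pool->lock across
the entire wait, while the upstream rcuwait pattern has the condition
helper take the lock itself and return with it held only on success.
Below is a minimal, self-contained sketch of that wait/wake pattern; the
names my_lock, my_wait, busy, cond_locked(), waiter() and waker() are
illustrative only and do not appear in the patch:

#include <linux/rcuwait.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(my_lock);
static struct rcuwait my_wait = __RCUWAIT_INITIALIZER(my_wait);
static bool busy = true;

/*
 * Condition helper: returns true with my_lock held (IRQs off) once the
 * flag clears; otherwise it drops the lock and reports failure so that
 * rcuwait_wait_event() can sleep without the lock held.
 */
static bool cond_locked(void)
{
	raw_spin_lock_irq(&my_lock);
	if (busy) {
		raw_spin_unlock_irq(&my_lock);
		return false;
	}
	return true;
}

/* Waiter: sleeps until cond_locked() succeeds, then owns my_lock. */
static void waiter(void)
{
	rcuwait_wait_event(&my_wait, cond_locked());
	/* ... critical section, my_lock held, IRQs off ... */
	raw_spin_unlock_irq(&my_lock);
}

/* Waker: clears the flag, then wakes a sleeping rcuwait waiter. */
static void waker(void)
{
	raw_spin_lock_irq(&my_lock);
	busy = false;
	raw_spin_unlock_irq(&my_lock);
	rcuwait_wake_up(&my_wait);
}

Note that in v5.4 rcuwait_wait_event() sleeps in TASK_UNINTERRUPTIBLE and
rcuwait allows only a single waiter; that is sufficient here because
put_unbound_pool() is serialized by wq_pool_mutex.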

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Signed-off-by: Steven Rostedt (VMware) <rostedt@...dmis.org>
---
 include/linux/swait.h | 14 --------------
 kernel/workqueue.c    | 28 +++++++++++++++++++---------
 2 files changed, 19 insertions(+), 23 deletions(-)

diff --git a/include/linux/swait.h b/include/linux/swait.h
index 21ae66cd41d3..f426a0661aa0 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -299,18 +299,4 @@ do {									\
 	__ret;								\
 })
 
-#define __swait_event_lock_irq(wq, condition, lock, cmd)		\
-	___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0,		\
-		       raw_spin_unlock_irq(&lock);			\
-		       cmd;						\
-		       schedule();					\
-		       raw_spin_lock_irq(&lock))
-
-#define swait_event_lock_irq(wq_head, condition, lock)			\
-	do {								\
-		if (condition)						\
-			break;						\
-		__swait_event_lock_irq(wq_head, condition, lock, );	\
-	} while (0)
-
 #endif /* _LINUX_SWAIT_H */
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 26341e8101ee..7c968aea01e1 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -50,7 +50,6 @@
 #include <linux/uaccess.h>
 #include <linux/sched/isolation.h>
 #include <linux/nmi.h>
-#include <linux/swait.h>
 
 #include "workqueue_internal.h"
 
@@ -302,7 +301,8 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
 static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
 static DEFINE_RAW_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
-static DECLARE_SWAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
+/* wait for manager to go away */
+static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
 
 static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
 static bool workqueue_freezing;		/* PL: have wqs started freezing? */
@@ -1615,11 +1615,9 @@ EXPORT_SYMBOL_GPL(queue_work_node);
 void delayed_work_timer_fn(struct timer_list *t)
 {
 	struct delayed_work *dwork = from_timer(dwork, t, timer);
-	unsigned long flags;
 
-	local_irq_save(flags);
+	/* should have been called from irqsafe timer with irq already off */
 	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
-	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(delayed_work_timer_fn);
 
@@ -2147,7 +2145,7 @@ static bool manage_workers(struct worker *worker)
 
 	pool->manager = NULL;
 	pool->flags &= ~POOL_MANAGER_ACTIVE;
-	swake_up_one(&wq_manager_wait);
+	rcuwait_wake_up(&manager_wait);
 	return true;
 }
 
@@ -3511,6 +3509,18 @@ static void rcu_free_pool(struct rcu_head *rcu)
 	kfree(pool);
 }
 
+/* This returns with the lock held on success (pool manager is inactive). */
+static bool wq_manager_inactive(struct worker_pool *pool)
+{
+	raw_spin_lock_irq(&pool->lock);
+
+	if (pool->flags & POOL_MANAGER_ACTIVE) {
+		raw_spin_unlock_irq(&pool->lock);
+		return false;
+	}
+	return true;
+}
+
 /**
  * put_unbound_pool - put a worker_pool
  * @pool: worker_pool to put
@@ -3546,10 +3556,10 @@ static void put_unbound_pool(struct worker_pool *pool)
 	 * Become the manager and destroy all workers.  This prevents
 	 * @pool's workers from blocking on attach_mutex.  We're the last
 	 * manager and @pool gets freed with the flag set.
+	 * Because of how wq_manager_inactive() works, we will hold the
+	 * spinlock after a successful wait.
 	 */
-	raw_spin_lock_irq(&pool->lock);
-	swait_event_lock_irq(wq_manager_wait,
-			    !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+	rcuwait_wait_event(&manager_wait, wq_manager_inactive(pool));
 	pool->flags |= POOL_MANAGER_ACTIVE;
 
 	while ((worker = first_idle_worker(pool)))
-- 
2.28.0
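
A note on the delayed_work_timer_fn() hunk above: the local_irq_save()/
local_irq_restore() pair can go because delayed-work timers are created
with TIMER_IRQSAFE, so the callback is already entered with interrupts
disabled. The initializer in include/linux/workqueue.h reads roughly as
follows in v5.4:

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)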

