lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20240221174333.700197-6-tj@kernel.org>
Date: Wed, 21 Feb 2024 07:43:03 -1000
From: Tejun Heo <tj@...nel.org>
To: jiangshanlai@...il.com
Cc: torvalds@...ux-foundation.org,
	linux-kernel@...r.kernel.org,
	allen.lkml@...il.com,
	kernel-team@...a.com,
	Tejun Heo <tj@...nel.org>
Subject: [PATCH 5/7] workqueue: Update how start_flush_work() is called

In preparation for in-BH canceling of BH work items, update start_flush_work()
so that:

- rcu_read_lock()'ing is moved to the caller.

- Instead of true or false, it now returns the worker_pool associated with
  the work item if the work item needs to be waited for. NULL if waiting is
  not needed.

- Add a WARN if it encounters a queued work item when @from_cancel. This
  shouldn't happen.

No behavior changes are intended.

Signed-off-by: Tejun Heo <tj@...nel.org>
---
 kernel/workqueue.c | 39 ++++++++++++++++++++++-----------------
 1 file changed, 22 insertions(+), 17 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a03252ef3c8f..71a53bec4631 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4004,8 +4004,9 @@ void drain_workqueue(struct workqueue_struct *wq)
 }
 EXPORT_SYMBOL_GPL(drain_workqueue);
 
-static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
-			     bool from_cancel)
+static struct worker_pool *start_flush_work(struct work_struct *work,
+					    struct wq_barrier *barr,
+					    bool from_cancel)
 {
 	struct worker *worker = NULL;
 	struct worker_pool *pool;
@@ -4014,12 +4015,9 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 
 	might_sleep();
 
-	rcu_read_lock();
 	pool = get_work_pool(work);
-	if (!pool) {
-		rcu_read_unlock();
-		return false;
-	}
+	if (!pool)
+		return NULL;
 
 	raw_spin_lock_irq(&pool->lock);
 	/* see the comment in try_to_grab_pending() with the same code */
@@ -4027,6 +4025,12 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 	if (pwq) {
 		if (unlikely(pwq->pool != pool))
 			goto already_gone;
+		/*
+		 * Cancel path should already have removed @work from worklist
+		 * in try_to_grab_pending(). Control should get here iff we need
+		 * to wait for the current execution to finish.
+		 */
+		WARN_ON_ONCE(from_cancel);
 	} else {
 		worker = find_worker_executing_work(pool, work);
 		if (!worker)
@@ -4054,17 +4058,16 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 	if (!from_cancel && (wq->saved_max_active == 1 || wq->rescuer))
 		touch_wq_lockdep_map(wq);
 
-	rcu_read_unlock();
-	return true;
+	return pool;
 already_gone:
 	raw_spin_unlock_irq(&pool->lock);
-	rcu_read_unlock();
-	return false;
+	return NULL;
 }
 
 static bool __flush_work(struct work_struct *work, bool from_cancel)
 {
 	struct wq_barrier barr;
+	struct worker_pool *pool;
 
 	if (WARN_ON(!wq_online))
 		return false;
@@ -4072,13 +4075,15 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
 	if (WARN_ON(!work->func))
 		return false;
 
-	if (start_flush_work(work, &barr, from_cancel)) {
-		wait_for_completion(&barr.done);
-		destroy_work_on_stack(&barr.work);
-		return true;
-	} else {
+	rcu_read_lock();
+	pool = start_flush_work(work, &barr, from_cancel);
+	rcu_read_unlock();
+	if (!pool)
 		return false;
-	}
+
+	wait_for_completion(&barr.done);
+	destroy_work_on_stack(&barr.work);
+	return true;
 }
 
 /**
-- 
2.43.2


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ