Date: Wed, 27 Dec 2023 22:51:40 +0800
From: Lai Jiangshan <jiangshanlai@...il.com>
To: linux-kernel@...r.kernel.org
Cc: Tejun Heo <tj@...nel.org>,
	Naohiro.Aota@....com,
	Lai Jiangshan <jiangshan.ljs@...group.com>,
	Lai Jiangshan <jiangshanlai@...il.com>
Subject: [PATCH 4/7] workqueue: Wrap common code into wq_adjust_pwqs_max_active()

From: Lai Jiangshan <jiangshan.ljs@...group.com>

Three places use the same code to adjust max_active for every pwq of a workqueue, so wrap it into a common helper, wq_adjust_pwqs_max_active().
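
For clarity, the repeated pattern being consolidated (as the diff below shows) is the wq->mutex-protected walk over each pool_workqueue:

	mutex_lock(&wq->mutex);
	for_each_pwq(pwq, wq)
		pwq_adjust_max_active(pwq);
	mutex_unlock(&wq->mutex);

Each call site now reduces to a single call:

	wq_adjust_pwqs_max_active(wq);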

Signed-off-by: Lai Jiangshan <jiangshan.ljs@...group.com>
---
 kernel/workqueue.c | 37 +++++++++++++++----------------------
 1 file changed, 15 insertions(+), 22 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3347ba3a734f..e0101b2b5fa3 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4190,6 +4190,16 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
 	raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
 }
 
+static void wq_adjust_pwqs_max_active(struct workqueue_struct *wq)
+{
+	struct pool_workqueue *pwq;
+
+	mutex_lock(&wq->mutex);
+	for_each_pwq(pwq, wq)
+		pwq_adjust_max_active(pwq);
+	mutex_unlock(&wq->mutex);
+}
+
 /* initialize newly allocated @pwq which is associated with @wq and @pool */
 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
 		     struct worker_pool *pool)
@@ -4700,7 +4710,6 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 {
 	va_list args;
 	struct workqueue_struct *wq;
-	struct pool_workqueue *pwq;
 
 	/*
 	 * Unbound && max_active == 1 used to imply ordered, which is no longer
@@ -4761,14 +4770,8 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 	 * list.
 	 */
 	mutex_lock(&wq_pool_mutex);
-
-	mutex_lock(&wq->mutex);
-	for_each_pwq(pwq, wq)
-		pwq_adjust_max_active(pwq);
-	mutex_unlock(&wq->mutex);
-
+	wq_adjust_pwqs_max_active(wq);
 	list_add_tail_rcu(&wq->list, &workqueues);
-
 	mutex_unlock(&wq_pool_mutex);
 
 	return wq;
@@ -5698,19 +5701,14 @@ EXPORT_SYMBOL_GPL(work_on_cpu_safe_key);
 void freeze_workqueues_begin(void)
 {
 	struct workqueue_struct *wq;
-	struct pool_workqueue *pwq;
 
 	mutex_lock(&wq_pool_mutex);
 
 	WARN_ON_ONCE(workqueue_freezing);
 	workqueue_freezing = true;
 
-	list_for_each_entry(wq, &workqueues, list) {
-		mutex_lock(&wq->mutex);
-		for_each_pwq(pwq, wq)
-			pwq_adjust_max_active(pwq);
-		mutex_unlock(&wq->mutex);
-	}
+	list_for_each_entry(wq, &workqueues, list)
+		wq_adjust_pwqs_max_active(wq);
 
 	mutex_unlock(&wq_pool_mutex);
 }
@@ -5773,7 +5771,6 @@ bool freeze_workqueues_busy(void)
 void thaw_workqueues(void)
 {
 	struct workqueue_struct *wq;
-	struct pool_workqueue *pwq;
 
 	mutex_lock(&wq_pool_mutex);
 
@@ -5783,12 +5780,8 @@ void thaw_workqueues(void)
 	workqueue_freezing = false;
 
 	/* restore max_active and repopulate worklist */
-	list_for_each_entry(wq, &workqueues, list) {
-		mutex_lock(&wq->mutex);
-		for_each_pwq(pwq, wq)
-			pwq_adjust_max_active(pwq);
-		mutex_unlock(&wq->mutex);
-	}
+	list_for_each_entry(wq, &workqueues, list)
+		wq_adjust_pwqs_max_active(wq);
 
 out_unlock:
 	mutex_unlock(&wq_pool_mutex);
-- 
2.19.1.6.gb485710b

