Message-Id: <20240621073225.3600-4-jiangshanlai@gmail.com>
Date: Fri, 21 Jun 2024 15:32:24 +0800
From: Lai Jiangshan <jiangshanlai@...il.com>
To: linux-kernel@...r.kernel.org
Cc: Lai Jiangshan <jiangshan.ljs@...group.com>,
Valentin Schneider <vschneid@...hat.com>,
Tejun Heo <tj@...nel.org>,
Lai Jiangshan <jiangshanlai@...il.com>
Subject: [PATCH 3/4] workqueue: Detach workers directly in idle_cull_fn()
From: Lai Jiangshan <jiangshan.ljs@...group.com>
The code that kicks off the destruction of workers now runs in process
context (idle_cull_fn()), so a worker is no longer required to detach
itself from inside the worker thread. Detach the workers directly in
idle_cull_fn() instead.

wake_dying_workers() is renamed to detach_dying_workers(), and the
now-unneeded wakeup in it is removed.
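
For reviewers, the before/after flow in schematic form (a simplified
sketch, not the literal kernel code; the reaping step comes from the
previous patch in this series, assuming it stops the threads via
kthread_stop(), which wakes the task anyway):

	/* Before: idle_cull_fn() only marked and woke the dying worker;
	 * the worker detached itself on its way out. */
	idle_cull_fn():
		set_worker_dying(worker, &cull_list);	/* under pool->lock */
		wake_dying_workers(&cull_list);		/* wake_up_process() */
	worker_thread():				/* dying worker */
		worker_detach_from_pool(worker);	/* self-detach */
		return 0;

	/* After: idle_cull_fn() detaches the workers itself while it is
	 * already holding wq_pool_attach_mutex; the explicit wakeup is
	 * dropped. */
	idle_cull_fn():
		set_worker_dying(worker, &cull_list);	/* under pool->lock */
		detach_dying_workers(&cull_list);	/* detach_worker() each */
		reap_dying_workers(&cull_list);		/* kthread_stop() wakes */
	worker_thread():				/* dying worker */
		return 0;				/* no self-detach */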
Cc: Valentin Schneider <vschneid@...hat.com>
Signed-off-by: Lai Jiangshan <jiangshan.ljs@...group.com>
---
 kernel/workqueue.c | 45 +++++++++++++++++++--------------------------
 1 file changed, 19 insertions(+), 26 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 93b87ca63a7e..e9ca42ef425a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2695,6 +2695,16 @@ static void unbind_worker(struct worker *worker)
 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
 }
 
+static void detach_worker(struct worker *worker)
+{
+	lockdep_assert_held(&wq_pool_attach_mutex);
+
+	unbind_worker(worker);
+	list_del(&worker->node);
+	worker->pool = NULL;
+}
+
 /**
  * worker_detach_from_pool() - detach a worker from its pool
  * @worker: worker which is attached to its pool
@@ -2711,11 +2721,7 @@ static void worker_detach_from_pool(struct worker *worker)
 	WARN_ON_ONCE(pool->flags & POOL_BH);
 
 	mutex_lock(&wq_pool_attach_mutex);
-
-	unbind_worker(worker);
-	list_del(&worker->node);
-	worker->pool = NULL;
-
+	detach_worker(worker);
 	mutex_unlock(&wq_pool_attach_mutex);
 
 	/* clear leftover flags without pool->lock after it is detached */
@@ -2807,24 +2813,12 @@ static struct worker *create_worker(struct worker_pool *pool)
 	return NULL;
 }
 
-static void wake_dying_workers(struct list_head *cull_list)
+static void detach_dying_workers(struct list_head *cull_list)
 {
 	struct worker *worker;
 
-	list_for_each_entry(worker, cull_list, entry) {
-		unbind_worker(worker);
-		/*
-		 * If the worker was somehow already running, then it had to be
-		 * in pool->idle_list when set_worker_dying() happened or we
-		 * wouldn't have gotten here.
-		 *
-		 * Thus, the worker must either have observed the WORKER_DIE
-		 * flag, or have set its state to TASK_IDLE. Either way, the
-		 * below will be observed by the worker and is safe to do
-		 * outside of pool->lock.
-		 */
-		wake_up_process(worker->task);
-	}
+	list_for_each_entry(worker, cull_list, entry)
+		detach_worker(worker);
 }
 
 static void reap_dying_workers(struct list_head *cull_list)
@@ -2930,9 +2924,9 @@ static void idle_cull_fn(struct work_struct *work)
 
 	/*
 	 * Grabbing wq_pool_attach_mutex here ensures an already-running worker
-	 * cannot proceed beyong worker_detach_from_pool() in its self-destruct
-	 * path. This is required as a previously-preempted worker could run after
-	 * set_worker_dying() has happened but before wake_dying_workers() did.
+	 * cannot proceed beyond set_pf_worker() in its self-destruct path.
+	 * This is required as a previously-preempted worker could run after
+	 * set_worker_dying() has happened but before detach_dying_workers() did.
 	 */
 	mutex_lock(&wq_pool_attach_mutex);
 	raw_spin_lock_irq(&pool->lock);
@@ -2953,7 +2947,7 @@ static void idle_cull_fn(struct work_struct *work)
 	}
 
 	raw_spin_unlock_irq(&pool->lock);
-	wake_dying_workers(&cull_list);
+	detach_dying_workers(&cull_list);
 	mutex_unlock(&wq_pool_attach_mutex);
 
 	reap_dying_workers(&cull_list);
@@ -3336,7 +3330,6 @@ static int worker_thread(void *__worker)
 
 		set_task_comm(worker->task, "kworker/dying");
 		ida_free(&pool->worker_ida, worker->id);
-		worker_detach_from_pool(worker);
 		WARN_ON_ONCE(!list_empty(&worker->entry));
 		return 0;
 	}
@@ -4921,7 +4914,7 @@ static void put_unbound_pool(struct worker_pool *pool)
 	WARN_ON(pool->nr_workers || pool->nr_idle);
 	raw_spin_unlock_irq(&pool->lock);
 
-	wake_dying_workers(&cull_list);
+	detach_dying_workers(&cull_list);
 	mutex_unlock(&wq_pool_attach_mutex);
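
A note on the comment updated in idle_cull_fn() above: the reason a dying
worker cannot get past set_pf_worker() is that set_pf_worker() itself
serializes on wq_pool_attach_mutex. Paraphrased from kernel/workqueue.c
as of this series (a sketch for context, not part of this patch):

	static void set_pf_worker(bool val)
	{
		mutex_lock(&wq_pool_attach_mutex);
		if (val)
			current->flags |= PF_WQ_WORKER;
		else
			current->flags &= ~PF_WQ_WORKER;
		mutex_unlock(&wq_pool_attach_mutex);
	}

So a previously-preempted dying worker blocks in set_pf_worker() until
idle_cull_fn() drops wq_pool_attach_mutex, by which point
detach_dying_workers() has already detached it.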
--
2.19.1.6.gb485710b