Message-Id: <20220804084135.92425-5-jiangshanlai@gmail.com>
Date: Thu, 4 Aug 2022 16:41:31 +0800
From: Lai Jiangshan <jiangshanlai@...il.com>
To: linux-kernel@...r.kernel.org
Cc: Lai Jiangshan <jiangshan.ljs@...group.com>,
Tejun Heo <tj@...nel.org>,
Lai Jiangshan <jiangshanlai@...il.com>
Subject: [RFC PATCH 4/8] workqueue: Set/Clear PF_WQ_WORKER while attaching/detaching
From: Lai Jiangshan <jiangshan.ljs@...group.com>
PF_WQ_WORKER is only needed while the worker is attached to a pool. Set it in
worker_attach_to_pool() and clear it in worker_detach_from_pool(), still under
wq_pool_attach_mutex, and drop the now-unneeded set_pf_worker() helper.
Signed-off-by: Lai Jiangshan <jiangshan.ljs@...group.com>
---
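Note (not part of the patch): for reviewers less familiar with the scheduler
side, the sketch below is a simplified paraphrase, not the exact upstream
code, of how sched_submit_work() in kernel/sched/core.c consumes
PF_WQ_WORKER.  It is only meant to illustrate why the flag matters solely
while attached: the hook it gates, wq_worker_sleeping(), operates on the
worker's pool, which only exists between attach and detach.

static inline void sched_submit_work(struct task_struct *tsk)
{
	unsigned int task_flags = tsk->flags;

	if (task_is_running(tsk))
		return;

	/*
	 * A workqueue worker going to sleep notifies the workqueue so
	 * it can wake another worker to maintain concurrency.  The
	 * hook dereferences the worker's pool, so the flag is only
	 * meaningful while the worker is attached to a pool.
	 */
	if (task_flags & PF_WQ_WORKER)
		wq_worker_sleeping(tsk);
}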
kernel/workqueue.c | 32 ++++++++++++--------------------
1 file changed, 12 insertions(+), 20 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 82937c0fb21f..7fc4c2fa21d6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1861,6 +1861,8 @@ static struct worker *alloc_worker(int node)
static void worker_attach_to_pool(struct worker *worker,
struct worker_pool *pool)
{
+ WARN_ON_ONCE(worker->task != current);
+
mutex_lock(&wq_pool_attach_mutex);
/*
@@ -1882,6 +1884,9 @@ static void worker_attach_to_pool(struct worker *worker,
list_add_tail(&worker->node, &pool->workers);
worker->pool = pool;
+ /* tell the scheduler that this is a workqueue worker */
+ current->flags |= PF_WQ_WORKER;
+
mutex_unlock(&wq_pool_attach_mutex);
}
@@ -1898,8 +1903,11 @@ static void worker_detach_from_pool(struct worker *worker)
struct worker_pool *pool = worker->pool;
struct completion *detach_completion = NULL;
+ WARN_ON_ONCE(worker->task != current);
+
mutex_lock(&wq_pool_attach_mutex);
+ current->flags &= ~PF_WQ_WORKER;
kthread_set_per_cpu(worker->task, -1);
list_del(&worker->node);
worker->pool = NULL;
@@ -2352,16 +2360,6 @@ static void process_scheduled_works(struct worker *worker)
}
}
-static void set_pf_worker(bool val)
-{
- mutex_lock(&wq_pool_attach_mutex);
- if (val)
- current->flags |= PF_WQ_WORKER;
- else
- current->flags &= ~PF_WQ_WORKER;
- mutex_unlock(&wq_pool_attach_mutex);
-}
-
/**
* worker_thread - the worker thread function
* @__worker: self
@@ -2384,9 +2382,6 @@ static int worker_thread(void *__worker)
/* attach the worker to the pool */
worker_attach_to_pool(worker, pool);
- /* tell the scheduler that this is a workqueue worker */
- set_pf_worker(true);
-
raw_spin_lock_irq(&pool->lock);
worker->pool->nr_workers++;
worker_enter_idle(worker);
@@ -2397,7 +2392,6 @@ static int worker_thread(void *__worker)
if (unlikely(worker->flags & WORKER_DIE)) {
raw_spin_unlock_irq(&pool->lock);
WARN_ON_ONCE(!list_empty(&worker->entry));
- set_pf_worker(false);
set_task_comm(worker->task, "kworker/dying");
ida_free(&pool->worker_ida, worker->id);
@@ -2498,11 +2492,6 @@ static int rescuer_thread(void *__rescuer)
current->flags |= PF_NO_SETAFFINITY;
set_user_nice(current, RESCUER_NICE_LEVEL);
- /*
- * Mark rescuer as worker too. As WORKER_PREP is never cleared, it
- * doesn't participate in concurrency management.
- */
- set_pf_worker(true);
repeat:
set_current_state(TASK_IDLE);
@@ -2531,6 +2520,10 @@ static int rescuer_thread(void *__rescuer)
raw_spin_unlock_irq(&wq_mayday_lock);
+ /*
+ * Attach the rescuer. As WORKER_PREP is never cleared, it
+ * doesn't participate in concurrency management.
+ */
worker_attach_to_pool(rescuer, pool);
raw_spin_lock_irq(&pool->lock);
@@ -2600,7 +2593,6 @@ static int rescuer_thread(void *__rescuer)
if (should_stop) {
__set_current_state(TASK_RUNNING);
- set_pf_worker(false);
return 0;
}
--
2.19.1.6.gb485710b