Message-Id: <20211223123140.3789-5-jiangshanlai@gmail.com>
Date: Thu, 23 Dec 2021 20:31:40 +0800
From: Lai Jiangshan <jiangshanlai@...il.com>
To: linux-kernel@...r.kernel.org, Tejun Heo <tj@...nel.org>
Cc: Lai Jiangshan <laijs@...ux.alibaba.com>,
Lai Jiangshan <jiangshanlai@...il.com>
Subject: [PATCH 4/4] workqueue: Convert the type of pool->nr_running to int
From: Lai Jiangshan <laijs@...ux.alibaba.com>
nr_running is only modified in process context on the CPU associated with the pool, either with preemption disabled or while holding pool->lock, so there are no concurrent writers and it does not need to be atomic.
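
Not part of the patch itself, but for readers following the locking argument, here is a minimal kernel-style sketch of the rule that the new comment on nr_running documents. The struct and function names are made up for illustration and the pool is stripped down to the two relevant fields; increments run on the owning CPU with preemption disabled, decrements/resets additionally hold pool->lock, and readers hold pool->lock, which is why a plain int suffices:

#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Simplified illustration only; not the actual workqueue code. */
struct pool_sketch {
	raw_spinlock_t	lock;		/* protects reads, decrements, resets */
	int		nr_running;	/* written only on the owning CPU */
};

/* Increment: process context on the pool's CPU, preemption disabled. */
static void sketch_inc(struct pool_sketch *pool)
{
	preempt_disable();
	pool->nr_running++;		/* single writer CPU, no atomic needed */
	preempt_enable();
}

/* Decrement: also on the pool's CPU, and additionally under pool->lock. */
static void sketch_dec(struct pool_sketch *pool)
{
	raw_spin_lock_irq(&pool->lock);
	pool->nr_running--;
	raw_spin_unlock_irq(&pool->lock);
}

/*
 * Read: any CPU, under pool->lock.  A concurrent increment may be missed,
 * but a decrement or reset to zero is always observed because it was done
 * while holding the same lock.
 */
static bool sketch_idle(struct pool_sketch *pool)
{
	bool idle;

	raw_spin_lock_irq(&pool->lock);
	idle = !pool->nr_running;
	raw_spin_unlock_irq(&pool->lock);
	return idle;
}
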
Signed-off-by: Lai Jiangshan <laijs@...ux.alibaba.com>
---
kernel/workqueue.c | 29 ++++++++++++++++-------------
1 file changed, 16 insertions(+), 13 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 69cbe9e62bf1..dd3b3aa68954 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -154,8 +154,13 @@ struct worker_pool {
unsigned long watchdog_ts; /* L: watchdog timestamp */
- /* The current concurrency level. */
- atomic_t nr_running;
+ /*
+ * The current concurrency level.
+ * increase: process context in associated CPU (preemption disabled).
+ * decrease and reset: process context in associated CPU & pool->lock.
+ * read: pool->lock. Ensured to be seen when decreased or reset to zero.
+ */
+ int nr_running;
struct list_head worklist; /* L: list of pending works */
@@ -777,7 +782,7 @@ static bool work_is_canceling(struct work_struct *work)
static bool __need_more_worker(struct worker_pool *pool)
{
- return !atomic_read(&pool->nr_running);
+ return !pool->nr_running;
}
/*
@@ -802,8 +807,7 @@ static bool may_start_working(struct worker_pool *pool)
/* Do I need to keep working? Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
- return !list_empty(&pool->worklist) &&
- atomic_read(&pool->nr_running) <= 1;
+ return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
}
/* Do we need a new worker? Called from manager. */
@@ -873,7 +877,7 @@ void wq_worker_running(struct task_struct *task)
*/
preempt_disable();
if (!(worker->flags & WORKER_NOT_RUNNING))
- atomic_inc(&worker->pool->nr_running);
+ worker->pool->nr_running++;
preempt_enable();
worker->sleeping = 0;
}
@@ -917,8 +921,8 @@ void wq_worker_sleeping(struct task_struct *task)
return;
}
- if (atomic_dec_and_test(&pool->nr_running) &&
- !list_empty(&pool->worklist))
+ pool->nr_running--;
+ if (need_more_worker(pool))
wake_up_worker(pool);
raw_spin_unlock_irq(&pool->lock);
}
@@ -973,7 +977,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags)
/* If transitioning into NOT_RUNNING, adjust nr_running. */
if ((flags & WORKER_NOT_RUNNING) &&
!(worker->flags & WORKER_NOT_RUNNING)) {
- atomic_dec(&pool->nr_running);
+ pool->nr_running--;
}
worker->flags |= flags;
@@ -1005,7 +1009,7 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
*/
if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
if (!(worker->flags & WORKER_NOT_RUNNING))
- atomic_inc(&pool->nr_running);
+ pool->nr_running++;
}
/**
@@ -1806,8 +1810,7 @@ static void worker_enter_idle(struct worker *worker)
mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
/* Sanity check nr_running. */
- WARN_ON_ONCE(pool->nr_workers == pool->nr_idle &&
- atomic_read(&pool->nr_running));
+ WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
}
/**
@@ -4985,7 +4988,7 @@ static void unbind_workers(int cpu)
* an unbound (in terms of concurrency management) pool which
* are served by workers tied to the pool.
*/
- atomic_set(&pool->nr_running, 0);
+ pool->nr_running = 0;
/*
* With concurrency management just turned off, a busy
--
2.19.1.6.gb485710b