Message-Id: <20201218170919.2950-8-jiangshanlai@gmail.com>
Date: Sat, 19 Dec 2020 01:09:16 +0800
From: Lai Jiangshan <jiangshanlai@...il.com>
To: linux-kernel@...r.kernel.org
Cc: Valentin Schneider <valentin.schneider@....com>,
Peter Zijlstra <peterz@...radead.org>,
Qian Cai <cai@...hat.com>,
Vincent Donnefort <vincent.donnefort@....com>,
Lai Jiangshan <laijs@...ux.alibaba.com>,
Tejun Heo <tj@...nel.org>,
Lai Jiangshan <jiangshanlai@...il.com>,
Daniel Bristot de Oliveira <bristot@...hat.com>
Subject: [PATCH -tip V2 07/10] workqueue: Manually break affinity on hotplug for unbound pool
From: Lai Jiangshan <laijs@...ux.alibaba.com>
It is possible for a per-node pool/worker's affinity to be a single
CPU.  This can happen when wq_unbound_cpumask is changed by the system
admin via /sys/devices/virtual/workqueue/cpumask:  pool->attrs->cpumask
is wq_unbound_cpumask & possible_cpumask_of_the_node, which can be a
single CPU and makes the pool's workers effectively "per-CPU kthreads".
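
For illustration only, here is a minimal userspace sketch (plain C, not
kernel code) of how that intersection can collapse to a single CPU; the
sysfs value, the node layout, and the 64-bit stand-in masks are made-up
assumptions, and mask_weight() merely stands in for cpumask_weight():

#include <stdint.h>
#include <stdio.h>

/* stand-in for cpumask_weight(): count set bits */
static int mask_weight(uint64_t mask)
{
        return __builtin_popcountll(mask);
}

int main(void)
{
        /* e.g. the admin wrote "4" to /sys/devices/virtual/workqueue/cpumask */
        uint64_t wq_unbound_cpumask = 1ULL << 2;
        /* hypothetical NUMA node covering CPUs 0-3 */
        uint64_t possible_cpumask_of_the_node = 0xfULL;

        /* pool->attrs->cpumask = wq_unbound_cpumask & node's possible mask */
        uint64_t pool_cpumask = wq_unbound_cpumask & possible_cpumask_of_the_node;

        printf("pool cpumask weight = %d\n", mask_weight(pool_cpumask));
        if (mask_weight(pool_cpumask) == 1)
                printf("pool workers behave like per-CPU kthreads\n");
        return 0;
}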
The scheduler won't break affinity for such "per-CPU kthread" workers
when their CPU goes down, so we have to do it ourselves.
We do this by reusing the existing restore_unbound_workers_cpumask()
and renaming it to update_unbound_workers_cpumask().  When the number
of online CPUs of the pool goes from 1 to 0, we break the affinity
proactively.
Note that we break the affinity even for workers that are not per-CPU
kthreads: first, this code path is a slow path not worth much
optimization; second, it means we don't need to rely on the exact
conditions under which the scheduler would break the affinity for us.
We break the affinity by setting the workers' affinity to
cpu_possible_mask, which preserves the same behavior as when the
scheduler breaks the affinity for us.
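
As a hedged model of the resulting 0/1/default decision (the patch
below implements it with cpumask_weight() and set_cpus_allowed_ptr()
under wq_pool_attach_mutex), the following userspace sketch uses
made-up 64-bit masks and a print stub instead of touching real worker
tasks:

#include <stdint.h>
#include <stdio.h>

/* stand-in for set_cpus_allowed_ptr() on every worker of the pool */
static void set_pool_workers_mask(const char *mask_name)
{
        printf("workers -> %s\n", mask_name);
}

static void update_unbound_workers_cpumask_model(uint64_t pool_mask,
                                                 uint64_t online_mask)
{
        switch (__builtin_popcountll(pool_mask & online_mask)) {
        case 0: /* the last online CPU of the pool is going down */
                set_pool_workers_mask("cpu_possible_mask");
                break;
        case 1: /* the first CPU of the pool is coming online */
                set_pool_workers_mask("pool->attrs->cpumask");
                break;
        default: /* the pool still has other online CPUs: nothing to do */
                break;
        }
}

int main(void)
{
        /* pool covers only CPU 2; CPU 2 was just cleared from the online mask */
        update_unbound_workers_cpumask_model(1ULL << 2, 0x3ULL);
        /* pool covers CPUs 0-1; CPU 0 just came online */
        update_unbound_workers_cpumask_model(0x3ULL, 0x1ULL);
        return 0;
}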
Fixes: 06249738a41a ("workqueue: Manually break affinity on hotplug")
Acked-by: Tejun Heo <tj@...nel.org>
Signed-off-by: Lai Jiangshan <laijs@...ux.alibaba.com>
---
kernel/workqueue.c | 49 ++++++++++++++++++++++++++++++++--------------
1 file changed, 34 insertions(+), 15 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index eda293097fe1..c2b66679c0aa 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -5027,16 +5027,16 @@ static void rebind_workers(struct worker_pool *pool)
}
/**
- * restore_unbound_workers_cpumask - restore cpumask of unbound workers
+ * update_unbound_workers_cpumask - update cpumask of unbound workers
* @pool: unbound pool of interest
- * @cpu: the CPU which is coming up
+ * @cpu: the CPU which is coming up or going down
*
* An unbound pool may end up with a cpumask which doesn't have any online
- * CPUs. When a worker of such pool get scheduled, the scheduler resets
- * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any
- * online CPU before, cpus_allowed of all its workers should be restored.
+ * CPUs. We have to reset workers' cpus_allowed of such pool. And we
+ * restore the workers' cpus_allowed when the pool's cpumask has online
+ * CPU for the first time after reset.
*/
-static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
+static void update_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
{
static cpumask_t cpumask;
struct worker *worker;
@@ -5050,13 +5050,19 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
cpumask_and(&cpumask, pool->attrs->cpumask, wq_online_cpumask);
- /* is @cpu the first one onlined for the @pool? */
- if (cpumask_weight(&cpumask) > 1)
- return;
-
- /* as we're called from CPU_ONLINE, the following shouldn't fail */
- for_each_pool_worker(worker, pool)
- WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask) < 0);
+ switch (cpumask_weight(&cpumask)) {
+ case 0: /* @cpu is the last one going down for the @pool. */
+ for_each_pool_worker(worker, pool)
+ WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+ break;
+ case 1: /* @cpu is the first one onlined for the @pool. */
+ /* as we're called from CPU_ONLINE, the following shouldn't fail */
+ for_each_pool_worker(worker, pool)
+ WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask) < 0);
+ break;
+ default: /* other cases, nothing to do */
+ break;
+ }
}
int workqueue_prepare_cpu(unsigned int cpu)
@@ -5087,7 +5093,7 @@ int workqueue_online_cpu(unsigned int cpu)
if (pool->cpu == cpu)
rebind_workers(pool);
else if (pool->cpu < 0)
- restore_unbound_workers_cpumask(pool, cpu);
+ update_unbound_workers_cpumask(pool, cpu);
mutex_unlock(&wq_pool_attach_mutex);
}
@@ -5102,7 +5108,9 @@ int workqueue_online_cpu(unsigned int cpu)
int workqueue_offline_cpu(unsigned int cpu)
{
+ struct worker_pool *pool;
struct workqueue_struct *wq;
+ int pi;
/* unbinding per-cpu workers should happen on the local CPU */
if (WARN_ON(cpu != smp_processor_id()))
@@ -5110,9 +5118,20 @@ int workqueue_offline_cpu(unsigned int cpu)
unbind_workers(cpu);
- /* update NUMA affinity of unbound workqueues */
mutex_lock(&wq_pool_mutex);
cpumask_clear_cpu(cpu, wq_online_cpumask);
+
+ /* update CPU affinity of workers of unbound pools */
+ for_each_pool(pool, pi) {
+ mutex_lock(&wq_pool_attach_mutex);
+
+ if (pool->cpu < 0)
+ update_unbound_workers_cpumask(pool, cpu);
+
+ mutex_unlock(&wq_pool_attach_mutex);
+ }
+
+ /* update NUMA affinity of unbound workqueues */
list_for_each_entry(wq, &workqueues, list)
wq_update_unbound_numa(wq, cpu);
mutex_unlock(&wq_pool_mutex);
--
2.19.1.6.gb485710b