[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20240205194602.871505-3-longman@redhat.com>
Date: Mon, 5 Feb 2024 14:46:00 -0500
From: Waiman Long <longman@...hat.com>
To: Tejun Heo <tj@...nel.org>,
Lai Jiangshan <jiangshanlai@...il.com>
Cc: linux-kernel@...r.kernel.org,
Juri Lelli <juri.lelli@...hat.com>,
Cestmir Kalina <ckalina@...hat.com>,
Alex Gladkov <agladkov@...hat.com>,
Phil Auld <pauld@...hat.com>,
Costa Shulyupin <cshulyup@...hat.com>,
Waiman Long <longman@...hat.com>
Subject: [PATCH-wq v3 2/4] workqueue: Thaw frozen pwq in workqueue_apply_unbound_cpumask()
workqueue_apply_unbound_cpumask() cannot proceed with an ordered
workqueue if its dfl_pwq is still frozen. Simply doing a sleep wait for
it to be thawed may not work in some cases if pwq_release_workfn() is
somehow prevented from being called due to resources (e.g. wq_pool_mutex)
that are held by its caller.
To break the logjam, we have to actively check if the frozen dfl_pwq
is ready to be thawed and call thaw_pwq() directly if so.
Signed-off-by: Waiman Long <longman@...hat.com>
Tested-by: Juri Lelli <juri.lelli@...hat.com>
---
kernel/workqueue.c | 31 +++++++++++++++++++++++++++++--
1 file changed, 29 insertions(+), 2 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9b107e8a2c15..f453f339f74a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -6541,6 +6541,33 @@ void thaw_workqueues(void)
}
#endif /* CONFIG_FREEZER */
+/*
+ * Check the given ordered workqueue to see if its non-default pwq's have
+ * zero reference count and if so thaw the frozen default pwq.
+ *
+ * Return:
+ * %true if dfl_pwq has been thawed or %false otherwise.
+ */
+static bool ordered_workqueue_ref_check(struct workqueue_struct *wq)
+{
+ struct pool_workqueue *dfl_pwq = rcu_access_pointer(wq->dfl_pwq);
+ struct pool_workqueue *pwq;
+ int refs = 0;
+
+ if (!READ_ONCE(dfl_pwq->frozen))
+ return true;
+ mutex_lock(&wq->mutex);
+ for_each_pwq(pwq, wq) {
+ if (pwq == dfl_pwq)
+ continue;
+ refs += pwq->refcnt;
+ }
+ if (!refs)
+ thaw_pwq(dfl_pwq);
+ mutex_unlock(&wq->mutex);
+ return !refs;
+}
+
static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
{
LIST_HEAD(ctxs);
@@ -6566,12 +6593,12 @@ static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
dfl_pwq = rcu_access_pointer(wq->dfl_pwq);
if (!(wq->flags & __WQ_ORDERED_EXPLICIT)) {
wq->flags &= ~__WQ_ORDERED;
- } else if (dfl_pwq && dfl_pwq->frozen) {
+ } else if (dfl_pwq && !ordered_workqueue_ref_check(wq)) {
int i;
for (i = 0; i < 10; i++) {
msleep(10);
- if (!dfl_pwq->frozen)
+ if (ordered_workqueue_ref_check(wq))
break;
}
if (WARN_ON_ONCE(dfl_pwq->frozen))
--
2.39.3
Powered by blists - more mailing lists