[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20251121145720.342467-7-jiangshanlai@gmail.com>
Date: Fri, 21 Nov 2025 22:57:19 +0800
From: Lai Jiangshan <jiangshanlai@...il.com>
To: linux-kernel@...r.kernel.org
Cc: Tejun Heo <tj@...nel.org>,
ying chen <yc1082463@...il.com>,
Lai Jiangshan <jiangshan.ljs@...group.com>,
Lai Jiangshan <jiangshanlai@...il.com>
Subject: [PATCH V3 6/7] workqueue: Limit number of processed works in rescuer per turn
From: Lai Jiangshan <jiangshan.ljs@...group.com>
Currently the rescuer keeps looping until all work items on a PWQ are
done, and this may hurt fairness among PWQs, as the rescuer could remain
stuck on one PWQ indefinitely.
Introduce RESCUER_BATCH to control the maximum number of work items the
rescuer processes in each turn, so that it moves on to other PWQs when
the limit is reached.
Signed-off-by: Lai Jiangshan <jiangshan.ljs@...group.com>
---
kernel/workqueue.c | 22 +++++++++++++++++++---
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 49dce50ff647..9bc155545492 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -117,6 +117,8 @@ enum wq_internal_consts {
MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
CREATE_COOLDOWN = HZ, /* time to breath after fail */
+ RESCUER_BATCH = 16, /* process items per turn */
+
/*
* Rescue workers are used only on emergencies and shared by
* all cpus. Give MIN_NICE.
@@ -3456,7 +3458,7 @@ static int worker_thread(void *__worker)
goto woke_up;
}
-static bool assign_rescuer_work(struct pool_workqueue *pwq, struct worker *rescuer)
+static bool assign_rescuer_work(struct pool_workqueue *pwq, struct worker *rescuer, bool limited)
{
struct worker_pool *pool = pwq->pool;
struct work_struct *cursor = &pwq->mayday_cursor;
@@ -3477,7 +3479,20 @@ static bool assign_rescuer_work(struct pool_workqueue *pwq, struct worker *rescu
/* try to assign a work to rescue */
list_for_each_entry_safe_from(work, n, &pool->worklist, entry) {
- if (get_work_pwq(work) == pwq && assign_work(work, rescuer, &n)) {
+ if (get_work_pwq(work) != pwq)
+ continue;
+ /*
+ * put the cursor, resend mayday for itself and move on to other
+ * PWQs when the limit is reached.
+ */
+ if (limited && !list_empty(&pwq->wq->maydays)) {
+ list_add_tail(&cursor->entry, &work->entry);
+ raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */
+ send_mayday(work);
+ raw_spin_unlock(&wq_mayday_lock);
+ return false;
+ }
+ if (assign_work(work, rescuer, &n)) {
pwq->stats[PWQ_STAT_RESCUED]++;
/* put the cursor for next search */
list_add_tail(&cursor->entry, &n->entry);
@@ -3542,6 +3557,7 @@ static int rescuer_thread(void *__rescuer)
struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
struct pool_workqueue, mayday_node);
struct worker_pool *pool = pwq->pool;
+ unsigned int count = 0;
__set_current_state(TASK_RUNNING);
list_del_init(&pwq->mayday_node);
@@ -3554,7 +3570,7 @@ static int rescuer_thread(void *__rescuer)
WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
- while (assign_rescuer_work(pwq, rescuer))
+ while (assign_rescuer_work(pwq, rescuer, ++count > RESCUER_BATCH))
process_scheduled_works(rescuer);
/*
--
2.19.1.6.gb485710b
Powered by blists - more mailing lists