Message-ID: <20250731105543.40832-25-yurand2000@gmail.com>
Date: Thu, 31 Jul 2025 12:55:42 +0200
From: Yuri Andriaccio <yurand2000@...il.com>
To: Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Valentin Schneider <vschneid@...hat.com>
Cc: linux-kernel@...r.kernel.org,
Luca Abeni <luca.abeni@...tannapisa.it>,
Yuri Andriaccio <yuri.andriaccio@...tannapisa.it>
Subject: [RFC PATCH v2 24/25] sched/core: Execute enqueued balance callbacks when changing allowed CPUs

From: luca abeni <luca.abeni@...tannapisa.it>

Execute the enqueued balance callbacks when setting the affinity of a
task, since the HCBS scheduler may request balancing of throttled
dl_servers to fully utilize the servers' bandwidth.
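
All the touched paths follow the same two-step scheme already used
elsewhere in the scheduler core: splice the queued callbacks off the
runqueue while rq->lock is still held, drop the lock, then run the
callbacks with preemption disabled. A minimal sketch of the sequence
(illustrative only, not a drop-in hunk):

	struct balance_callback *head;

	preempt_disable();
	/* Detach the queued callbacks while rq->lock is still held. */
	head = splice_balance_callbacks(rq);
	task_rq_unlock(rq, p, rf);
	/* balance_callbacks() re-takes rq->lock itself before running them. */
	balance_callbacks(rq, head);
	preempt_enable();
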
Signed-off-by: luca abeni <luca.abeni@...tannapisa.it>
Signed-off-by: Yuri Andriaccio <yurand2000@...il.com>
---
kernel/sched/core.c | 13 +++++++++++++
1 file changed, 13 insertions(+)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index eb9de8c7b1f..c8763c46030 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2947,6 +2947,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask) ||
(task_current_donor(rq, p) && !task_current(rq, p))) {
struct task_struct *push_task = NULL;
+ struct balance_callback *head;

if ((flags & SCA_MIGRATE_ENABLE) &&
(p->migration_flags & MDF_PUSH) && !rq->push_busy) {
@@ -2965,11 +2966,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
}

preempt_disable();
+ head = splice_balance_callbacks(rq);
task_rq_unlock(rq, p, rf);
if (push_task) {
stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
p, &rq->push_work);
}
+ balance_callbacks(rq, head);
preempt_enable();

if (complete)
@@ -3024,6 +3027,8 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
}

if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
+ struct balance_callback *head;
+
/*
* MIGRATE_ENABLE gets here because 'p == current', but for
* anything else we cannot do is_migration_disabled(), punt
@@ -3037,16 +3042,19 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
p->migration_flags &= ~MDF_PUSH;

preempt_disable();
+ head = splice_balance_callbacks(rq);
task_rq_unlock(rq, p, rf);
if (!stop_pending) {
stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
&pending->arg, &pending->stop_work);
}
+ balance_callbacks(rq, head);
preempt_enable();

if (flags & SCA_MIGRATE_ENABLE)
return 0;
} else {
+ struct balance_callback *head;

if (!is_migration_disabled(p)) {
if (task_on_rq_queued(p))
@@ -3057,7 +3065,12 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
complete = true;
}
}
+
+ preempt_disable();
+ head = splice_balance_callbacks(rq);
task_rq_unlock(rq, p, rf);
+ balance_callbacks(rq, head);
+ preempt_enable();

if (complete)
complete_all(&pending->done);
--
2.50.1