Message-Id: <20250512115325.30022-3-huschle@linux.ibm.com>
Date: Mon, 12 May 2025 13:53:23 +0200
From: Tobias Huschle <huschle@...ux.ibm.com>
To: linux-kernel@...r.kernel.org
Cc: mingo@...hat.com, peterz@...radead.org, juri.lelli@...hat.com,
vincent.guittot@...aro.org, dietmar.eggemann@....com,
rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de,
vschneid@...hat.com, sshegde@...ux.ibm.com
Subject: [RFC PATCH v3 2/4] sched/rt: add support for parked CPUs
Realtime tasks must also react to the parked state of CPUs. Such tasks are
treated as if parked CPUs have no free capacity to run them.
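For reference, a minimal sketch (not taken from this patch) of the kind of
arch_cpu_parked() hook the code below relies on; the hook is assumed to be
introduced elsewhere in this series, with a default that never reports a CPU
as parked unless an architecture overrides it:

/* Hedged sketch only; the real hook is defined elsewhere in this series. */
#ifndef arch_cpu_parked
static inline bool arch_cpu_parked(int cpu)
{
	/* Default: no CPU is ever considered parked. */
	return false;
}
#endif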
Dynamic changes to the parked state of a CPU are handled correctly as long as
realtime tasks do not consume 100% of the CPU time without any interruption.
If a realtime task runs without interruption, it never enters the load
balancing code and therefore remains on its CPU, even if that CPU becomes
classified as parked. Any CPU consumption below 100% causes the task to be
migrated off a CPU which has just been classified as parked.
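To illustrate that last point, here is a hypothetical user-space example (not
part of the patch): a SCHED_FIFO task that sleeps briefly on each iteration
stays below 100% CPU time, so the scheduler gets a chance to re-evaluate its
placement and move it off a CPU that has become parked. Running it requires
CAP_SYS_NICE or root.

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct sched_param param = { .sched_priority = 10 };
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000000 }; /* 1 ms */

	if (sched_setscheduler(0, SCHED_FIFO, &param)) {
		perror("sched_setscheduler");
		return 1;
	}

	for (;;) {
		/* ... do some realtime work ... */

		/*
		 * Sleeping briefly keeps the task below 100% CPU time, so
		 * it can be migrated off a CPU that becomes parked.
		 */
		nanosleep(&ts, NULL);
	}
}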
Signed-off-by: Tobias Huschle <huschle@...ux.ibm.com>
---
kernel/sched/rt.c | 25 ++++++++++++++++++++-----
1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index fa03ec3ed56a..595d760304fb 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -460,6 +460,9 @@ static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
unsigned int max_cap;
unsigned int cpu_cap;
+ if (arch_cpu_parked(cpu))
+ return false;
+
/* Only heterogeneous systems can benefit from this check */
if (!sched_asym_cpucap_active())
return true;
@@ -474,6 +477,9 @@ static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
#else
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
+ if (arch_cpu_parked(cpu))
+ return false;
+
return true;
}
#endif
@@ -1799,6 +1805,8 @@ static int find_lowest_rq(struct task_struct *task)
int this_cpu = smp_processor_id();
int cpu = task_cpu(task);
int ret;
+ int parked_cpu = -1;
+ int tmp_cpu;
/* Make sure the mask is initialized first */
if (unlikely(!lowest_mask))
@@ -1807,11 +1815,18 @@ static int find_lowest_rq(struct task_struct *task)
if (task->nr_cpus_allowed == 1)
return -1; /* No other targets possible */
+ for_each_cpu(tmp_cpu, cpu_online_mask) {
+ if (arch_cpu_parked(tmp_cpu)) {
+ parked_cpu = tmp_cpu;
+ break;
+ }
+ }
+
/*
* If we're on asym system ensure we consider the different capacities
* of the CPUs when searching for the lowest_mask.
*/
- if (sched_asym_cpucap_active()) {
+ if (sched_asym_cpucap_active() || parked_cpu > -1) {
ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
task, lowest_mask,
@@ -1833,14 +1848,14 @@ static int find_lowest_rq(struct task_struct *task)
* We prioritize the last CPU that the task executed on since
* it is most likely cache-hot in that location.
*/
- if (cpumask_test_cpu(cpu, lowest_mask))
+ if (cpumask_test_cpu(cpu, lowest_mask) && !arch_cpu_parked(cpu))
return cpu;
/*
* Otherwise, we consult the sched_domains span maps to figure
* out which CPU is logically closest to our hot cache data.
*/
- if (!cpumask_test_cpu(this_cpu, lowest_mask))
+ if (!cpumask_test_cpu(this_cpu, lowest_mask) || arch_cpu_parked(this_cpu))
this_cpu = -1; /* Skip this_cpu opt if not among lowest */
rcu_read_lock();
@@ -1860,7 +1875,7 @@ static int find_lowest_rq(struct task_struct *task)
best_cpu = cpumask_any_and_distribute(lowest_mask,
sched_domain_span(sd));
- if (best_cpu < nr_cpu_ids) {
+ if (best_cpu < nr_cpu_ids && !arch_cpu_parked(best_cpu)) {
rcu_read_unlock();
return best_cpu;
}
@@ -1877,7 +1892,7 @@ static int find_lowest_rq(struct task_struct *task)
return this_cpu;
cpu = cpumask_any_distribute(lowest_mask);
- if (cpu < nr_cpu_ids)
+ if (cpu < nr_cpu_ids && !arch_cpu_parked(cpu))
return cpu;
return -1;
--
2.34.1