Message-Id: <1490060300-17593-1-git-send-email-byungchul.park@lge.com>
Date: Tue, 21 Mar 2017 10:38:20 +0900
From: Byungchul Park <byungchul.park@....com>
To: peterz@...radead.org, mingo@...nel.org
Cc: linux-kernel@...r.kernel.org, juri.lelli@...il.com,
rostedt@...dmis.org, kernel-team@....com
Subject: [PATCH] sched/deadline: Make find_later_rq() choose a closer cpu in topology

When cpudl_find() returns any cpu among free_cpus, that cpu might not
be the closest one to this_cpu in the sched domain topology. For
example:

this_cpu: 15
free_cpus: 0, 1,..., 14 (== later_mask)
best_cpu: 0
topology:

0 --+
    +--+
1 --+  |
       +-- ... --+
2 --+  |         |
    +--+         |
3 --+            |

...              ...

12 --+           |
     +--+        |
13 --+  |        |
        +-- ... -+
14 --+  |
     +--+
15 --+

In this case, it would be best to select 14, since it is a free cpu and
the closest to 15 (this_cpu). However, the code currently selects
0 (best_cpu), which is just an arbitrary cpu among free_cpus. Fix it.
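
Below is a minimal user-space sketch of the selection this patch aims
for in find_later_rq(): walk this_cpu's sched domains from the smallest
span outwards and take the first cpu that is also in later_mask, i.e.
the closest free cpu. The masks and the spans[] array are made-up
stand-ins mirroring the example topology above, and cpumask_first_and()
is modelled with a bitwise AND plus count-trailing-zeros; this is an
illustration, not kernel code.

/* build: cc -o closest closest.c */
#include <stdio.h>
#include <stdint.h>

#define NR_CPUS	16

/* model of cpumask_first_and(): lowest set bit of (a & b), or NR_CPUS */
static int first_and(uint32_t a, uint32_t b)
{
	uint32_t m = a & b;

	return m ? __builtin_ctz(m) : NR_CPUS;
}

int main(void)
{
	uint32_t later_mask = 0x7fff;	/* cpus 0-14 are free */
	int this_cpu = 15;
	/* spans of the sched domains containing cpu 15, smallest first */
	uint32_t spans[] = { 0xc000, 0xff00, 0xffff };
	unsigned int i;

	for (i = 0; i < sizeof(spans) / sizeof(spans[0]); i++) {
		int closest_cpu = first_and(later_mask, spans[i]);

		if (closest_cpu < NR_CPUS) {
			/* prints "cpu 14", not an arbitrary free cpu */
			printf("this_cpu %d -> chose cpu %d\n",
			       this_cpu, closest_cpu);
			return 0;
		}
	}
	printf("no suitable cpu\n");
	return 1;
}

With cpus 0-14 free, the smallest domain of cpu 15 already contains
cpu 14, so the sketch prints cpu 14 instead of an arbitrary member of
free_cpus such as cpu 0.
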
Signed-off-by: Byungchul Park <byungchul.park@....com>
---
kernel/sched/cpudeadline.c | 20 ++++++++------------
kernel/sched/deadline.c | 29 +++++++++++++++--------------
2 files changed, 23 insertions(+), 26 deletions(-)
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index fba235c..cb36727 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -119,7 +119,7 @@ static inline int cpudl_maximum(struct cpudl *cp)
  * @p: the task
  * @later_mask: a mask to fill in with the selected CPUs (or NULL)
  *
- * Returns: int - best CPU (heap maximum if suitable)
+ * Returns: (int)bool - CPUs were found
  */
 int cpudl_find(struct cpudl *cp, struct task_struct *p,
 	       struct cpumask *later_mask)
@@ -127,21 +127,17 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 	int best_cpu = -1;
 	const struct sched_dl_entity *dl_se = &p->dl;
 
-	if (later_mask &&
-	    cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
-		best_cpu = cpumask_any(later_mask);
-		goto out;
-	} else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
-			dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
-		best_cpu = cpudl_maximum(cp);
-		if (later_mask)
+	if (later_mask) {
+		if (!cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed) &&
+		    cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
+		    dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
+			best_cpu = cpudl_maximum(cp);
 			cpumask_set_cpu(best_cpu, later_mask);
+		}
 	}
 
-out:
 	WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
-
-	return best_cpu;
+	return best_cpu != -1;
 }
 
 /*
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index a2ce590..45cc3d3 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1324,7 +1324,7 @@ static int find_later_rq(struct task_struct *task)
 	struct sched_domain *sd;
 	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
 	int this_cpu = smp_processor_id();
-	int best_cpu, cpu = task_cpu(task);
+	int cpu = task_cpu(task);
 
 	/* Make sure the mask is initialized first */
 	if (unlikely(!later_mask))
@@ -1337,17 +1337,14 @@ static int find_later_rq(struct task_struct *task)
 	 * We have to consider system topology and task affinity
 	 * first, then we can look for a suitable cpu.
 	 */
-	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
-			task, later_mask);
-	if (best_cpu == -1)
+	if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
 		return -1;
 
 	/*
-	 * If we are here, some target has been found,
-	 * the most suitable of which is cached in best_cpu.
-	 * This is, among the runqueues where the current tasks
-	 * have later deadlines than the task's one, the rq
-	 * with the latest possible one.
+	 * If we are here, some targets have been found, including
+	 * the most suitable one, which is, among the runqueues whose
+	 * current tasks have later deadlines than the task's one, the
+	 * rq with the latest possible one.
 	 *
 	 * Now we check how well this matches with task's
 	 * affinity and system topology.
@@ -1367,6 +1364,7 @@ static int find_later_rq(struct task_struct *task)
 	rcu_read_lock();
 	for_each_domain(cpu, sd) {
 		if (sd->flags & SD_WAKE_AFFINE) {
+			int closest_cpu;
 
 			/*
 			 * If possible, preempting this_cpu is
@@ -1378,14 +1376,17 @@ static int find_later_rq(struct task_struct *task)
 				return this_cpu;
 			}
 
+			closest_cpu = cpumask_first_and(later_mask,
+					sched_domain_span(sd));
 			/*
-			 * Last chance: if best_cpu is valid and is
-			 * in the mask, that becomes our choice.
+			 * Last chance: if a cpu exists in both later_mask
+			 * and the current sd span, that becomes our
+			 * choice. Of course, the latest possible cpu is
+			 * already under consideration through later_mask.
			 */
-			if (best_cpu < nr_cpu_ids &&
-			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
+			if (closest_cpu < nr_cpu_ids) {
 				rcu_read_unlock();
-				return best_cpu;
+				return closest_cpu;
 			}
 		}
 	}
--
1.9.1
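
As a footnote, here is a condensed user-space model of the changed
cpudl_find() calling convention documented by the "Returns: (int)bool -
CPUs were found" hunk above: the function now reports whether
later_mask could be populated, and the caller picks a concrete cpu from
the mask itself. This is a deliberately simplified stand-in (the
fallback to the heap-maximum cpu and the deadline comparison are
omitted), not the patched kernel function.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* free (idle or non-deadline) cpus known to the cpudl heap; made up */
static const uint32_t free_cpus = 0x7fff;	/* cpus 0-14 */

/* report whether *later_mask could be filled, instead of a cpu id */
static bool model_cpudl_find(uint32_t cpus_allowed, uint32_t *later_mask)
{
	*later_mask = free_cpus & cpus_allowed;

	return *later_mask != 0;
}

int main(void)
{
	uint32_t later_mask;

	if (!model_cpudl_find(0xffff, &later_mask)) {
		printf("no later rq\n");
		return 1;
	}

	/* the caller (find_later_rq() here) chooses from the mask itself */
	printf("later_mask = 0x%x\n", later_mask);
	return 0;
}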