[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1466041775-4528-9-git-send-email-yuyang.du@intel.com>
Date: Thu, 16 Jun 2016 09:49:32 +0800
From: Yuyang Du <yuyang.du@...el.com>
To: peterz@...radead.org, mingo@...nel.org,
linux-kernel@...r.kernel.org
Cc: umgwanakikbuti@...il.com, bsegall@...gle.com, pjt@...gle.com,
morten.rasmussen@....com, vincent.guittot@...aro.org,
dietmar.eggemann@....com, matt@...eblueprint.co.uk,
Yuyang Du <yuyang.du@...el.com>
Subject: [RFC PATCH 08/11] sched: Remove SD_WAKE_AFFINE flag and replace it with SD_BALANCE_WAKE
SD_BALANCE_{FORK|EXEC|WAKE} flags are for select_task_rq() to select a
CPU to run a new task or a waking task. SD_WAKE_AFFINE is a flag to
try selecting the waker CPU to run the waking task.
SD_BALANCE_WAKE is not a sched_domain flag, but SD_WAKE_AFFINE is.
Conceptually, SD_BALANCE_WAKE should be a sched_domain flag just like
the other two, so we first make SD_BALANCE_WAKE a sched_domain flag.
Moreover, the semantics of SD_WAKE_AFFINE are included in the semantics
of SD_BALANCE_WAKE. When doing wakeup balancing, it is natural to try
the waker CPU if the waker CPU is allowed; in that sense, we don't
need a separate flag to specify it, not to mention that SD_WAKE_AFFINE
is enabled in almost every sched_domain.
With the above combined, there is no need to have SD_WAKE_AFFINE, so
we remove it and replace its uses with SD_BALANCE_WAKE. This can be
accomplished without any functional change.
Signed-off-by: Yuyang Du <yuyang.du@...el.com>
---
include/linux/sched.h | 1 -
kernel/sched/core.c | 7 +++----
kernel/sched/deadline.c | 2 +-
kernel/sched/fair.c | 9 ++++-----
kernel/sched/rt.c | 2 +-
5 files changed, 9 insertions(+), 12 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d74e757..0803abd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1014,7 +1014,6 @@ extern void wake_up_q(struct wake_q_head *head);
#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */
#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */
#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */
-#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu power */
#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */
#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7ef6385..56ac8f1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5666,7 +5666,7 @@ static int sd_degenerate(struct sched_domain *sd)
}
/* Following flags don't use groups */
- if (sd->flags & (SD_WAKE_AFFINE))
+ if (sd->flags & (SD_BALANCE_WAKE))
return 0;
return 1;
@@ -6361,8 +6361,7 @@ sd_init(struct sched_domain_topology_level *tl,
| 1*SD_BALANCE_NEWIDLE
| 1*SD_BALANCE_EXEC
| 1*SD_BALANCE_FORK
- | 0*SD_BALANCE_WAKE
- | 1*SD_WAKE_AFFINE
+ | 1*SD_BALANCE_WAKE
| 0*SD_SHARE_CPUCAPACITY
| 0*SD_SHARE_PKG_RESOURCES
| 0*SD_SERIALIZE
@@ -6412,7 +6411,7 @@ sd_init(struct sched_domain_topology_level *tl,
if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
sd->flags &= ~(SD_BALANCE_EXEC |
SD_BALANCE_FORK |
- SD_WAKE_AFFINE);
+ SD_BALANCE_WAKE);
}
#endif
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index fcb7f02..037ab0f 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1323,7 +1323,7 @@ static int find_later_rq(struct task_struct *task)
rcu_read_lock();
for_each_domain(cpu, sd) {
- if (sd->flags & SD_WAKE_AFFINE) {
+ if (sd->flags & SD_BALANCE_WAKE) {
/*
* If possible, preempting this_cpu is
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d048203..f15461f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5474,8 +5474,7 @@ static int cpu_util(int cpu)
* that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
* SD_BALANCE_FORK, or SD_BALANCE_EXEC.
*
- * Balances load by selecting the idlest cpu in the idlest group, or under
- * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set.
+ * Balances load by selecting the idlest cpu in the idlest group.
*
* Returns the target cpu number.
*
@@ -5502,9 +5501,9 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
/*
* If both cpu and prev_cpu are part of this domain,
- * cpu is a valid SD_WAKE_AFFINE target.
+ * cpu is a valid SD_BALANCE_WAKE target.
*/
- if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
+ if (want_affine && (tmp->flags & SD_BALANCE_WAKE) &&
cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
affine_sd = tmp;
break;
@@ -5517,7 +5516,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
}
if (affine_sd) {
- sd = NULL; /* Prefer wake_affine over balance flags */
+ sd = NULL; /* Prefer SD_BALANCE_WAKE over other balance flags */
if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
new_cpu = cpu;
}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index d5690b7..d1c8f41 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1655,7 +1655,7 @@ static int find_lowest_rq(struct task_struct *task)
rcu_read_lock();
for_each_domain(cpu, sd) {
- if (sd->flags & SD_WAKE_AFFINE) {
+ if (sd->flags & SD_BALANCE_WAKE) {
int best_cpu;
/*
--
1.7.9.5
Powered by blists - more mailing lists