Message-ID: <tip-bf475ce0a3dd75b5d1df6c6c14ae25168caa15ac@git.kernel.org>
Date: Wed, 16 Nov 2016 04:13:37 -0800
From: tip-bot for Morten Rasmussen <tipbot@...or.com>
To: linux-tip-commits@...r.kernel.org
Cc: tglx@...utronix.de, mingo@...nel.org, linux-kernel@...r.kernel.org,
morten.rasmussen@....com, torvalds@...ux-foundation.org,
peterz@...radead.org, hpa@...or.com
Subject: [tip:sched/core] sched/fair: Add per-CPU min capacity to
sched_group_capacity
Commit-ID: bf475ce0a3dd75b5d1df6c6c14ae25168caa15ac
Gitweb: http://git.kernel.org/tip/bf475ce0a3dd75b5d1df6c6c14ae25168caa15ac
Author: Morten Rasmussen <morten.rasmussen@....com>
AuthorDate: Fri, 14 Oct 2016 14:41:09 +0100
Committer: Ingo Molnar <mingo@...nel.org>
CommitDate: Wed, 16 Nov 2016 10:29:06 +0100
sched/fair: Add per-CPU min capacity to sched_group_capacity
struct sched_group_capacity currently represents the compute capacity
sum of all CPUs in the sched_group.
Unless it is divided by the group_weight to get the average capacity
per CPU, it hides differences in CPU capacity on mixed-capacity systems
(e.g. due to high RT/IRQ utilization or ARM big.LITTLE).
But even the average may not be sufficient if the group covers CPUs of
different capacities.
Instead, by extending struct sched_group_capacity to indicate the minimum
per-CPU capacity in the group, a suitable group for a given task utilization
can more easily be found, such that CPUs with reduced capacity can be avoided
for tasks with high utilization (not implemented by this patch).
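The placement use-case hinted at above ("not implemented by this patch") boils
down to comparing a task's utilization against the capacity of the weakest CPU
in a candidate group. Below is a minimal, self-contained sketch of that idea
only; it is not kernel code, and the fits_min_capacity() helper together with
its ~20% headroom margin are illustrative assumptions, not part of this patch.

	#include <stdio.h>

	/* Illustrative model of the capacity fields touched by this patch. */
	struct sched_group_capacity {
		unsigned long capacity;     /* sum of CPU capacities in the group */
		unsigned long min_capacity; /* min per-CPU capacity in the group */
	};

	/*
	 * Hypothetical fit check: a task with utilization 'util' fits a group
	 * only if the group's weakest CPU still leaves ~20% headroom (the
	 * margin is an assumption made for this example).
	 */
	static int fits_min_capacity(unsigned long util,
				     const struct sched_group_capacity *sgc)
	{
		return util * 5 < sgc->min_capacity * 4;
	}

	int main(void)
	{
		/* A big.LITTLE-like system: one group of big CPUs, one of little CPUs. */
		struct sched_group_capacity big    = { .capacity = 2 * 1024, .min_capacity = 1024 };
		struct sched_group_capacity little = { .capacity = 2 * 446,  .min_capacity = 446 };
		unsigned long task_util = 600;	/* a fairly busy task, in capacity units */

		printf("fits big:    %d\n", fits_min_capacity(task_util, &big));    /* prints 1 */
		printf("fits little: %d\n", fits_min_capacity(task_util, &little)); /* prints 0 */
		return 0;
	}

With only the group-wide capacity sum (or its average), both groups can look
similar for a mid-sized task; the per-group min_capacity makes the difference
between the weakest CPUs visible to such a check.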
Signed-off-by: Morten Rasmussen <morten.rasmussen@....com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: dietmar.eggemann@....com
Cc: freedom.tan@...iatek.com
Cc: keita.kobayashi.ym@...esas.com
Cc: mgalbraith@...e.de
Cc: sgurrappadi@...dia.com
Cc: vincent.guittot@...aro.org
Cc: yuyang.du@...el.com
Link: http://lkml.kernel.org/r/1476452472-24740-4-git-send-email-morten.rasmussen@arm.com
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
kernel/sched/core.c | 3 ++-
kernel/sched/fair.c | 17 ++++++++++++-----
kernel/sched/sched.h | 3 ++-
3 files changed, 16 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f3cfa0d..6bf1fd3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5708,7 +5708,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
printk(KERN_CONT " %*pbl",
cpumask_pr_args(sched_group_cpus(group)));
if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
- printk(KERN_CONT " (cpu_capacity = %d)",
+ printk(KERN_CONT " (cpu_capacity = %lu)",
group->sgc->capacity);
}
@@ -6185,6 +6185,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
* die on a /0 trap.
*/
sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
+ sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
/*
* Make sure the first group of this domain contains the
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1ad3706..faf8f18 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6909,13 +6909,14 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
cpu_rq(cpu)->cpu_capacity = capacity;
sdg->sgc->capacity = capacity;
+ sdg->sgc->min_capacity = capacity;
}
void update_group_capacity(struct sched_domain *sd, int cpu)
{
struct sched_domain *child = sd->child;
struct sched_group *group, *sdg = sd->groups;
- unsigned long capacity;
+ unsigned long capacity, min_capacity;
unsigned long interval;
interval = msecs_to_jiffies(sd->balance_interval);
@@ -6928,6 +6929,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
}
capacity = 0;
+ min_capacity = ULONG_MAX;
if (child->flags & SD_OVERLAP) {
/*
@@ -6952,11 +6954,12 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
*/
if (unlikely(!rq->sd)) {
capacity += capacity_of(cpu);
- continue;
+ } else {
+ sgc = rq->sd->groups->sgc;
+ capacity += sgc->capacity;
}
- sgc = rq->sd->groups->sgc;
- capacity += sgc->capacity;
+ min_capacity = min(capacity, min_capacity);
}
} else {
/*
@@ -6966,12 +6969,16 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
group = child->groups;
do {
- capacity += group->sgc->capacity;
+ struct sched_group_capacity *sgc = group->sgc;
+
+ capacity += sgc->capacity;
+ min_capacity = min(sgc->min_capacity, min_capacity);
group = group->next;
} while (group != child->groups);
}
sdg->sgc->capacity = capacity;
+ sdg->sgc->min_capacity = min_capacity;
}
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 055f935..345c1cc 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -892,7 +892,8 @@ struct sched_group_capacity {
* CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
* for a single CPU.
*/
- unsigned int capacity;
+ unsigned long capacity;
+ unsigned long min_capacity; /* Min per-CPU capacity in group */
unsigned long next_update;
int imbalance; /* XXX unrelated to capacity but shared group state */
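For readers following the update_group_capacity() changes above, here is a
small standalone sketch of the child-groups (non-SD_OVERLAP) path: the
parent's capacity is the sum of the children's capacities, while min_capacity
is the minimum of the children's min_capacity values, which is why it is
initialised to ULONG_MAX before the loop. (The SD_OVERLAP hunk takes min()
against the running capacity sum; the sketch models only the child-groups
path.) A plain array stands in for the kernel's circular group->next list;
this is an illustration of the logic, not kernel code.

	#include <limits.h>
	#include <stdio.h>

	struct sched_group_capacity {
		unsigned long capacity;
		unsigned long min_capacity;
	};

	/*
	 * Mirror of the child-group loop in update_group_capacity(): sum the
	 * children's capacities and track the smallest per-CPU capacity seen.
	 */
	static void aggregate(struct sched_group_capacity *parent,
			      const struct sched_group_capacity *child, int nr_children)
	{
		unsigned long capacity = 0;
		unsigned long min_capacity = ULONG_MAX;	/* so the first child always wins */
		int i;

		for (i = 0; i < nr_children; i++) {
			capacity += child[i].capacity;
			if (child[i].min_capacity < min_capacity)
				min_capacity = child[i].min_capacity;
		}

		parent->capacity = capacity;
		parent->min_capacity = min_capacity;
	}

	int main(void)
	{
		/* Two child groups: 2 big CPUs (1024 each) and 2 little CPUs (446 each). */
		struct sched_group_capacity children[2] = {
			{ .capacity = 2048, .min_capacity = 1024 },
			{ .capacity =  892, .min_capacity =  446 },
		};
		struct sched_group_capacity parent;

		aggregate(&parent, children, 2);
		printf("capacity=%lu min_capacity=%lu\n", parent.capacity, parent.min_capacity);
		/* prints: capacity=2940 min_capacity=446 */
		return 0;
	}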