Message-ID: <20170501085703.e52pfmohplc5vu7w@hirez.programming.kicks-ass.net>
Date: Mon, 1 May 2017 10:57:03 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Lauro Venancio <lvenanci@...hat.com>
Cc: mingo@...nel.org, lwang@...hat.com, riel@...hat.com, efault@....de,
tglx@...utronix.de, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 00/14] sched/topology fixes
On Fri, Apr 28, 2017 at 08:30:05PM -0300, Lauro Venancio wrote:
> On 04/28/2017 10:53 AM, Peter Zijlstra wrote:
> > Also, the following occurred to me:
> >
> > sg_span & sg_mask == sg_mask
> >
> > Therefore, we don't need to do the whole "sg_span &" business.
> >
> > Hmm?
> Agreed. Maybe we have to rename "mask" to something else. Maybe
> "group_reached_by_cpus" or "group_installed_on_cpus".
I went with group_balance_mask() to match the existing
group_balance_cpu().
And now the temptation is very great to also fix up the
sched_group_cpus() vs sched_domain_span() thing...
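For reference, the invariant above is easy to check outside the kernel. A
minimal userspace sketch (plain uint64_t bitmasks and made-up variable names,
not the kernel cpumask API) showing why the "sg_span &" step is redundant once
the mask is guaranteed to be a subset of the span:

/* Model: the balance mask is a subset of the group span, therefore
 * (sg_span & sg_mask) == sg_mask and the extra "sg_span &" is a no-op.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t sg_span = 0x0f;  /* CPUs 0-3 form the group */
        uint64_t sg_mask = 0x05;  /* only CPUs 0 and 2 can reach this group */

        assert((sg_mask & ~sg_span) == 0);       /* mask is a subset of span */
        assert((sg_span & sg_mask) == sg_mask);  /* so masking with span changes nothing */

        printf("span=%#llx mask=%#llx span&mask=%#llx\n",
               (unsigned long long)sg_span,
               (unsigned long long)sg_mask,
               (unsigned long long)(sg_span & sg_mask));
        return 0;
}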
---
Subject: sched/topology: Rename sched_group_mask()
From: Peter Zijlstra <peterz@...radead.org>
Date: Mon May 1 10:47:02 CEST 2017
Since sched_group_mask() is now an independent cpumask (it no longer
masks sched_group_cpus()), rename the thing.
Suggested-by: Lauro Ramos Venancio <lvenanci@...hat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
kernel/sched/fair.c | 4 +---
kernel/sched/sched.h | 7 +++----
kernel/sched/topology.c | 31 ++++++++++++++-----------------
3 files changed, 18 insertions(+), 24 deletions(-)
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7996,7 +7996,6 @@ static int active_load_balance_cpu_stop(
static int should_we_balance(struct lb_env *env)
{
struct sched_group *sg = env->sd->groups;
- struct cpumask *sg_mask;
int cpu, balance_cpu = -1;
/*
@@ -8006,9 +8005,8 @@ static int should_we_balance(struct lb_e
if (env->idle == CPU_NEWLY_IDLE)
return 1;
- sg_mask = sched_group_mask(sg);
/* Try to find first idle cpu */
- for_each_cpu_and(cpu, sg_mask, env->cpus) {
+ for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) {
if (!idle_cpu(cpu))
continue;
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1027,7 +1027,7 @@ struct sched_group_capacity {
int id;
#endif
- unsigned long cpumask[0]; /* iteration mask */
+ unsigned long cpumask[0]; /* balance mask */
};
struct sched_group {
@@ -1054,10 +1054,9 @@ static inline struct cpumask *sched_grou
}
/*
- * cpumask masking which cpus in the group are allowed to iterate up the domain
- * tree.
+ * See build_balance_mask().
*/
-static inline struct cpumask *sched_group_mask(struct sched_group *sg)
+static inline struct cpumask *group_balance_mask(struct sched_group *sg)
{
return to_cpumask(sg->sgc->cpumask);
}
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -86,9 +86,9 @@ static int sched_domain_debug_one(struct
cpumask_pr_args(sched_group_cpus(group)));
if ((sd->flags & SD_OVERLAP) &&
- !cpumask_equal(sched_group_mask(group), sched_group_cpus(group))) {
+ !cpumask_equal(group_balance_mask(group), sched_group_cpus(group))) {
printk(KERN_CONT " mask=%*pbl",
- cpumask_pr_args(sched_group_mask(group)));
+ cpumask_pr_args(group_balance_mask(group)));
}
if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
@@ -497,16 +497,16 @@ enum s_alloc {
/*
* Return the canonical balance CPU for this group, this is the first CPU
- * of this group that's also in the iteration mask.
+ * of this group that's also in the balance mask.
*
- * The iteration mask are all those CPUs that could actually end up at this
- * group. See build_group_mask().
+ * The balance mask is the set of CPUs that could actually end up at this
+ * group. See build_balance_mask().
*
* Also see should_we_balance().
*/
int group_balance_cpu(struct sched_group *sg)
{
- return cpumask_first(sched_group_mask(sg));
+ return cpumask_first(group_balance_mask(sg));
}
@@ -563,7 +563,7 @@ int group_balance_cpu(struct sched_group
* groups include the CPUs of Node-0, while those CPUs would not in fact ever
* end up at those groups (they would end up in group: 0-1,3).
*
- * To correct this we have to introduce the group iteration mask. This mask
+ * To correct this we have to introduce the group balance mask. This mask
* will contain those CPUs in the group that can reach this group given the
* (child) domain tree.
*
@@ -607,11 +607,8 @@ int group_balance_cpu(struct sched_group
/*
- * Build an iteration mask that can exclude certain CPUs from the upwards
- * domain traversal.
- *
- * Only CPUs that can arrive at this group should be considered to continue
- * balancing.
+ * Build the balance mask; it contains only those CPUs that can arrive at this
+ * group and should be considered to continue balancing.
*
* We do this during the group creation pass, therefore the group information
* isn't complete yet, however since each group represents a (child) domain we
@@ -619,7 +616,7 @@ int group_balance_cpu(struct sched_group
* complete).
*/
static void
-build_group_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
+build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
{
const struct cpumask *sg_span = sched_group_cpus(sg);
struct sd_data *sdd = sd->private;
@@ -684,14 +681,14 @@ static void init_overlap_sched_group(str
struct cpumask *sg_span;
int cpu;
- build_group_mask(sd, sg, mask);
+ build_balance_mask(sd, sg, mask);
cpu = cpumask_first_and(sched_group_cpus(sg), mask);
sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
if (atomic_inc_return(&sg->sgc->ref) == 1)
- cpumask_copy(sched_group_mask(sg), mask);
+ cpumask_copy(group_balance_mask(sg), mask);
else
- WARN_ON_ONCE(!cpumask_equal(sched_group_mask(sg), mask));
+ WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));
/*
* Initialize sgc->capacity such that even if we mess up the
@@ -888,7 +885,7 @@ build_sched_groups(struct sched_domain *
continue;
group = get_group(i, sdd, &sg);
- cpumask_copy(sched_group_mask(sg), sched_group_cpus(sg));
+ cpumask_copy(group_balance_mask(sg), sched_group_cpus(sg));
for_each_cpu(j, span) {
if (get_group(j, sdd, NULL) != group)
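For completeness, a minimal userspace model of how the renamed mask is
consumed (plain uint64_t bitmasks and hypothetical model_* helpers, not the
kernel cpumask/sched API): group_balance_cpu() is the first CPU set in the
balance mask, and should_we_balance() roughly picks the first idle CPU in that
mask, falling back to the balance CPU otherwise.

#include <stdint.h>
#include <stdio.h>

/* First set bit, models cpumask_first(); -1 if the mask is empty. */
static int first_cpu(uint64_t mask)
{
        for (int cpu = 0; cpu < 64; cpu++)
                if (mask & (1ULL << cpu))
                        return cpu;
        return -1;
}

/* Models group_balance_cpu(): first CPU of the balance mask. */
static int model_group_balance_cpu(uint64_t balance_mask)
{
        return first_cpu(balance_mask);
}

/*
 * Sketch of the CPU selection in should_we_balance(): prefer the first
 * idle CPU in (balance_mask & env->cpus); otherwise use the balance CPU.
 */
static int model_pick_balance_cpu(uint64_t balance_mask, uint64_t env_cpus,
                                  uint64_t idle_cpus)
{
        uint64_t candidates = balance_mask & env_cpus & idle_cpus;

        if (candidates)
                return first_cpu(candidates);
        return model_group_balance_cpu(balance_mask);
}

int main(void)
{
        uint64_t balance_mask = 0x05;  /* CPUs 0 and 2 may balance this group */
        uint64_t env_cpus     = 0x0f;  /* CPUs 0-3 are eligible this pass */
        uint64_t idle_cpus    = 0x04;  /* only CPU 2 is idle */

        printf("balance cpu: %d\n", model_group_balance_cpu(balance_mask)); /* 0 */
        printf("chosen cpu:  %d\n",
               model_pick_balance_cpu(balance_mask, env_cpus, idle_cpus));  /* 2 */
        return 0;
}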