Message-ID: <20140623125233.GZ19860@laptop.programming.kicks-ass.net>
Date: Mon, 23 Jun 2014 14:52:33 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Tim Chen <tim.c.chen@...ux.intel.com>
Cc: Ingo Molnar <mingo@...e.hu>, Andi Kleen <andi@...stfloor.org>,
Michel Lespinasse <walken@...gle.com>,
Rik van Riel <riel@...hat.com>,
Peter Hurley <peter@...leysoftware.com>,
Jason Low <jason.low2@...com>,
Davidlohr Bueso <davidlohr@...com>,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2] sched: Fast idling of CPU when system is partially
loaded
On Mon, Jun 16, 2014 at 12:48:47PM -0700, Tim Chen wrote:
> +++ b/kernel/sched/fair.c
> @@ -5863,7 +5863,8 @@ static inline int sg_capacity(struct lb_env *env, struct sched_group *group)
> */
> static inline void update_sg_lb_stats(struct lb_env *env,
> struct sched_group *group, int load_idx,
> - int local_group, struct sg_lb_stats *sgs)
> + int local_group, struct sg_lb_stats *sgs,
> + bool *overload)
> {
> unsigned long load;
> int i;
> @@ -5881,6 +5882,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
>
> sgs->group_load += load;
> sgs->sum_nr_running += rq->nr_running;
> + if (overload && rq->nr_running > 1)
> + *overload = true;
> #ifdef CONFIG_NUMA_BALANCING
> sgs->nr_numa_running += rq->nr_numa_running;
> sgs->nr_preferred_running += rq->nr_preferred_running;
> @@ -5991,6 +5994,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
> struct sched_group *sg = env->sd->groups;
> struct sg_lb_stats tmp_sgs;
> int load_idx, prefer_sibling = 0;
> + bool overload = false;
>
> if (child && child->flags & SD_PREFER_SIBLING)
> prefer_sibling = 1;
> @@ -6011,7 +6015,13 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
> update_group_power(env->sd, env->dst_cpu);
> }
>
> - update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
> + if (env->sd->parent)
> + update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
> + NULL);
> + else
> + /* gather overload info if we are at root domain */
> + update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
> + &overload);
>
> if (local_group)
> goto next_group;
> @@ -6045,6 +6055,13 @@ next_group:
>
> if (env->sd->flags & SD_NUMA)
> env->fbq_type = fbq_classify_group(&sds->busiest_stat);
> +
> + if (!env->sd->parent) {
> + /* update overload indicator if we are at root domain */
> + if (env->dst_rq->rd->overload != overload)
> + env->dst_rq->rd->overload = overload;
> + }
> +
> }
>
> /**
So I don't get why we can't do the below; I think Jason tried to ask the
same...
Making that overload thing unconditional makes the code simpler and the
cost is about the same; it doesn't matter if we test the pointer or
->nr_running, which we've already loaded anyhow.
Also, with only a single update_sg_lb_stats() callsite, GCC can more
easily inline the lot.
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5886,7 +5886,7 @@ static inline void update_sg_lb_stats(st
sgs->group_load += load;
sgs->sum_nr_running += rq->nr_running;
- if (overload && rq->nr_running > 1)
+ if (rq->nr_running > 1)
*overload = true;
#ifdef CONFIG_NUMA_BALANCING
sgs->nr_numa_running += rq->nr_numa_running;
@@ -6019,13 +6019,7 @@ static inline void update_sd_lb_stats(st
update_group_capacity(env->sd, env->dst_cpu);
}
- if (env->sd->parent)
- update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
- NULL);
- else
- /* gather overload info if we are at root domain */
- update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
- &overload);
+ update_sg_lb_stats(env, sg, load_idx, local_group, sgs, &overload);
if (local_group)
goto next_group;
@@ -6065,7 +6059,6 @@ static inline void update_sd_lb_stats(st
if (env->dst_rq->rd->overload != overload)
env->dst_rq->rd->overload = overload;
}
-
}
/**
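For anyone reading along outside the tree, the shape this reduces to, as a
minimal userspace sketch (rq and root_domain below are simplified stand-ins
for the kernel structs, not the real ones):

#include <stdbool.h>
#include <stdio.h>

struct rq { int nr_running; };

struct root_domain {
	/* shared across CPUs: only written when the value changes */
	bool overload;
};

/* unconditional test: nr_running is loaded for the stats anyway */
static void update_stats(const struct rq *rqs, int nr_cpus, bool *overload)
{
	for (int i = 0; i < nr_cpus; i++) {
		if (rqs[i].nr_running > 1)
			*overload = true;
	}
}

int main(void)
{
	struct rq rqs[4] = { {1}, {0}, {3}, {1} };
	struct root_domain rd = { .overload = false };
	bool overload = false;

	update_stats(rqs, 4, &overload);

	/* conditional store avoids dirtying the shared cache line */
	if (rd.overload != overload)
		rd.overload = overload;

	printf("overload = %d\n", rd.overload);
	return 0;
}

The point being that the *overload store only happens for runqueues that
are actually overloaded, so passing the pointer unconditionally costs
nothing on the common path.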