Message-Id: <1403656568-32445-9-git-send-email-yuyang.du@intel.com>
Date: Wed, 25 Jun 2014 08:36:07 +0800
From: Yuyang Du <yuyang.du@...el.com>
To: mingo@...hat.com, peterz@...radead.org, rafael.j.wysocki@...el.com,
linux-kernel@...r.kernel.org, linux-pm@...r.kernel.org
Cc: arjan.van.de.ven@...el.com, len.brown@...el.com,
alan.cox@...el.com, mark.gross@...el.com, morten.rasmussen@....com,
vincent.guittot@...aro.org, dietmar.eggemann@....com,
rajeev.d.muralidhar@...el.com, vishwesh.m.rudramuni@...el.com,
nicole.chalhoub@...el.com, ajaya.durg@...el.com,
harinarayanan.seshadri@...el.com, jacob.jun.pan@...ux.intel.com,
Yuyang Du <yuyang.du@...el.com>
Subject: [RFC PATCH 8/9 v4] Implement Workload Consolidation in nohz_idle_balance

In the periodic nohz idle balance, skip kicking idle CPUs that are outside
the consolidated (nonshielded) set: a shielded CPU is neither chosen as the
idle load balancer nor balanced on behalf of, so it can stay in its idle
state.
Signed-off-by: Yuyang Du <yuyang.du@...el.com>
---
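[Note to reviewers, kept below the --- so it stays out of git history:
the sketch that follows is a minimal userspace model of the
find_new_ilb() policy this patch introduces, for the path where an
sd_wc domain is present. All names in it (idle_mask, shielded_mask,
find_new_ilb_model) are hypothetical stand-ins, wc_nonshielded_mask()
is approximated by a plain bitmask AND, and the idle_cpu() rechecks
are omitted.]

/*
 * Illustrative model only, not kernel code: pick an idle load
 * balancer from the nohz-idle CPUs, but never pick a shielded CPU
 * that workload consolidation wants to keep idle.
 */
#include <stdio.h>

#define NR_CPUS 8	/* stands in for nr_cpu_ids */

static unsigned int idle_mask;		/* models nohz.idle_cpus_mask */
static unsigned int shielded_mask;	/* models the shielded CPUs */

static int first_cpu(unsigned int mask)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask & (1u << cpu))
			return cpu;
	return NR_CPUS;
}

static int find_new_ilb_model(void)
{
	unsigned int nonshielded;

	/* 0 or 1 idle CPUs: idle load balancing has nothing to do */
	if (__builtin_popcount(idle_mask) < 2)
		return NR_CPUS;

	/* keep only nonshielded CPUs, as wc_nonshielded_mask() does */
	nonshielded = idle_mask & ~shielded_mask;
	if (__builtin_popcount(nonshielded) < 2)
		return NR_CPUS;

	/* kick the first idle CPU that is allowed to take load */
	return first_cpu(nonshielded);
}

int main(void)
{
	idle_mask = 0xf0;	/* CPUs 4-7 are nohz idle */
	shielded_mask = 0x30;	/* CPUs 4-5 must stay idle */

	/* prints "ILB kickee: 6": first idle CPU outside the shield */
	printf("ILB kickee: %d\n", find_new_ilb_model());
	return 0;
}
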
kernel/sched/fair.c | 55 +++++++++++++++++++++++++++++++++++++++++----------
1 file changed, 45 insertions(+), 10 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bf65fde..549f6e0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6983,10 +6983,45 @@ static struct {
 static inline int find_new_ilb(void)
 {
-	int ilb = cpumask_first(nohz.idle_cpus_mask);
+	int ilb;
 
-	if (ilb < nr_cpu_ids && idle_cpu(ilb))
-		return ilb;
+	/*
+	 * Optimize for the case when we have no idle CPUs or only one
+	 * idle CPU. Don't walk the sched_domain hierarchy in such cases
+	 */
+	if (cpumask_weight(nohz.idle_cpus_mask) < 2)
+		return nr_cpu_ids;
+
+	ilb = cpumask_first(nohz.idle_cpus_mask);
+
+	if (ilb < nr_cpu_ids && idle_cpu(ilb)) {
+		struct sched_domain *sd;
+		int this_cpu = smp_processor_id();
+
+		sd = per_cpu(sd_wc, this_cpu);
+		if (sd) {
+			struct cpumask *nonshielded_cpus = __get_cpu_var(load_balance_mask);
+
+			cpumask_copy(nonshielded_cpus, nohz.idle_cpus_mask);
+
+			rcu_read_lock();
+			wc_nonshielded_mask(this_cpu, sd, nonshielded_cpus);
+			rcu_read_unlock();
+
+			if (cpumask_weight(nonshielded_cpus) < 2)
+				return nr_cpu_ids;
+
+			/*
+			 * get idle load balancer again
+			 */
+			ilb = cpumask_first(nonshielded_cpus);
+
+			if (ilb < nr_cpu_ids && idle_cpu(ilb))
+				return ilb;
+		}
+		else
+			return ilb;
+	}
 
 	return nr_cpu_ids;
 }
 
@@ -7217,7 +7252,7 @@ out:
  * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
  * rebalancing for all the cpus for whom scheduler ticks are stopped.
  */
-static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
+static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle, struct cpumask *mask)
 {
 	int this_cpu = this_rq->cpu;
 	struct rq *rq;
@@ -7227,7 +7262,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
 		goto end;
 
-	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
+	for_each_cpu(balance_cpu, mask) {
 		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
 			continue;
 
@@ -7280,10 +7315,10 @@ static inline int nohz_kick_needed(struct rq *rq)
 	if (unlikely(rq->idle_balance))
 		return 0;
 
-       /*
-	* We may be recently in ticked or tickless idle mode. At the first
-	* busy tick after returning from idle, we will update the busy stats.
-	*/
+	/*
+	 * We may be recently in ticked or tickless idle mode. At the first
+	 * busy tick after returning from idle, we will update the busy stats.
+	 */
 	set_cpu_sd_state_busy();
 	nohz_balance_exit_idle(cpu);
 
@@ -7326,7 +7361,7 @@ need_kick:
 	return 1;
 }
 #else
-static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
+static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle, struct cpumask *mask) { }
 #endif
 
 /*
--
1.7.9.5