Message-Id: <20210212141744.24284-7-vincent.guittot@linaro.org>
Date: Fri, 12 Feb 2021 15:17:43 +0100
From: Vincent Guittot <vincent.guittot@...aro.org>
To: mingo@...hat.com, peterz@...radead.org, juri.lelli@...hat.com,
dietmar.eggemann@....com, rostedt@...dmis.org, bsegall@...gle.com,
mgorman@...e.de, bristot@...hat.com, linux-kernel@...r.kernel.org,
joel@...lfernandes.org, valentin.schneider@....com
Cc: fweisbec@...il.com, tglx@...utronix.de, qais.yousef@....com,
Vincent Guittot <vincent.guittot@...aro.org>
Subject: [PATCH 6/7 v3] sched/fair: trigger the update of blocked load on newly idle cpu

Instead of waking up a random and already idle CPU, we can take advantage
of this_cpu being about to enter idle and run the ILB there to update the
blocked load.
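
The handoff is a simple set-then-consume protocol on the per-CPU nohz
flags: nohz_newidle_balance() now only records that an update is needed,
and the idle-entry path consumes the flags and runs the update locally.
Below is a minimal userspace sketch of that pattern, assuming C11
atomics; pending_kick, newidle_balance() and run_idle_balance() are
illustrative names, not the kernel's:

#include <stdatomic.h>
#include <stdio.h>

#define STATS_KICK 0x1			/* stands in for NOHZ_STATS_KICK */

static atomic_uint pending_kick;	/* stands in for nohz_flags(cpu) */

/* Where kick_ilb(NOHZ_STATS_KICK) used to be called: only set a flag. */
static void newidle_balance(void)
{
	atomic_fetch_or(&pending_kick, STATS_KICK);
}

/* Run early on the idle-entry path, like nohz_run_idle_balance(). */
static void run_idle_balance(void)
{
	/* Atomically read and clear, so the update runs at most once. */
	unsigned int flags = atomic_fetch_and(&pending_kick, ~STATS_KICK);

	if (flags & STATS_KICK)
		printf("updating blocked load before entering idle\n");
}

int main(void)
{
	newidle_balance();	/* no runnable task found: flag the update */
	run_idle_balance();	/* about to enter idle: update locally */
	return 0;
}

The consumer in the patch additionally bails out when need_resched() is
set, because this_cpu is not going to enter idle in that case.
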
Signed-off-by: Vincent Guittot <vincent.guittot@...aro.org>
---
 kernel/sched/fair.c  | 24 +++++++++++++++++++++---
 kernel/sched/idle.c  |  6 ++++++
 kernel/sched/sched.h |  5 +++++
 3 files changed, 32 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5d285d93e433..cd0ea635225e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10453,6 +10453,24 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 	return true;
 }
 
+/*
+ * Check if we need to run the ILB for updating blocked load before entering
+ * idle state.
+ */
+void nohz_run_idle_balance(int cpu)
+{
+	unsigned int flags;
+
+	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
+
+	if (flags && !need_resched()) {
+		struct rq *rq = cpu_rq(cpu);
+
+		rq->nohz_idle_balance = flags;
+		nohz_idle_balance(rq, CPU_IDLE);
+	}
+}
+
 static void nohz_newidle_balance(struct rq *this_rq)
 {
 	int this_cpu = this_rq->cpu;
@@ -10474,10 +10492,10 @@ static void nohz_newidle_balance(struct rq *this_rq)
 		return;
 
 	/*
-	 * Blocked load of idle CPUs need to be updated.
-	 * Kick an ILB to update statistics.
+	 * Set the need to trigger ILB in order to update blocked load
+	 * before entering idle state.
 	 */
-	kick_ilb(NOHZ_STATS_KICK);
+	atomic_or(NOHZ_STATS_KICK, nohz_flags(this_cpu));
 }
 
 #else /* !CONFIG_NO_HZ_COMMON */
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 305727ea0677..52a4e9ce2f9b 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -261,6 +261,12 @@ static void cpuidle_idle_call(void)
 static void do_idle(void)
 {
 	int cpu = smp_processor_id();
+
+	/*
+	 * Check if we need to update some blocked load
+	 */
+	nohz_run_idle_balance(cpu);
+
 	/*
 	 * If the arch has a polling bit, we maintain an invariant:
 	 *
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6edc67df3554..17de50acb88d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2374,6 +2374,11 @@ extern void nohz_balance_exit_idle(struct rq *rq);
 static inline void nohz_balance_exit_idle(struct rq *rq) { }
 #endif
 
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+extern void nohz_run_idle_balance(int cpu);
+#else
+static inline void nohz_run_idle_balance(int cpu) { }
+#endif
 #ifdef CONFIG_SMP
 static inline
--
2.17.1