Message-Id: <1541767840-93588-6-git-send-email-steven.sistare@oracle.com>
Date: Fri, 9 Nov 2018 04:50:35 -0800
From: Steve Sistare <steven.sistare@...cle.com>
To: mingo@...hat.com, peterz@...radead.org
Cc: subhra.mazumdar@...cle.com, dhaval.giani@...cle.com,
daniel.m.jordan@...cle.com, pavel.tatashin@...rosoft.com,
matt@...eblueprint.co.uk, umgwanakikbuti@...il.com,
riel@...hat.com, jbacik@...com, juri.lelli@...hat.com,
valentin.schneider@....com, vincent.guittot@...aro.org,
quentin.perret@....com, steven.sistare@...cle.com,
linux-kernel@...r.kernel.org
Subject: [PATCH v3 05/10] sched/fair: Hoist idle_stamp up from idle_balance
Move the update of idle_stamp from idle_balance() to the call site in
pick_next_task_fair(), to prepare for a future patch that adds work to
pick_next_task_fair() which must be included in the idle_stamp interval.

No functional change.
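
For illustration, a minimal standalone C sketch of the timing pattern being
hoisted here: stamp the clock before the potentially long balancing call, and
clear the stamp only if the call produced runnable work, so the time spent
balancing is accounted as idle time. The names (run_queue, try_pull_work,
clock_now, pick_next) are hypothetical, not the kernel's:

#include <stdint.h>

struct run_queue {
	uint64_t idle_stamp;			/* 0 means "not idle" */
	int nr_tasks;
};

uint64_t clock_now(void);			/* assumed clock source */
int try_pull_work(struct run_queue *rq);	/* returns # of tasks pulled */

static int pick_next(struct run_queue *rq)
{
	int new_tasks;

	/* Stamp _before_ balancing so its duration counts as idle time. */
	rq->idle_stamp = clock_now();

	new_tasks = try_pull_work(rq);
	if (new_tasks)
		rq->idle_stamp = 0;	/* work was found: no longer idle */

	return new_tasks;
}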
Signed-off-by: Steve Sistare <steven.sistare@...cle.com>
---
kernel/sched/fair.c | 23 ++++++++++++++---------
1 file changed, 14 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9031d39..da368ed 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3725,6 +3725,8 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
rq->misfit_task_load = task_h_load(p);
}
+#define IF_SMP(statement) statement
+
static void overload_clear(struct rq *rq)
{
struct sparsemask *overload_cpus;
@@ -3770,6 +3772,8 @@ static inline int idle_balance(struct rq *rq, struct rq_flags *rf)
return 0;
}
+#define IF_SMP(statement) /* empty */
+
static inline void overload_clear(struct rq *rq) {}
static inline void overload_set(struct rq *rq) {}
@@ -6764,8 +6768,18 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
idle:
update_misfit_status(NULL, rq);
+
+ /*
+ * We must set idle_stamp _before_ calling idle_balance(), such that we
+ * measure the duration of idle_balance() as idle time.
+ */
+ IF_SMP(rq->idle_stamp = rq_clock(rq);)
+
new_tasks = idle_balance(rq, rf);
+ if (new_tasks)
+ IF_SMP(rq->idle_stamp = 0;)
+
/*
* Because idle_balance() releases (and re-acquires) rq->lock, it is
* possible for any higher priority task to appear. In that case we
@@ -9611,12 +9625,6 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
u64 curr_cost = 0;
/*
- * We must set idle_stamp _before_ calling idle_balance(), such that we
- * measure the duration of idle_balance() as idle time.
- */
- this_rq->idle_stamp = rq_clock(this_rq);
-
- /*
* Do not pull tasks towards !active CPUs...
*/
if (!cpu_active(this_cpu))
@@ -9707,9 +9715,6 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
if (this_rq->nr_running != this_rq->cfs.h_nr_running)
pulled_task = -1;
- if (pulled_task)
- this_rq->idle_stamp = 0;
-
rq_repin_lock(this_rq, rf);
return pulled_task;
--
1.8.3.1
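
The IF_SMP() helper introduced above is a small statement macro: on SMP builds
it expands to its argument, and on !SMP builds it expands to nothing, which
avoids wrapping individual statements in #ifdef CONFIG_SMP. A minimal
standalone sketch of the same pattern, using a hypothetical MY_SMP switch
rather than the kernel's CONFIG_SMP plumbing:

#include <stdio.h>

#ifdef MY_SMP
#define IF_SMP(statement)	statement
#else
#define IF_SMP(statement)	/* empty */
#endif

int main(void)
{
	int idle_stamp = -1;

	/* Executes only when built with -DMY_SMP; otherwise compiled out. */
	IF_SMP(idle_stamp = 0;)

	printf("idle_stamp = %d\n", idle_stamp);
	return 0;
}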