Message-Id: <1435018085-7004-2-git-send-email-yuyang.du@intel.com>
Date: Tue, 23 Jun 2015 08:08:02 +0800
From: Yuyang Du <yuyang.du@...el.com>
To: mingo@...nel.org, peterz@...radead.org,
linux-kernel@...r.kernel.org
Cc: pjt@...gle.com, bsegall@...gle.com, morten.rasmussen@....com,
vincent.guittot@...aro.org, dietmar.eggemann@....com,
len.brown@...el.com, rafael.j.wysocki@...el.com,
fengguang.wu@...el.com, boqun.feng@...il.com,
srikar@...ux.vnet.ibm.com, Yuyang Du <yuyang.du@...el.com>
Subject: [PATCH v9 1/4] sched: Remove rq's runnable avg
The per-rq runnable average (rq->avg) was introduced by Ben Segall with this commit:
commit 18bf2805d9b30cb823d4919b42cd230f59c7ce1f
Author: Ben Segall <bsegall@...gle.com>
Date:   Thu Oct 4 12:51:20 2012 +0200

    sched: Maintain per-rq runnable averages

    Since runqueues do not have a corresponding sched_entity we instead embed a
    sched_avg structure directly.

    Signed-off-by: Ben Segall <bsegall@...gle.com>
    Reviewed-by: Paul Turner <pjt@...gle.com>
    Signed-off-by: Peter Zijlstra <a.p.zijlstra@...llo.nl>
    Link: http://lkml.kernel.org/r/20120823141506.442637130@google.com
    Signed-off-by: Ingo Molnar <mingo@...nel.org>
With CONFIG_FAIR_GROUP_SCHED=y, rq->avg (runnable_avg_sum, avg_period) is used
to compute the root group's contribution in __update_tg_runnable_avg(). However,
that load data has never been consumed since the code was merged into the kernel
in 2012: there is only one root group, so giving it a weight is meaningless.
Since this extra accounting sits in the scheduler's hot path, it is better to
remove it.
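For reference, the helper at the heart of this removal looks like this in the
pre-patch code (taken from the fair.c hunk below); it only feeds rq->avg into
the task-group accounting and, as noted above, nothing reads that data back for
the root group:

	static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
	{
		__update_entity_runnable_avg(rq_clock_task(rq), cpu_of(rq), &rq->avg,
					     runnable, runnable);
		__update_tg_runnable_avg(&rq->avg, &rq->cfs);
	}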
Signed-off-by: Yuyang Du <yuyang.du@...el.com>
---
kernel/sched/debug.c | 7 +------
kernel/sched/fair.c | 25 ++++---------------------
kernel/sched/sched.h | 2 --
3 files changed, 5 insertions(+), 29 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index f94724e..ca39cb7 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -68,13 +68,8 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
#define PN(F) \
SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
- if (!se) {
- struct sched_avg *avg = &cpu_rq(cpu)->avg;
- P(avg->runnable_avg_sum);
- P(avg->avg_period);
+ if (!se)
return;
- }
-
PN(se->exec_start);
PN(se->vruntime);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 40a7fcb..7922532 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2728,19 +2728,12 @@ static inline void __update_group_entity_contrib(struct sched_entity *se)
}
}
-static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
-{
- __update_entity_runnable_avg(rq_clock_task(rq), cpu_of(rq), &rq->avg,
- runnable, runnable);
- __update_tg_runnable_avg(&rq->avg, &rq->cfs);
-}
#else /* CONFIG_FAIR_GROUP_SCHED */
static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
int force_update) {}
static inline void __update_tg_runnable_avg(struct sched_avg *sa,
struct cfs_rq *cfs_rq) {}
static inline void __update_group_entity_contrib(struct sched_entity *se) {}
-static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
#endif /* CONFIG_FAIR_GROUP_SCHED */
static inline void __update_task_entity_contrib(struct sched_entity *se)
@@ -2944,7 +2937,6 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
*/
void idle_enter_fair(struct rq *this_rq)
{
- update_rq_runnable_avg(this_rq, 1);
}
/*
@@ -2954,7 +2946,6 @@ void idle_enter_fair(struct rq *this_rq)
*/
void idle_exit_fair(struct rq *this_rq)
{
- update_rq_runnable_avg(this_rq, 0);
}
static int idle_balance(struct rq *this_rq);
@@ -2963,7 +2954,6 @@ static int idle_balance(struct rq *this_rq);
static inline void update_entity_load_avg(struct sched_entity *se,
int update_cfs_rq) {}
-static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
struct sched_entity *se,
int wakeup) {}
@@ -4262,10 +4252,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
update_entity_load_avg(se, 1);
}
- if (!se) {
- update_rq_runnable_avg(rq, rq->nr_running);
+ if (!se)
add_nr_running(rq, 1);
- }
+
hrtick_update(rq);
}
@@ -4323,10 +4312,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
update_entity_load_avg(se, 1);
}
- if (!se) {
+ if (!se)
sub_nr_running(rq, 1);
- update_rq_runnable_avg(rq, 1);
- }
+
hrtick_update(rq);
}
@@ -6034,9 +6022,6 @@ static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
*/
if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
list_del_leaf_cfs_rq(cfs_rq);
- } else {
- struct rq *rq = rq_of(cfs_rq);
- update_rq_runnable_avg(rq, rq->nr_running);
}
}
@@ -8020,8 +8005,6 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
if (numabalancing_enabled)
task_tick_numa(rq, curr);
-
- update_rq_runnable_avg(rq, 1);
}
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f10a445..d465a5c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -595,8 +595,6 @@ struct rq {
#ifdef CONFIG_FAIR_GROUP_SCHED
/* list of leaf cfs_rq on this cpu: */
struct list_head leaf_cfs_rq_list;
-
- struct sched_avg avg;
#endif /* CONFIG_FAIR_GROUP_SCHED */
/*
--
2.1.4