Message-Id: <1462226078-31904-9-git-send-email-yuyang.du@intel.com>
Date: Tue, 3 May 2016 05:54:34 +0800
From: Yuyang Du <yuyang.du@intel.com>
To: peterz@infradead.org, mingo@kernel.org,
	linux-kernel@vger.kernel.org
Cc: bsegall@google.com, pjt@google.com, morten.rasmussen@arm.com,
	vincent.guittot@linaro.org, dietmar.eggemann@arm.com,
	juri.lelli@arm.com, Yuyang Du <yuyang.du@intel.com>
Subject: [PATCH v2 08/12] sched/fair: Remove SCHED_LOAD_SHIFT and SCHED_LOAD_SCALE
After the sched metrics cleanup, these two definitions only cause
ambiguity and are not needed any more. Use NICE_0_LOAD_SHIFT and
NICE_0_LOAD instead (their names state clearly what they are).
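
As a rough userspace sketch of the calibration this relies on (not
kernel code; it assumes SCHED_FIXEDPOINT_SHIFT == 10 and the default
low-resolution scale_load(), with 1024 standing in for the nice-0
entry of sched_prio_to_weight[]):

	#include <stdio.h>

	#define SCHED_FIXEDPOINT_SHIFT	10

	/* After this patch, NICE_0_LOAD is defined from its own shift. */
	#define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
	#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)

	/* Low-resolution load: scale_load() is a no-op. */
	#define scale_load(w)		(w)

	int main(void)
	{
		long nice_0_weight = 1024;	/* sched_prio_to_weight[20] */

		/* The invariant documented in kernel/sched/sched.h: */
		printf("calibrated: %d\n",
		       scale_load(nice_0_weight) == NICE_0_LOAD);
		return 0;
	}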
Suggested-by: Ben Segall <bsegall@...gle.com>
Signed-off-by: Yuyang Du <yuyang.du@...el.com>
---
kernel/sched/fair.c | 4 ++--
kernel/sched/sched.h | 22 +++++++++++-----------
2 files changed, 13 insertions(+), 13 deletions(-)
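
Not part of the patch: a minimal userspace sketch of the
calculate_imbalance() change below, using hypothetical group numbers
and, for illustration, the (currently disabled) high-resolution load
branch where NICE_0_LOAD_SHIFT is 20. Capacity is always in
SCHED_CAPACITY_SCALE units, so the nice-0 load must be scaled down to
the same resolution before it is compared against group capacity:

	#include <stdio.h>

	#define SCHED_FIXEDPOINT_SHIFT	10
	#define SCHED_CAPACITY_SCALE	(1L << 10)

	/* Pretend the high-resolution (BITS_PER_LONG > 32) branch were on. */
	#define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
	#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
	#define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)

	int main(void)
	{
		unsigned long nr_running = 3;	/* hypothetical group state */
		unsigned long capacity = 2 * SCHED_CAPACITY_SCALE;

		/* scale_load_down(NICE_0_LOAD) == 1024: capacity units */
		unsigned long load_above_capacity =
				nr_running * scale_load_down(NICE_0_LOAD);

		if (load_above_capacity > capacity)
			load_above_capacity -= capacity;
		else
			load_above_capacity = 0;

		/* 3 * 1024 - 2048 = 1024 */
		printf("load_above_capacity = %lu\n", load_above_capacity);
		return 0;
	}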
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 69bfb07..fa79820 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -721,7 +721,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
struct sched_avg *sa = &se->avg;
- long cap = (long)(scale_load_down(SCHED_LOAD_SCALE) - cfs_rq->avg.util_avg) / 2;
+ long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
if (cap > 0) {
if (cfs_rq->avg.util_avg != 0) {
@@ -7017,7 +7017,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
if (busiest->group_type == group_overloaded &&
local->group_type == group_overloaded) {
load_above_capacity = busiest->sum_nr_running *
- SCHED_LOAD_SCALE;
+ scale_load_down(NICE_0_LOAD);
if (load_above_capacity > busiest->group_capacity)
load_above_capacity -= busiest->group_capacity;
else
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 996a137..1a3be6f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -54,25 +54,25 @@ static inline void cpu_load_update_active(struct rq *this_rq) { }
* increased costs.
*/
#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
-# define SCHED_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
+# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w) ((w) >> SCHED_FIXEDPOINT_SHIFT)
#else
-# define SCHED_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT)
+# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w) (w)
# define scale_load_down(w) (w)
#endif
-#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
-
/*
- * NICE_0's weight (visible to user) and its load (invisible to user) have
- * independent ranges, but they should be well calibrated. We use scale_load()
- * and scale_load_down(w) to convert between them, the following must be true:
- * scale_load(sched_prio_to_weight[20]) == NICE_0_LOAD
+ * Task weight (visible to user) and its load (invisible to user) have
+ * independent resolution, but they should be well calibrated. We use
+ * scale_load() and scale_load_down(w) to convert between them. The
+ * following must be true:
+ *
+ * scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
+ *
*/
-#define NICE_0_LOAD SCHED_LOAD_SCALE
-#define NICE_0_SHIFT SCHED_LOAD_SHIFT
+#define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT)
/*
* Single value that decides SCHED_DEADLINE internal math precision.
@@ -861,7 +861,7 @@ DECLARE_PER_CPU(struct sched_domain *, sd_asym);
struct sched_group_capacity {
atomic_t ref;
/*
- * CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
+ * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
* for a single CPU.
*/
unsigned int capacity;
--
1.7.9.5