Message-ID: <20140811113616.31956.17605.stgit@preeti.in.ibm.com>
Date: Mon, 11 Aug 2014 17:06:26 +0530
From: Preeti U Murthy <preeti@...ux.vnet.ibm.com>
To: alex.shi@...el.com, vincent.guittot@...aro.org,
peterz@...radead.org, pjt@...gle.com, efault@....de,
rjw@...ysocki.net, morten.rasmussen@....com,
svaidy@...ux.vnet.ibm.com, arjan@...ux.intel.com, mingo@...nel.org
Cc: nicolas.pitre@...aro.org, len.brown@...el.com, yuyang.du@...el.com,
linaro-kernel@...ts.linaro.org, daniel.lezcano@...aro.org,
corbet@....net, catalin.marinas@....com, markgross@...gnar.org,
sundar.iyer@...el.com, linux-kernel@...r.kernel.org,
dietmar.eggemann@....com, Lorenzo.Pieralisi@....com,
mike.turquette@...aro.org, akpm@...ux-foundation.org,
paulmck@...ux.vnet.ibm.com, tglx@...utronix.de
Subject: [RFC PATCH V2 08/19] sched: move sg/sd_lb_stats struct ahead
From: Alex Shi <alex.shi@...el.com>
Power-aware fork/exec/wake balancing in the incoming patches needs both of
these structs, so move their definitions up, ahead of select_task_rq_fair().
Signed-off-by: Alex Shi <alex.shi@...el.com>
Signed-off-by: Preeti U Murthy <preeti@...ux.vnet.ibm.com>
---
kernel/sched/fair.c | 89 ++++++++++++++++++++++++++-------------------------
1 file changed, 45 insertions(+), 44 deletions(-)
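[Editorial aside, not part of the patch: the move is needed because C requires
a struct's complete definition to be visible before a function can declare an
object of that type, so balancing code added near select_task_rq_fair() later
in the series can only use sg_lb_stats/sd_lb_stats if the definitions appear
earlier in fair.c. A stand-alone sketch of that rule, using a hypothetical
demo struct rather than the kernel types:

/* Hypothetical stand-alone illustration, not kernel code. */
#include <stdio.h>

/* The full definition must precede any function that uses the type by value. */
struct sg_lb_stats_demo {
	unsigned long group_load;
	unsigned int  sum_nr_running;
};

/* If this function were placed above the definition, declaring 'sgs'
 * would fail with "storage size of 'sgs' isn't known". */
static unsigned long demo_avg_load(void)
{
	struct sg_lb_stats_demo sgs = {
		.group_load = 2048,
		.sum_nr_running = 2,
	};

	return sgs.group_load / sgs.sum_nr_running;
}

int main(void)
{
	printf("%lu\n", demo_avg_load());
	return 0;
}
]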
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3d6d081..031d115 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4505,6 +4505,51 @@ done:
}
/*
+ * sg_lb_stats - stats of a sched_group required for load_balancing
+ */
+struct sg_lb_stats {
+ unsigned long avg_load; /* Avg load across the CPUs of the group */
+ unsigned long group_load; /* Total load over the CPUs of the group */
+ unsigned long sum_weighted_load; /* Weighted load of group's tasks */
+ unsigned long load_per_task;
+ unsigned long group_capacity;
+ unsigned int sum_nr_running; /* Nr tasks running in the group */
+ unsigned int group_capacity_factor;
+ unsigned int idle_cpus;
+ unsigned int group_weight;
+ int group_imb; /* Is there an imbalance in the group ? */
+ int group_has_free_capacity;
+#ifdef CONFIG_NUMA_BALANCING
+ unsigned int nr_numa_running;
+ unsigned int nr_preferred_running;
+#endif
+#ifdef CONFIG_SCHED_POWER
+ unsigned int group_util; /* sum utilization of group */
+#endif
+};
+
+/*
+ * sd_lb_stats - Structure to store the statistics of a sched_domain
+ * during load balancing.
+ */
+struct sd_lb_stats {
+ struct sched_group *busiest; /* Busiest group in this sd */
+ struct sched_group *local; /* Local group in this sd */
+ unsigned long total_load; /* Total load of all groups in sd */
+ unsigned long total_capacity; /* Total capacity of all groups in sd */
+ unsigned long avg_load; /* Average load across all groups in sd */
+
+ struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
+ struct sg_lb_stats local_stat; /* Statistics of the local group */
+
+#ifdef CONFIG_SCHED_POWER
+ /* Variables of power aware scheduling */
+ unsigned int sd_util; /* sum utilization of this domain */
+ struct sched_group *group_leader; /* Group which relieves group_min */
+#endif
+};
+
+/*
* select_task_rq_fair: Select target runqueue for the waking task in domains
* that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
* SD_BALANCE_FORK, or SD_BALANCE_EXEC.
@@ -5574,50 +5619,6 @@ static unsigned long task_h_load(struct task_struct *p)
#endif
/********** Helpers for find_busiest_group ************************/
-/*
- * sg_lb_stats - stats of a sched_group required for load_balancing
- */
-struct sg_lb_stats {
- unsigned long avg_load; /*Avg load across the CPUs of the group */
- unsigned long group_load; /* Total load over the CPUs of the group */
- unsigned long sum_weighted_load; /* Weighted load of group's tasks */
- unsigned long load_per_task;
- unsigned long group_capacity;
- unsigned int sum_nr_running; /* Nr tasks running in the group */
- unsigned int group_capacity_factor;
- unsigned int idle_cpus;
- unsigned int group_weight;
- int group_imb; /* Is there an imbalance in the group ? */
- int group_has_free_capacity;
-#ifdef CONFIG_NUMA_BALANCING
- unsigned int nr_numa_running;
- unsigned int nr_preferred_running;
-#endif
-#ifdef CONFIG_SCHED_POWER
- unsigned int group_util; /* sum utilization of group */
-#endif
-};
-
-/*
- * sd_lb_stats - Structure to store the statistics of a sched_domain
- * during load balancing.
- */
-struct sd_lb_stats {
- struct sched_group *busiest; /* Busiest group in this sd */
- struct sched_group *local; /* Local group in this sd */
- unsigned long total_load; /* Total load of all groups in sd */
- unsigned long total_capacity; /* Total capacity of all groups in sd */
- unsigned long avg_load; /* Average load across all groups in sd */
-
- struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
- struct sg_lb_stats local_stat; /* Statistics of the local group */
-
-#ifdef CONFIG_SCHED_POWER
- /* Varibles of power aware scheduling */
- unsigned int sd_util; /* sum utilization of this domain */
- struct sched_group *group_leader; /* Group which relieves group_min */
-#endif
-};
static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
{
--
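[Editorial aside: for readers new to these structs, during load balancing the
scheduler fills one sg_lb_stats per sched_group and keeps the interesting ones
(local and busiest) inside a single sd_lb_stats for the domain. A deliberately
simplified, stand-alone sketch of that relationship follows; the field set and
the selection logic are reduced to a minimum and are not the kernel's actual
update_sd_lb_stats()/find_busiest_group() code:

#include <stdio.h>

/* Reduced stand-ins for the kernel structs moved in this patch. */
struct sg_stats_demo {
	unsigned long avg_load;
	unsigned int  sum_nr_running;
};

struct sd_stats_demo {
	int busiest;			/* index of busiest group, -1 if none */
	unsigned long total_load;
	struct sg_stats_demo busiest_stat;
};

/* Walk the groups of a domain, accumulate per-group stats and remember
 * the most loaded group -- the same overall shape as update_sd_lb_stats(). */
static void demo_update_sd_stats(const unsigned long *group_loads,
				 const unsigned int *group_tasks,
				 int nr_groups, struct sd_stats_demo *sds)
{
	int i;

	sds->busiest = -1;
	sds->total_load = 0;

	for (i = 0; i < nr_groups; i++) {
		struct sg_stats_demo sgs = {
			.avg_load = group_loads[i],
			.sum_nr_running = group_tasks[i],
		};

		sds->total_load += sgs.avg_load;
		if (sds->busiest < 0 ||
		    sgs.avg_load > sds->busiest_stat.avg_load) {
			sds->busiest = i;
			sds->busiest_stat = sgs;
		}
	}
}

int main(void)
{
	unsigned long loads[] = { 512, 2048, 1024 };
	unsigned int tasks[]  = { 1, 3, 2 };
	struct sd_stats_demo sds;

	demo_update_sd_stats(loads, tasks, 3, &sds);
	printf("busiest group %d, load %lu of %lu total\n",
	       sds.busiest, sds.busiest_stat.avg_load, sds.total_load);
	return 0;
}
]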