Date:	Wed, 24 Sep 2008 21:48:09 +0530
From:	Vaidyanathan Srinivasan <svaidy@...ux.vnet.ibm.com>
To:	Linux Kernel <linux-kernel@...r.kernel.org>,
	Suresh B Siddha <suresh.b.siddha@...el.com>,
	Venkatesh Pallipadi <venkatesh.pallipadi@...el.com>,
	Peter Zijlstra <a.p.zijlstra@...llo.nl>
Cc:	Ingo Molnar <mingo@...e.hu>, Dipankar Sarma <dipankar@...ibm.com>,
	Balbir Singh <balbir@...ux.vnet.ibm.com>,
	Vatsa <vatsa@...ux.vnet.ibm.com>,
	Gautham R Shenoy <ego@...ibm.com>,
	Andi Kleen <andi@...stfloor.org>,
	David Collier-Brown <davecb@....com>,
	Tim Connors <tconnors@...ro.swin.edu.au>,
	Max Krasnyansky <maxk@...lcomm.com>,
	Vaidyanathan Srinivasan <svaidy@...ux.vnet.ibm.com>
Subject: [RFC PATCH v1 3/5] Collect statistics required for powersave balance

Update the sched-domain-level statistics with the minimum-load group
and with the group leader that still has capacity to pull more tasks.
Also suggest a power-save task movement when the domain is otherwise
balanced.

Signed-off-by: Vaidyanathan Srinivasan <svaidy@...ux.vnet.ibm.com>
---
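Note for reviewers: the code below uses struct sd_loads and struct
group_loads, which are defined elsewhere in this series.  Purely as a
reading aid, here is a minimal sketch of just the fields this patch
touches, reconstructed from usage alone (field types and the real
definitions may well differ):

	/* Hypothetical reconstruction -- not part of this patch. */
	struct group_loads {
		struct sched_group *group;	 /* group being accounted */
		unsigned long nr_running;	 /* running tasks in group */
		unsigned long avg_load_per_task; /* used as imbalance hint */
	};

	struct sd_loads {
		struct sched_domain *sd;	  /* domain being balanced */
		struct group_loads local;	  /* this cpu's own group */
		struct group_loads min_load_group;	    /* pull source */
		struct group_loads power_save_leader_group; /* pull target */
	};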

 kernel/sched.c |   97 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 97 insertions(+), 0 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 8c394a4..dd87061 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3219,6 +3219,103 @@ void update_sd_loads(struct sd_loads *sdl, struct group_loads *gl)
 
 }
 
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+void update_powersavings_group_loads(struct sd_loads *sdl,
+				     struct group_loads *gl,
+				     enum cpu_idle_type idle)
+{
+	int group_capacity = gl->group->__cpu_power / SCHED_LOAD_SCALE;
+
+	/*
+	 * Busy processors will not participate in power savings
+	 * balance.
+	 */
+	if (idle == CPU_NOT_IDLE ||
+			!(sdl->sd->flags & SD_POWERSAVINGS_BALANCE))
+		return;
+
+	/*
+	 * If this group is idle or completely loaded, it cannot
+	 * take part in power savings balance at this domain;
+	 * this holds for local and non-local groups alike.
+	 */
+	if (gl->nr_running >= group_capacity || gl->nr_running == 0)
+		return;
+
+	/*
+	 * Track the group with the least non-idle load.  This is
+	 * the group from which load will be pulled away in order
+	 * to save power.
+	 */
+	if (!sdl->min_load_group.group)
+		sdl->min_load_group = *gl;
+	else {
+		if (gl->nr_running < sdl->min_load_group.nr_running)
+			sdl->min_load_group = *gl;
+		/* If the loads are equal, prefer the group whose
+		 * first cpu has the lower logical number
+		 */
+		else if (gl->nr_running == sdl->min_load_group.nr_running &&
+			 first_cpu(gl->group->cpumask) <
+			 first_cpu(sdl->min_load_group.group->cpumask))
+			sdl->min_load_group = *gl;
+	}
+
+	/*
+	 * Track the group that is closest to its capacity yet
+	 * still has room to pick up load from another group,
+	 * letting that group's cpus go idle to save more power.
+	 */
+
+	if (gl->nr_running > 0 && gl->nr_running <= group_capacity - 1) {
+		if (!sdl->power_save_leader_group.group)
+			sdl->power_save_leader_group = *gl;
+		else {
+			if (gl->nr_running >
+			    sdl->power_save_leader_group.nr_running)
+				sdl->power_save_leader_group = *gl;
+			else if (gl->nr_running ==
+				 sdl->power_save_leader_group.nr_running &&
+				 first_cpu(gl->group->cpumask) < first_cpu(
+				 sdl->power_save_leader_group.group->cpumask))
+				sdl->power_save_leader_group = *gl;
+		}
+	}
+}
+
+static struct sched_group *powersavings_balance_group(struct sd_loads *sdl,
+	struct group_loads *gl, enum cpu_idle_type idle,
+	unsigned long *imbalance)
+{
+	*imbalance = 0;
+	if (idle == CPU_NOT_IDLE || !(sdl->sd->flags & SD_POWERSAVINGS_BALANCE))
+		return NULL;
+
+	if (sdl->local.group == sdl->power_save_leader_group.group &&
+		sdl->power_save_leader_group.group !=
+		sdl->min_load_group.group) {
+		*imbalance = sdl->min_load_group.avg_load_per_task;
+		return sdl->min_load_group.group;
+	}
+
+	return NULL;
+}
+#else
+void update_powersavings_group_loads(struct sd_loads *sdl,
+			struct group_loads *gl, enum cpu_idle_type idle)
+{
+	return;
+}
+
+static struct sched_group *powersavings_balance_group(struct sd_loads *sdl,
+	struct group_loads *gl, enum cpu_idle_type idle,
+	unsigned long *imbalance)
+{
+	*imbalance = 0;
+	return NULL;
+}
+#endif
+
 /*
  * find_busiest_group finds and returns the busiest CPU group within the
  * domain. It calculates and returns the amount of weighted load which
