Date:	Wed, 9 May 2012 23:43:10 +0200
From:	"Rafael J. Wysocki" <rjw@...k.pl>
To:	Linux PM list <linux-pm@...r.kernel.org>
Cc:	LKML <linux-kernel@...r.kernel.org>, Len Brown <lenb@...nel.org>,
	Colin Cross <ccross@...roid.com>,
	Kevin Hilman <khilman@...com>,
	Magnus Damm <magnus.damm@...il.com>,
	Arjan van de Ven <arjan@...ux.intel.com>,
	Santosh Shilimkar <santosh.shilimkar@...com>
Subject: [RFC][PATCH 2/2] PM / Domains: Add preliminary cpuidle support

From: Rafael J. Wysocki <rjw@...k.pl>

On some systems there are CPU cores located in the same power
domains as I/O devices.  In that case, power can only be removed from
the domain if all of the I/O devices in it are unused and the CPU
core is idle.  Add preliminary support for that to the generic PM
domains framework.

This assumes that there is only one CPU core in the system.  The
feature is supposed to be set up in the following way.

First, the platform is expected to provide a cpuidle driver with one
extra state designated for the generic PM domains code to handle.
This state should be initially disabled, and its exit_latency value
should be set to whatever time is needed to bring up the CPU core
itself after restoring power to it, not including the domain's
power-on latency.  Its .enter() callback should point to a procedure
that will save the CPU core's state as appropriate before power
removal.  On success, that procedure should return the same value as
was passed to it as its third argument, but it should not put the CPU
core into a C-state.  If, however, it is about to return the index of
a different cpuidle state, it should make sure that the CPU is put
into that state before it returns.

The remaining characteristics of the extra cpuidle state, referred to
below as the "domain" cpuidle state (e.g. power usage, target
residency), should be populated in accordance with the properties of
the hardware.

Next, the platform should execute genpd_attach_cpuidle() on the PM
domain containing the CPU core (a setup sketch follows the list
below).  That will cause the generic PM domains framework to treat
that domain in a special way such that:

 * When all devices in the domain have been suspended and it is about
   to be turned off, the states of the devices will be saved, but
   power will not be removed from the domain.  Instead, the "domain"
   cpuidle state will be enabled so that power can be removed from
   the domain when the CPU core is idle and the state has been chosen
   as the target by the cpuidle governor.  In that case, before
   removing power from the domain, the framework will execute the
   .enter() callback initially defined for the "domain" state.

 * When the first I/O device in the domain is resumed and
   __pm_genpd_poweron() is called for the first time after
   power has been removed from the domain, the "domain" cpuidle
   state will be disabled to avoid subsequent surprise power removals
   via cpuidle.
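
And here is a rough sketch of the corresponding platform setup code,
reusing the hypothetical my_idle_driver from the sketch above;
my_pm_domain, my_domain_power_on() and my_domain_power_off() are
made-up names as well, and most of the error handling and device
registration is omitted.

#include <linux/init.h>
#include <linux/cpuidle.h>
#include <linux/pm_domain.h>

/* Hypothetical domain power switch routines. */
extern int my_domain_power_on(struct generic_pm_domain *genpd);
extern int my_domain_power_off(struct generic_pm_domain *genpd);

/* The cpuidle driver from the sketch above. */
extern struct cpuidle_driver my_idle_driver;

static struct generic_pm_domain my_pm_domain = {
	.power_on	= my_domain_power_on,
	.power_off	= my_domain_power_off,
};

static int __init my_platform_pm_init(void)
{
	int ret;

	/*
	 * Register the domain (initially on).  Its I/O devices still
	 * have to be added with pm_genpd_add_device() and the CPU's
	 * cpuidle device registered with cpuidle_register_device()
	 * (neither is shown here).
	 */
	pm_genpd_init(&my_pm_domain, NULL, false);

	/*
	 * The cpuidle driver must be registered before attaching,
	 * because genpd_attach_cpuidle() looks it up through
	 * cpuidle_driver_ref().
	 */
	ret = cpuidle_register_driver(&my_idle_driver);
	if (ret)
		return ret;

	/* Hand state 1 of my_idle_driver over to the genpd framework. */
	return genpd_attach_cpuidle(&my_pm_domain, 1);
}
core_initcall(my_platform_pm_init);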

---
 drivers/base/power/domain.c |  152 +++++++++++++++++++++++++++++++++++++++++++-
 include/linux/cpuidle.h     |    2 
 include/linux/pm_domain.h   |   20 +++++
 3 files changed, 173 insertions(+), 1 deletion(-)

Index: linux/include/linux/pm_domain.h
===================================================================
--- linux.orig/include/linux/pm_domain.h
+++ linux/include/linux/pm_domain.h
@@ -15,6 +15,7 @@
 #include <linux/err.h>
 #include <linux/of.h>
 #include <linux/notifier.h>
+#include <linux/cpuidle.h>
 
 enum gpd_status {
 	GPD_STATE_ACTIVE = 0,	/* PM domain is active */
@@ -45,6 +46,14 @@ struct gpd_dev_ops {
 	bool (*active_wakeup)(struct device *dev);
 };
 
+struct gpd_cpu_data {
+	unsigned int saved_exit_latency;
+	struct cpuidle_state *idle_state;
+	int (*idle_enter)(struct cpuidle_device *dev,
+			  struct cpuidle_driver *drv,
+			  int index);
+};
+
 struct generic_pm_domain {
 	struct dev_pm_domain domain;	/* PM domain operations */
 	struct list_head gpd_list_node;	/* Node in the global PM domains list */
@@ -75,6 +84,7 @@ struct generic_pm_domain {
 	bool max_off_time_changed;
 	bool cached_power_down_ok;
 	struct device_node *of_node; /* Node in device tree */
+	struct gpd_cpu_data *cpu_data;
 };
 
 static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -154,6 +164,8 @@ extern int pm_genpd_add_callbacks(struct
 				  struct gpd_dev_ops *ops,
 				  struct gpd_timing_data *td);
 extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td);
+extern int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state);
+extern int genpd_detach_cpuidle(struct generic_pm_domain *genpd);
 extern void pm_genpd_init(struct generic_pm_domain *genpd,
 			  struct dev_power_governor *gov, bool is_off);
 
@@ -209,6 +221,14 @@ static inline int __pm_genpd_remove_call
 {
 	return -ENOSYS;
 }
+static inline int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st)
+{
+	return -ENOSYS;
+}
+static inline int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+{
+	return -ENOSYS;
+}
 static inline void pm_genpd_init(struct generic_pm_domain *genpd,
 				 struct dev_power_governor *gov, bool is_off)
 {
Index: linux/drivers/base/power/domain.c
===================================================================
--- linux.orig/drivers/base/power/domain.c
+++ linux/drivers/base/power/domain.c
@@ -139,6 +139,19 @@ static void genpd_set_active(struct gene
 		genpd->status = GPD_STATE_ACTIVE;
 }
 
+static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
+{
+	s64 usecs64;
+
+	if (!genpd->cpu_data)
+		return;
+
+	usecs64 = genpd->power_on_latency_ns;
+	do_div(usecs64, NSEC_PER_USEC);
+	usecs64 += genpd->cpu_data->saved_exit_latency;
+	genpd->cpu_data->idle_state->exit_latency = usecs64;
+}
+
 /**
  * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
@@ -203,7 +216,11 @@ int __pm_genpd_poweron(struct generic_pm
 		}
 	}
 
-	if (genpd->power_on) {
+	if (genpd->cpu_data) {
+		cpuidle_pause_and_lock();
+		genpd->cpu_data->idle_state->disable = true;
+		cpuidle_resume_and_unlock();
+	} else if (genpd->power_on) {
 		ktime_t time_start = ktime_get();
 		s64 elapsed_ns;
 
@@ -215,6 +232,7 @@ int __pm_genpd_poweron(struct generic_pm
 		if (elapsed_ns > genpd->power_on_latency_ns) {
 			genpd->power_on_latency_ns = elapsed_ns;
 			genpd->max_off_time_changed = true;
+			genpd_recalc_cpu_exit_latency(genpd);
 			if (genpd->name)
 				pr_warning("%s: Power-on latency exceeded, "
 					"new value %lld ns\n", genpd->name,
@@ -464,6 +482,21 @@ static int pm_genpd_poweroff(struct gene
 		}
 	}
 
+	if (genpd->cpu_data) {
+		/*
+		 * If cpu_data is set, cpuidle should turn the domain off when
+		 * the CPU in it is idle.  In that case we don't decrement the
+		 * subdomain counts of the master domains, so that power is not
+		 * removed from the current domain prematurely as a result of
+		 * cutting off the masters' power.
+		 */
+		genpd->status = GPD_STATE_POWER_OFF;
+		cpuidle_pause_and_lock();
+		genpd->cpu_data->idle_state->disable = false;
+		cpuidle_resume_and_unlock();
+		goto out;
+	}
+
 	if (genpd->power_off) {
 		ktime_t time_start;
 		s64 elapsed_ns;
@@ -1597,6 +1630,123 @@ int __pm_genpd_remove_callbacks(struct d
 }
 EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
 
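+/**
+ * genpd_cpuidle_enter - Idle state entry routine for a CPU power domain.
+ * @dev: cpuidle device of the idling CPU.
+ * @drv: cpuidle driver the "domain" state belongs to.
+ * @index: Index of the "domain" cpuidle state.
+ *
+ * Run the state's original .enter() callback to save the CPU core's context
+ * and, if it returns @index and the domain has a .power_off() callback, remove
+ * power from the domain.  If the original callback chose a different state,
+ * return its index; on any other failure fall back to the driver's safe state.
+ */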
+int genpd_cpuidle_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv,
+			int index)
+{
+	struct cpuidle_state *target = &drv->states[index];
+	struct generic_pm_domain *genpd = target->platform_data;
+	struct gpd_cpu_data *cpu_data;
+	int state;
+
+	if (!genpd || !genpd->cpu_data)
+		goto fall_back;
+
+	cpu_data = genpd->cpu_data;
+	state = cpu_data->idle_enter(dev, drv, index);
+	if (state != index)
+		return state;
+
+	if (!genpd->power_off)
+		goto fall_back;
+
+	/*
+	 * We can safely power off here, because __pm_genpd_poweron() has to
+	 * run in process context, so the CPU has to exit idle before that
+	 * function runs.
+	 */
+	if (!genpd->power_off(genpd))
+		return index;
+
+ fall_back:
+	index = drv->safe_state_index;
+	return drv->states[index].enter(dev, drv, index);
+}
+
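+/**
+ * genpd_attach_cpuidle - Connect a PM domain containing a CPU to cpuidle.
+ * @genpd: PM domain the CPU core belongs to.
+ * @state: Index of the cpuidle state to be handled by the genpd framework.
+ *
+ * Take over the given cpuidle state, which must currently be disabled: save
+ * its original .enter() callback and exit latency, redirect .enter() to
+ * genpd_cpuidle_enter() and add the domain's power-on latency to the state's
+ * exit latency.  Returns 0 on success or a negative error code on failure.
+ */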
+int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
+{
+	struct cpuidle_driver *cpuidle_drv;
+	struct gpd_cpu_data *cpu_data;
+	struct cpuidle_state *idle_state;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(genpd) || state < 0)
+		return -EINVAL;
+
+	genpd_acquire_lock(genpd);
+
+	if (genpd->cpu_data) {
+		ret = -EEXIST;
+		goto out;
+	}
+	cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
+	if (!cpu_data) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	cpuidle_drv = cpuidle_driver_ref();
+	if (!cpuidle_drv) {
+		ret = -ENODEV;
+		goto err_free;
+	}
+	if (cpuidle_drv->state_count <= state) {
+		ret = -EINVAL;
+		goto err;
+	}
+	idle_state = &cpuidle_drv->states[state];
+	if (!idle_state->disable) {
+		ret = -EAGAIN;
+		goto err;
+	}
+	cpu_data->idle_state = idle_state;
+	cpu_data->saved_exit_latency = idle_state->exit_latency;
+	cpu_data->idle_enter = idle_state->enter;
+	genpd->cpu_data = cpu_data;
+	idle_state->platform_data = genpd;
+	idle_state->enter = genpd_cpuidle_enter;
+	genpd_recalc_cpu_exit_latency(genpd);
+
+ out:
+	genpd_release_lock(genpd);
+	return ret;
+
+ err:
+	cpuidle_driver_unref();
+
+ err_free:
+	kfree(cpu_data);
+	goto out;
+}
+
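+/**
+ * genpd_detach_cpuidle - Disconnect a PM domain from cpuidle.
+ * @genpd: PM domain to disconnect.
+ *
+ * Restore the cpuidle state's original .enter() callback and exit latency and
+ * drop the reference to the cpuidle driver taken by genpd_attach_cpuidle().
+ * Fails with -EAGAIN if the state is not currently disabled.
+ */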
+int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+{
+	struct gpd_cpu_data *cpu_data;
+	struct cpuidle_state *idle_state;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(genpd))
+		return -EINVAL;
+
+	genpd_acquire_lock(genpd);
+
+	cpu_data = genpd->cpu_data;
+	if (!cpu_data) {
+		ret = -ENODEV;
+		goto out;
+	}
+	idle_state = cpu_data->idle_state;
+	if (!idle_state->disable) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	idle_state->enter = cpu_data->idle_enter;
+	idle_state->exit_latency = cpu_data->saved_exit_latency;
+	idle_state->platform_data = NULL;
+	cpuidle_driver_unref();
+	genpd->cpu_data = NULL;
+	kfree(cpu_data);
+
+ out:
+	genpd_release_lock(genpd);
+	return ret;
+}
+
 /* Default device callbacks for generic PM domains. */
 
 /**
Index: linux/include/linux/cpuidle.h
===================================================================
--- linux.orig/include/linux/cpuidle.h
+++ linux/include/linux/cpuidle.h
@@ -53,6 +53,8 @@ struct cpuidle_state {
 			int index);
 
 	int (*enter_dead) (struct cpuidle_device *dev, int index);
+
+	void		*platform_data;
 };
 
 /* Idle State Flags */
