Message-Id: <200908122220.03363.rjw@sisk.pl>
Date:	Wed, 12 Aug 2009 22:20:03 +0200
From:	"Rafael J. Wysocki" <rjw@...k.pl>
To:	"linux-pm" <linux-pm@...ts.linux-foundation.org>
Cc:	"linux-acpi" <linux-acpi@...r.kernel.org>,
	Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
	Zhang Rui <rui.zhang@...el.com>, Len Brown <lenb@...nel.org>,
	Alan Stern <stern@...land.harvard.edu>,
	Arjan van de Ven <arjan@...ux.intel.com>
Subject: [RFC][PATCH 1/3] PM: Asynchronous resume of devices

Theoretically, the total time of system sleep transitions (suspend
to RAM, hibernation) can be reduced by running suspend and resume
callbacks of device drivers in parallel with each other.  However,
there are dependencies between devices such that, for example, we may
not be allowed to put one device into a low power state before
another one has been suspended (e.g. we cannot suspend a bridge
before suspending all devices behind it).  In particular, we're not
allowed to suspend the parent of a device before suspending the
device itself.  Analogously, we're not allowed to resume a device
before resuming its parent.

Thus, to make it possible to execute suspend and resume callbacks
provided by device drivers in parallel with each other, we need to
provide a synchronization mechanism preventing the dependencies
between devices from being violated.

The patch below introduces a mechanism allowing some devices to be
resumed asynchronously, using completions, with the following rules
(sketched in code after the list):
(1) There is a completion, dev->power.comp, for each device object.
(2) All of these completions are reset before suspend as well as
    before each resume stage (dpm_resume_noirq(), dpm_resume()).
(3) If dev->power.async_suspend is set for dev or for its parent, the
    PM core waits for the parent's completion before running the
    resume callbacks appropriate for the current stage of resume
    for dev.
(4) dev->power.comp is completed for each device after running its
    resume callbacks.
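
In code, the per-device protocol behind rules (3) and (4) boils down
to roughly the sketch below.  This is illustrative only: it uses the
power.comp and power.async_suspend fields added by this patch, and the
example_*() helpers are hypothetical (the real code is in
drivers/base/power/main.c further down).

#include <linux/completion.h>
#include <linux/device.h>

/* Rule (3): before resuming dev, wait for its parent if either of them
 * is marked for asynchronous suspend/resume. */
static void example_wait_for_parent(struct device *dev)
{
	struct device *parent = dev->parent;

	if (parent && (dev->power.async_suspend || parent->power.async_suspend))
		wait_for_completion(&parent->power.comp);
}

/* Rule (4): once dev's resume callbacks have run, let any waiters
 * (i.e. its children) proceed. */
static void example_mark_resumed(struct device *dev)
{
	complete_all(&dev->power.comp);
}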

With this mechanism in place, drivers that want their resume
callbacks to be executed asynchronously, and that know their devices
do not depend on any other devices indirectly (i.e. in any way that is
not reflected by the structure of the device tree), can set
dev->power.async_suspend for those devices with the help of
device_enable_async_suspend().
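
For illustration, a driver could opt in from its probe routine roughly
as follows (foo_probe() and its platform device are made up for the
example; device_enable_async_suspend() is the helper added by this
patch):

#include <linux/device.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	/*
	 * The device is known not to depend on anything beyond its
	 * position in the device tree, so its resume callbacks may be
	 * run asynchronously.
	 */
	device_enable_async_suspend(&pdev->dev, true);

	return 0;
}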

Since the only dependencies between devices known to the PM core
are the ones reflected by the structure of the device tree, we don't
seem to be able to do any better than this at the core level.

---
 drivers/base/power/main.c |  115 ++++++++++++++++++++++++++++++++++++++++------
 include/linux/device.h    |    6 ++
 include/linux/pm.h        |    4 +
 3 files changed, 111 insertions(+), 14 deletions(-)

Index: linux-2.6/include/linux/pm.h
===================================================================
--- linux-2.6.orig/include/linux/pm.h
+++ linux-2.6/include/linux/pm.h
@@ -26,6 +26,7 @@
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/timer.h>
+#include <linux/completion.h>
 
 /*
  * Callbacks for platform drivers to implement.
@@ -411,9 +412,12 @@ struct dev_pm_info {
 	pm_message_t		power_state;
 	unsigned int		can_wakeup:1;
 	unsigned int		should_wakeup:1;
+	unsigned		async_suspend:1;
 	enum dpm_state		status;		/* Owned by the PM core */
 #ifdef CONFIG_PM_SLEEP
 	struct list_head	entry;
+	struct completion	comp;
+	pm_message_t		async_state;
 #endif
 #ifdef CONFIG_PM_RUNTIME
 	struct timer_list	suspend_timer;
Index: linux-2.6/include/linux/device.h
===================================================================
--- linux-2.6.orig/include/linux/device.h
+++ linux-2.6/include/linux/device.h
@@ -472,6 +472,12 @@ static inline int device_is_registered(s
 	return dev->kobj.state_in_sysfs;
 }
 
+static inline void device_enable_async_suspend(struct device *dev, bool enable)
+{
+	if (dev->power.status == DPM_ON)
+		dev->power.async_suspend = enable;
+}
+
 void driver_init(void);
 
 /*
Index: linux-2.6/drivers/base/power/main.c
===================================================================
--- linux-2.6.orig/drivers/base/power/main.c
+++ linux-2.6/drivers/base/power/main.c
@@ -25,6 +25,8 @@
 #include <linux/resume-trace.h>
 #include <linux/rwsem.h>
 #include <linux/interrupt.h>
+#include <linux/async.h>
+#include <linux/completion.h>
 
 #include "../base.h"
 #include "power.h"
@@ -56,6 +58,7 @@ static bool transition_started;
 void device_pm_init(struct device *dev)
 {
 	dev->power.status = DPM_ON;
+	init_completion(&dev->power.comp);
 	pm_runtime_init(dev);
 }
 
@@ -163,6 +166,34 @@ void device_pm_move_last(struct device *
 	list_move_tail(&dev->power.entry, &dpm_list);
 }
 
+static void dpm_synchronize_noirq(void)
+{
+	struct device *dev;
+
+	async_synchronize_full();
+
+	list_for_each_entry(dev, &dpm_list, power.entry)
+		INIT_COMPLETION(dev->power.comp);
+}
+
+static void dpm_synchronize(void)
+{
+	struct device *dev;
+
+	async_synchronize_full();
+
+	mutex_lock(&dpm_list_mtx);
+	list_for_each_entry(dev, &dpm_list, power.entry)
+		INIT_COMPLETION(dev->power.comp);
+	mutex_unlock(&dpm_list_mtx);
+}
+
+static void device_pm_wait(struct device *dev, struct device *master)
+{
+	if (master && (dev->power.async_suspend || master->power.async_suspend))
+		wait_for_completion(&master->power.comp);
+}
+
 /**
  *	pm_op - execute the PM operation appropiate for given PM event
  *	@dev:	Device.
@@ -329,31 +360,57 @@ static void pm_dev_err(struct device *de
 /*------------------------- Resume routines -------------------------*/
 
 /**
- *	device_resume_noirq - Power on one device (early resume).
- *	@dev:	Device.
- *	@state: PM transition of the system being carried out.
+ * __device_resume_noirq - Execute an "early resume" callback for given device.
+ * @dev: Device to resume.
+ * @state: PM transition of the system being carried out.
  *
- *	Must be called with interrupts disabled.
+ * The driver of the device won't receive interrupts while this function is
+ * being executed.
  */
-static int device_resume_noirq(struct device *dev, pm_message_t state)
+static int __device_resume_noirq(struct device *dev, pm_message_t state)
 {
 	int error = 0;
 
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
 
-	if (!dev->bus)
-		goto End;
+	device_pm_wait(dev, dev->parent);
 
-	if (dev->bus->pm) {
+	if (dev->bus && dev->bus->pm) {
 		pm_dev_dbg(dev, state, "EARLY ");
 		error = pm_noirq_op(dev, dev->bus->pm, state);
 	}
- End:
+
+	complete_all(&dev->power.comp);
+
 	TRACE_RESUME(error);
 	return error;
 }
 
+static void async_resume_noirq(void *data, async_cookie_t cookie)
+{
+	struct device *dev = (struct device *)data;
+	int error;
+
+	pm_dev_dbg(dev, dev->power.async_state, "async EARLY ");
+	error = __device_resume_noirq(dev, dev->power.async_state);
+	if (error)
+		pm_dev_err(dev, dev->power.async_state, " async EARLY", error);
+	put_device(dev);
+}
+
+static int device_resume_noirq(struct device *dev, pm_message_t state)
+{
+	if (dev->power.async_suspend) {
+		get_device(dev);
+		dev->power.async_state = state;
+		async_schedule(async_resume_noirq, dev);
+		return 0;
+	}
+
+	return __device_resume_noirq(dev, state);
+}
+
 /**
  *	dpm_resume_noirq - Power on all regular (non-sysdev) devices.
  *	@state: PM transition of the system being carried out.
@@ -378,23 +435,25 @@ void dpm_resume_noirq(pm_message_t state
 			if (error)
 				pm_dev_err(dev, state, " early", error);
 		}
+	dpm_synchronize_noirq();
 	mutex_unlock(&dpm_list_mtx);
 	resume_device_irqs();
 }
 EXPORT_SYMBOL_GPL(dpm_resume_noirq);
 
 /**
- *	device_resume - Restore state for one device.
- *	@dev:	Device.
- *	@state: PM transition of the system being carried out.
+ * __device_resume - Execute "resume" callbacks for given device.
+ * @dev: Device to resume.
+ * @state: PM transition of the system being carried out.
  */
-static int device_resume(struct device *dev, pm_message_t state)
+static int __device_resume(struct device *dev, pm_message_t state)
 {
 	int error = 0;
 
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
 
+	device_pm_wait(dev, dev->parent);
 	down(&dev->sem);
 
 	if (dev->bus) {
@@ -429,11 +488,36 @@ static int device_resume(struct device *
 	}
  End:
 	up(&dev->sem);
+	complete_all(&dev->power.comp);
 
 	TRACE_RESUME(error);
 	return error;
 }
 
+static void async_resume(void *data, async_cookie_t cookie)
+{
+	struct device *dev = (struct device *)data;
+	int error;
+
+	pm_dev_dbg(dev, dev->power.async_state, "async ");
+	error = __device_resume(dev, dev->power.async_state);
+	if (error)
+		pm_dev_err(dev, dev->power.async_state, " async", error);
+	put_device(dev);
+}
+
+static int device_resume(struct device *dev, pm_message_t state)
+{
+	if (dev->power.async_suspend) {
+		get_device(dev);
+		dev->power.async_state = state;
+		async_schedule(async_resume, dev);
+		return 0;
+	}
+
+	return __device_resume(dev, state);
+}
+
 /**
  *	dpm_resume - Resume every device.
  *	@state: PM transition of the system being carried out.
@@ -473,6 +557,7 @@ static void dpm_resume(pm_message_t stat
 	}
 	list_splice(&list, &dpm_list);
 	mutex_unlock(&dpm_list_mtx);
+	dpm_synchronize();
 }
 
 /**
@@ -794,8 +879,10 @@ static int dpm_prepare(pm_message_t stat
 			break;
 		}
 		dev->power.status = DPM_SUSPENDING;
-		if (!list_empty(&dev->power.entry))
+		if (!list_empty(&dev->power.entry)) {
 			list_move_tail(&dev->power.entry, &list);
+			INIT_COMPLETION(dev->power.comp);
+		}
 		put_device(dev);
 	}
 	list_splice(&list, &dpm_list);

