Message-Id: <201001240038.56372.rjw@sisk.pl>
Date:	Sun, 24 Jan 2010 00:38:56 +0100
From:	"Rafael J. Wysocki" <rjw@...k.pl>
To:	LKML <linux-kernel@...r.kernel.org>
Cc:	Alan Stern <stern@...land.harvard.edu>,
	Linus Torvalds <torvalds@...ux-foundation.org>,
	Linux PCI <linux-pci@...r.kernel.org>,
	pm list <linux-pm@...ts.linux-foundation.org>,
	linux-usb@...r.kernel.org, Greg KH <gregkh@...e.de>,
	Jesse Barnes <jbarnes@...tuousgeek.org>,
	James Bottomley <James.Bottomley@...e.de>,
	Linux SCSI <linux-scsi@...r.kernel.org>,
	Arjan van de Ven <arjan@...radead.org>,
	ACPI Devel Mailing List <linux-acpi@...r.kernel.org>,
	Len Brown <lenb@...nel.org>,
	Nigel Cunningham <ncunningham@...a.org.au>
Subject: [PATCH 5/8] PM: Start asynchronous resume threads upfront

From: Rafael J. Wysocki <rjw@...k.pl>

Testing has shown that total device resume time can be reduced
significantly (by as much as 50% or more) if the async threads
executing some devices' resume routines are all started before the
main resume thread begins handling the "synchronous" devices.

This is because the slowest devices tend to be located at the end of
dpm_list, so their resume routines are started very late: even if
they are "asynchronous", they have to wait for all of the preceding
"synchronous" devices before the main resume thread can start their
resume routines.  Starting their async threads upfront effectively
moves those devices towards the beginning of dpm_list, without
breaking their ordering with respect to their parents and children.
As a result, their resume routines are started much earlier and
considerably more device resume time is saved.
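
As an illustration only (not kernel code), the sketch below models the
same two-pass scheme in userspace, with a made-up device table and
pthreads standing in for async_schedule(); the resume times are
arbitrary and just show a slow "async" device at the tail of the list
overlapping with the synchronous ones.

/* Build: gcc -O2 -pthread resume-sketch.c -o resume-sketch */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct fake_dev {
        const char *name;
        bool async;             /* analogous to dev->power.async_suspend */
        unsigned int ms;        /* pretend resume time in milliseconds */
        pthread_t thread;
};

static struct fake_dev devs[] = {
        { "sync-a",  false,  50 },
        { "sync-b",  false,  50 },
        { "async-z", true,  300 },      /* slow device at the end of the list */
};

static void fake_resume(struct fake_dev *d)
{
        usleep(d->ms * 1000);
        printf("resumed %s\n", d->name);
}

static void *async_resume_thread(void *data)
{
        fake_resume(data);
        return NULL;
}

int main(void)
{
        size_t i, n = sizeof(devs) / sizeof(devs[0]);

        /* Pass 1: start every "async" device's thread before touching
         * the synchronous ones, like the new first loop in dpm_resume(). */
        for (i = 0; i < n; i++)
                if (devs[i].async)
                        pthread_create(&devs[i].thread, NULL,
                                       async_resume_thread, &devs[i]);

        /* Pass 2: the main thread handles only the synchronous devices,
         * so the slow async device at the tail overlaps with them. */
        for (i = 0; i < n; i++)
                if (!devs[i].async)
                        fake_resume(&devs[i]);

        /* Wait for the async handlers to finish, roughly analogous to
         * waiting on the devices' completions. */
        for (i = 0; i < n; i++)
                if (devs[i].async)
                        pthread_join(devs[i].thread, NULL);

        return 0;
}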

Signed-off-by: Rafael J. Wysocki <rjw@...k.pl>
---
 drivers/base/power/main.c |   43 ++++++++++++++++++++++++-------------------
 1 file changed, 24 insertions(+), 19 deletions(-)

Index: linux-2.6/drivers/base/power/main.c
===================================================================
--- linux-2.6.orig/drivers/base/power/main.c
+++ linux-2.6/drivers/base/power/main.c
@@ -495,12 +495,12 @@ static int legacy_resume(struct device *
 }
 
 /**
- * __device_resume - Execute "resume" callbacks for given device.
+ * device_resume - Execute "resume" callbacks for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being resumed asynchronously.
  */
-static int __device_resume(struct device *dev, pm_message_t state, bool async)
+static int device_resume(struct device *dev, pm_message_t state, bool async)
 {
 	int error = 0;
 
@@ -510,6 +510,8 @@ static int __device_resume(struct device
 	dpm_wait(dev->parent, async);
 	down(&dev->sem);
 
+	dev->power.status = DPM_RESUMING;
+
 	if (dev->bus) {
 		if (dev->bus->pm) {
 			pm_dev_dbg(dev, state, "");
@@ -553,24 +555,16 @@ static void async_resume(void *data, asy
 	struct device *dev = (struct device *)data;
 	int error;
 
-	error = __device_resume(dev, pm_transition, true);
+	error = device_resume(dev, pm_transition, true);
 	if (error)
 		pm_dev_err(dev, pm_transition, " async", error);
 	put_device(dev);
 }
 
-static int device_resume(struct device *dev)
+static bool is_async(struct device *dev)
 {
-	INIT_COMPLETION(dev->power.completion);
-
-	if (pm_async_enabled && dev->power.async_suspend
-	    && !pm_trace_is_enabled()) {
-		get_device(dev);
-		async_schedule(async_resume, dev);
-		return 0;
-	}
-
-	return __device_resume(dev, pm_transition, false);
+	return dev->power.async_suspend && pm_async_enabled
+		&& !pm_trace_is_enabled();
 }
 
 /**
@@ -583,22 +577,33 @@ static int device_resume(struct device *
 static void dpm_resume(pm_message_t state)
 {
 	struct list_head list;
+	struct device *dev;
 	ktime_t starttime = ktime_get();
 
 	INIT_LIST_HEAD(&list);
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
-	while (!list_empty(&dpm_list)) {
-		struct device *dev = to_device(dpm_list.next);
 
+	list_for_each_entry(dev, &dpm_list, power.entry) {
+		if (dev->power.status < DPM_OFF)
+			continue;
+
+		INIT_COMPLETION(dev->power.completion);
+		if (is_async(dev)) {
+			get_device(dev);
+			async_schedule(async_resume, dev);
+		}
+	}
+
+	while (!list_empty(&dpm_list)) {
+		dev = to_device(dpm_list.next);
 		get_device(dev);
-		if (dev->power.status >= DPM_OFF) {
+		if (dev->power.status >= DPM_OFF && !is_async(dev)) {
 			int error;
 
-			dev->power.status = DPM_RESUMING;
 			mutex_unlock(&dpm_list_mtx);
 
-			error = device_resume(dev);
+			error = device_resume(dev, state, false);
 
 			mutex_lock(&dpm_list_mtx);
 			if (error)
