Message-ID: <6030952.MhkbZ0Pkbq@rjwysocki.net>
Date: Thu, 13 Mar 2025 21:35:17 +0100
From: "Rafael J. Wysocki" <rjw@...ysocki.net>
To: Linux PM <linux-pm@...r.kernel.org>
Cc: LKML <linux-kernel@...r.kernel.org>,
Alan Stern <stern@...land.harvard.edu>, Ulf Hansson <ulf.hansson@...aro.org>,
Johan Hovold <johan@...nel.org>,
Manivannan Sadhasivam <manivannan.sadhasivam@...aro.org>,
Jon Hunter <jonathanh@...dia.com>, Saravana Kannan <saravanak@...gle.com>
Subject: [PATCH v2 3/3] PM: sleep: Make suspend of devices more asynchronous
From: Rafael J. Wysocki <rafael.j.wysocki@...el.com>
In analogy with previous changes, make device_suspend_late() and
device_suspend_noirq() start the async suspend of the device's parent
and suppliers after the device itself has been processed and make
dpm_suspend_late() and dpm_noirq_suspend_devices() start processing
"async" leaf devices (that is, devices without children or consumers)
upfront because they don't need to wait for any other devices.
On some systems where it has been tested, this change reduces the total
duration of device suspend measurably, though not significantly.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@...el.com>
Suggested-by: Saravana Kannan <saravanak@...gle.com>
---
v1 -> v2:
* Adjust for the changes in patches [1-2/3].
* Move all devices to the target lists even if there are errors in
dpm_suspend_late() and dpm_noirq_suspend_devices() so they are
properly resumed during rollback (Saravana).
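Note for reviewers: dpm_leaf_device(), dpm_async_with_cleanup() and
dpm_async_suspend_superior() used below are introduced by the earlier
patches in this series and do not appear in this diff.  Purely as an
illustration (the helper names, locking and details here are assumptions,
not the actual implementation), the "superior" helper is expected to queue
the device's parent and suppliers for asynchronous processing roughly
along these lines:

/*
 * Illustrative sketch only: once @dev itself has been handled, kick off
 * the async suspend of its parent and of every supplier it depends on.
 */
static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
{
	struct device_link *link;
	int idx;

	/* Queue the parent, if any, for async processing. */
	if (dev->parent)
		dpm_async_with_cleanup(dev->parent, func);

	/* Walk the device links and queue each supplier as well. */
	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		dpm_async_with_cleanup(link->supplier, func);

	device_links_read_unlock(idx);
}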
---
drivers/base/power/main.c | 68 +++++++++++++++++++++++++++++++++++++---------
1 file changed, 56 insertions(+), 12 deletions(-)
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1312,6 +1312,8 @@
device_links_read_unlock(idx);
}
+static void async_suspend_noirq(void *data, async_cookie_t cookie);
+
/**
* device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
@@ -1390,7 +1392,13 @@
Complete:
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
- return error;
+
+ if (error || async_error)
+ return error;
+
+ dpm_async_suspend_superior(dev, async_suspend_noirq);
+
+ return 0;
}
static void async_suspend_noirq(void *data, async_cookie_t cookie)
@@ -1404,6 +1412,7 @@
static int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
+ struct device *dev;
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
@@ -1413,12 +1422,28 @@
mutex_lock(&dpm_list_mtx);
+ /*
+ * Start processing "async" leaf devices upfront because they don't need
+ * to wait.
+ */
+ list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend_noirq);
+ }
+
while (!list_empty(&dpm_late_early_list)) {
- struct device *dev = to_device(dpm_late_early_list.prev);
+ dev = to_device(dpm_late_early_list.prev);
list_move(&dev->power.entry, &dpm_noirq_list);
- dpm_clear_async_state(dev);
+ /*
+ * Move all devices to the target list to resume them properly
+ * on errors.
+ */
+ if (error || async_error)
+ continue;
+
if (dpm_async_fn(dev, async_suspend_noirq))
continue;
@@ -1431,9 +1456,6 @@
put_device(dev);
mutex_lock(&dpm_list_mtx);
-
- if (error || async_error)
- break;
}
mutex_unlock(&dpm_list_mtx);
@@ -1486,6 +1508,8 @@
spin_unlock_irq(&parent->power.lock);
}
+static void async_suspend_late(void *data, async_cookie_t cookie);
+
/**
* device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
@@ -1562,7 +1586,13 @@
Complete:
TRACE_SUSPEND(error);
complete_all(&dev->power.completion);
- return error;
+
+ if (error || async_error)
+ return error;
+
+ dpm_async_suspend_superior(dev, async_suspend_late);
+
+ return 0;
}
static void async_suspend_late(void *data, async_cookie_t cookie)
@@ -1580,6 +1610,7 @@
int dpm_suspend_late(pm_message_t state)
{
ktime_t starttime = ktime_get();
+ struct device *dev;
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
@@ -1591,12 +1622,28 @@
mutex_lock(&dpm_list_mtx);
+ /*
+ * Start processing "async" leaf devices upfront because they don't need
+ * to wait.
+ */
+ list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend_late);
+ }
+
while (!list_empty(&dpm_suspended_list)) {
- struct device *dev = to_device(dpm_suspended_list.prev);
+ dev = to_device(dpm_suspended_list.prev);
list_move(&dev->power.entry, &dpm_late_early_list);
- dpm_clear_async_state(dev);
+ /*
+ * Move all devices to the target list to resume them properly
+ * on errors.
+ */
+ if (error || async_error)
+ continue;
+
if (dpm_async_fn(dev, async_suspend_late))
continue;
@@ -1609,9 +1656,6 @@
put_device(dev);
mutex_lock(&dpm_list_mtx);
-
- if (error || async_error)
- break;
}
mutex_unlock(&dpm_list_mtx);
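As a side note, the notion of an "async" leaf device used by the two
upfront loops above (a device without children or consumers) also comes
from the earlier patches in this series.  A rough sketch of such a check
is shown below for illustration only; the actual dpm_leaf_device() may
differ in its details:

/*
 * Illustrative sketch only: a device is a "leaf" for suspend ordering
 * purposes if it has no children and no consumer device links, so nothing
 * has to wait for it and it can be queued for async suspend right away.
 */
static bool dpm_leaf_device(struct device *dev)
{
	struct device *child;

	lockdep_assert_held(&dpm_list_mtx);

	child = device_find_any_child(dev);
	if (child) {
		put_device(child);
		return false;
	}

	/* No children, so the device is a leaf if it has no consumers either. */
	return list_empty(&dev->links.consumers);
}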