Message-ID: <8536271.NyiUUSuA9g@rjwysocki.net>
Date: Tue, 25 Feb 2025 17:45:31 +0100
From: "Rafael J. Wysocki" <rjw@...ysocki.net>
To: Linux PM <linux-pm@...r.kernel.org>
Cc: LKML <linux-kernel@...r.kernel.org>,
Alan Stern <stern@...land.harvard.edu>, Ulf Hansson <ulf.hansson@...aro.org>,
Johan Hovold <johan@...nel.org>,
Manivannan Sadhasivam <manivannan.sadhasivam@...aro.org>,
Saravana Kannan <saravanak@...gle.com>
Subject: [PATCH v1 4/5] PM: sleep: Start suspending parents and suppliers after subordinate suspend

From: Rafael J. Wysocki <rafael.j.wysocki@...el.com>

In analogy with the previous change affecting the resume path, make
device_suspend() start the async suspend of the device's parent and
suppliers after the device itself has been processed.  Also make
dpm_suspend() start processing "async" leaf devices (that is, devices
without children or consumers) upfront because they don't need to wait
for any other devices.

On the Dell XPS13 9360 in my office, this change reduces the total
duration of device suspend by approximately 100 ms (over 20%).

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@...el.com>
---
 drivers/base/power/main.c | 73 +++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 69 insertions(+), 4 deletions(-)

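As an illustration of the intended ordering, here is a toy user-space
model of it (plain C with pthreads; not kernel code, and every name in
it is made up for the example).  Leaves start suspending immediately,
and the last subordinate to complete kicks off its superior's suspend:

/* Toy model only -- not kernel code; all names are illustrative. */
#include <pthread.h>
#include <stdio.h>

struct toy_dev {
        const char *name;
        struct toy_dev *parent;
        int pending_children;           /* subordinates not yet suspended */
        pthread_mutex_t lock;
};

static void toy_suspend(struct toy_dev *dev);

/* The last subordinate to finish starts its superior's suspend. */
static void toy_subordinate_done(struct toy_dev *parent)
{
        int remaining;

        pthread_mutex_lock(&parent->lock);
        remaining = --parent->pending_children;
        pthread_mutex_unlock(&parent->lock);

        if (remaining == 0)
                toy_suspend(parent);
}

static void toy_suspend(struct toy_dev *dev)
{
        printf("suspending %s\n", dev->name);
        if (dev->parent)
                toy_subordinate_done(dev->parent);
}

static void *toy_async_suspend(void *data)
{
        toy_suspend(data);
        return NULL;
}

int main(void)
{
        struct toy_dev root = { "root", NULL, 2, PTHREAD_MUTEX_INITIALIZER };
        struct toy_dev leaf_a = { "leaf-a", &root, 0, PTHREAD_MUTEX_INITIALIZER };
        struct toy_dev leaf_b = { "leaf-b", &root, 0, PTHREAD_MUTEX_INITIALIZER };
        pthread_t ta, tb;

        /* Like the dpm_suspend() change below: start the leaves upfront. */
        pthread_create(&ta, NULL, toy_async_suspend, &leaf_a);
        pthread_create(&tb, NULL, toy_async_suspend, &leaf_b);
        pthread_join(ta, NULL);
        pthread_join(tb, NULL);

        return 0;
}

Built with "cc -pthread", it prints the two leaves in either order,
always followed by "root", which is the invariant the patch relies on.
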
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1237,6 +1237,49 @@
 
 /*------------------------- Suspend routines -------------------------*/
 
+static bool dpm_leaf_device(struct device *dev)
+{
+        struct device *child;
+
+        lockdep_assert_held(&dpm_list_mtx);
+
+        child = device_find_any_child(dev);
+        if (child) {
+                put_device(child);
+
+                return false;
+        }
+
+        /*
+         * Since this function is required to run under dpm_list_mtx, the
+         * list_empty() below will only return true if the device's list of
+         * consumers is actually empty before it is called.
+         */
+        return list_empty(&dev->links.consumers);
+}
+
+static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
+{
+        struct device_link *link;
+
+        mutex_lock(&dpm_list_mtx);
+
+        /* Start processing the device's parent if it is "async". */
+        if (dev->parent)
+                dpm_async_unless_in_progress(dev->parent, func);
+
+        /*
+         * Start processing the device's "async" suppliers.
+         *
+         * The dpm_list_mtx locking is sufficient for this.
+         */
+        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+                        dpm_async_unless_in_progress(link->supplier, func);
+
+        mutex_unlock(&dpm_list_mtx);
+}
+
 /**
  * resume_event - Return a "resume" message for given "suspend" sleep state.
  * @sleep_state: PM message representing a sleep state.
@@ -1663,6 +1706,8 @@
         device_links_read_unlock(idx);
 }
 
+static void async_suspend(void *data, async_cookie_t cookie);
+
 /**
  * device_suspend - Execute "suspend" callbacks for given device.
  * @dev: Device to handle.
@@ -1791,7 +1836,13 @@
 Complete:
         complete_all(&dev->power.completion);
         TRACE_SUSPEND(error);
-        return error;
+
+        if (error || async_error)
+                return error;
+
+        dpm_async_suspend_superior(dev, async_suspend);
+
+        return 0;
 }
 
 static void async_suspend(void *data, async_cookie_t cookie)
@@ -1809,6 +1860,7 @@
 int dpm_suspend(pm_message_t state)
 {
         ktime_t starttime = ktime_get();
+        struct device *dev;
         int error = 0;
 
         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
@@ -1822,15 +1874,28 @@
 
         mutex_lock(&dpm_list_mtx);
 
+        /*
+         * Start processing "async" leaf devices upfront because they don't need
+         * to wait.
+         */
+        list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
+                dpm_clear_async_state(dev);
+                if (dpm_leaf_device(dev))
+                        dpm_async_fn(dev, async_suspend);
+        }
+
         while (!list_empty(&dpm_prepared_list)) {
-                struct device *dev = to_device(dpm_prepared_list.prev);
+                dev = to_device(dpm_prepared_list.prev);
 
                 list_move(&dev->power.entry, &dpm_suspended_list);
 
-                dpm_clear_async_state(dev);
-                if (dpm_async_fn(dev, async_suspend))
+                dpm_async_unless_in_progress(dev, async_suspend);
+
+                if (dev->power.work_in_progress)
                         continue;
 
+                dev->power.work_in_progress = true;
+
                 get_device(dev);
 
                 mutex_unlock(&dpm_list_mtx);
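
For reference, dpm_async_unless_in_progress() is introduced earlier in
this series and is not shown on this page; judging only from the call
sites above, its assumed shape is roughly the following (a sketch under
that assumption, not the real helper):

static void dpm_async_unless_in_progress(struct device *dev, async_func_t func)
{
        /*
         * Assumed semantics: skip devices whose suspend is already queued
         * or running, and let dpm_async_fn() mark the device's work as in
         * progress when it queues the callback.  Callers hold dpm_list_mtx.
         */
        if (!dev->power.work_in_progress)
                dpm_async_fn(dev, func);
}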