Date:	Mon, 25 Aug 2014 09:27:12 -0500
From:	kodiak furr <boxerspam1@...il.com>
To:	xiaoming wang <xiaoming.wang@...el.com>
Cc:	gregkh@...uxfoundation.org, linux-pm@...r.kernel.org,
	rjw@...ysocki.net, Chuansheng Liu <chuansheng.liu@...el.com>,
	len.brown@...el.com, linux-kernel@...r.kernel.org, pavel@....cz
Subject: Re: [PATCH 2/2] PM / sleep: Asynchronous threads for dpm_complete

kodiak furr liked your message with Boxer for Android.

On Aug 25, 2014 10:13 AM, xiaoming wang <xiaoming.wang@...el.com> wrote:
>
> By analogy with commits 5af84b82701a and 97df8c12995,
> using asynchronous threads can improve the overall
> resume time significantly.
>
> This patch is for the dpm_complete phase.
>
> Signed-off-by: Chuansheng Liu <chuansheng.liu@...el.com> 
> Signed-off-by: xiaoming wang <xiaoming.wang@...el.com> 
> --- 
> drivers/base/power/main.c |   38 ++++++++++++++++++++++++++++++++++---- 
> 1 files changed, 34 insertions(+), 4 deletions(-) 
>
> diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c 
> index f9fe1b3..00c4bf1 100644 
> --- a/drivers/base/power/main.c 
> +++ b/drivers/base/power/main.c 
> @@ -889,14 +889,15 @@ void dpm_resume(pm_message_t state)
>   * @dev: Device to handle.
>   * @state: PM transition of the system being carried out.
>   */
> -static void device_complete(struct device *dev, pm_message_t state)
> +static void device_complete(struct device *dev, pm_message_t state, bool async)
>  {
>  	void (*callback)(struct device *) = NULL;
>  	char *info = NULL;
> 
>  	if (dev->power.syscore)
> -		return;
> +		goto Complete;
> 
> +	dpm_wait(dev->parent, async);
>  	device_lock(dev);
> 
>  	if (dev->pm_domain) {
> @@ -928,6 +929,17 @@ static void device_complete(struct device *dev, pm_message_t state)
>  	device_unlock(dev);
> 
>  	pm_runtime_put(dev);
> +
> +Complete:
> +	complete_all(&dev->power.completion);
> +}
> +
> +static void async_complete(void *data, async_cookie_t cookie)
> +{
> +	struct device *dev = (struct device *)data;
> +
> +	device_complete(dev, pm_transition, true);
> +	put_device(dev);
>  }
> 
>  /**
> @@ -940,27 +952,45 @@ static void device_complete(struct device *dev, pm_message_t state)
>  void dpm_complete(pm_message_t state)
>  {
>  	struct list_head list;
> +	struct device *dev;
> 
>  	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
>  	might_sleep();
> 
>  	INIT_LIST_HEAD(&list);
>  	mutex_lock(&dpm_list_mtx);
> +	pm_transition = state;
> +
> +	/*
> +	 * Advance the async threads upfront,
> +	 * in case the starting of async threads is
> +	 * delayed by non-async resuming devices.
> +	 */
> +	list_for_each_entry(dev, &dpm_prepared_list, power.entry) {
> +		reinit_completion(&dev->power.completion);
> +		if (is_async(dev)) {
> +			get_device(dev);
> +			async_schedule(async_complete, dev);
> +		}
> +	}
> +
>  	while (!list_empty(&dpm_prepared_list)) {
> -		struct device *dev = to_device(dpm_prepared_list.prev);
> +		dev = to_device(dpm_prepared_list.prev);
> 
>  		get_device(dev);
>  		dev->power.is_prepared = false;
>  		list_move(&dev->power.entry, &list);
>  		mutex_unlock(&dpm_list_mtx);
> 
> -		device_complete(dev, state);
> +		if (!is_async(dev))
> +			device_complete(dev, state, false);
> 
>  		mutex_lock(&dpm_list_mtx);
>  		put_device(dev);
>  	}
>  	list_splice(&list, &dpm_list);
>  	mutex_unlock(&dpm_list_mtx);
> +	async_synchronize_full();
>  	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
>  }
>
> -- 
> 1.7.1 
>
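The structure the patch gives dpm_complete() is: start the async-capable devices' handlers up front, walk the list and handle only the non-async devices inline, then wait for all outstanding async work before returning. The following is a minimal, self-contained userspace sketch of that shape using POSIX threads; it is only an analogy for illustration (struct fake_dev, handle_one() and friends are invented names here), not the in-kernel async_schedule()/async_synchronize_full() machinery.

/*
 * Userspace analogy of the "async upfront, sync inline, synchronize
 * at the end" structure used by the patch above. Hypothetical names
 * throughout; this is not kernel code. Build with -lpthread.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define NDEV 4

struct fake_dev {
	const char *name;
	bool async;		/* analogous to is_async(dev) */
	pthread_t thread;	/* analogous to the async thread */
};

/* Per-device "complete" work, analogous to device_complete(). */
static void handle_one(struct fake_dev *d)
{
	usleep(100000);		/* pretend the callback takes 100 ms */
	printf("completed %s (%s)\n", d->name, d->async ? "async" : "sync");
}

static void *async_worker(void *data)
{
	handle_one(data);
	return NULL;
}

int main(void)
{
	struct fake_dev devs[NDEV] = {
		{ "devA", true  }, { "devB", false },
		{ "devC", true  }, { "devD", false },
	};
	int i;

	/* Pass 1: start async handlers upfront (async_schedule() analogue). */
	for (i = 0; i < NDEV; i++)
		if (devs[i].async)
			pthread_create(&devs[i].thread, NULL, async_worker, &devs[i]);

	/* Pass 2: handle the non-async devices inline, in list order. */
	for (i = 0; i < NDEV; i++)
		if (!devs[i].async)
			handle_one(&devs[i]);

	/* Wait for all async work, as async_synchronize_full() does. */
	for (i = 0; i < NDEV; i++)
		if (devs[i].async)
			pthread_join(devs[i].thread, NULL);

	return 0;
}

Because the two async entries are started first, their delays overlap the synchronous work, so the total wall time is roughly the cost of the synchronous devices alone rather than the sum over all four.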
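The device_complete() hunks quoted above also lean on the dev->power.completion pattern: each completion is re-armed before any work starts (reinit_completion()), an async handler waits on another device's completion before it runs (dpm_wait()), and complete_all() is signalled on every exit path, including the early syscore bail-out, so no waiter can hang. Below is a rough userspace sketch of that wait/complete idea with pthread primitives; all names (struct fake_completion, dev_a, dev_b, ...) are invented for illustration and this is not the kernel's completion API.

/*
 * Sketch of the completion pattern: one thread waits on another
 * "device's" completion before doing its work, and every path
 * signals completion so waiters never hang. Build with -lpthread.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static void completion_init(struct fake_completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = false;			/* analogous to reinit_completion() */
}

static void complete_all_fake(struct fake_completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_broadcast(&c->cond);	/* wake every waiter */
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion_fake(struct fake_completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)			/* analogous to dpm_wait() */
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static struct fake_completion dev_a, dev_b;

/* "Device B" must not finish its work before "device A" has completed. */
static void *dev_b_handler(void *arg)
{
	(void)arg;
	wait_for_completion_fake(&dev_a);
	printf("B: A has completed, doing B's work\n");
	complete_all_fake(&dev_b);		/* signal on every exit path */
	return NULL;
}

int main(void)
{
	pthread_t t;

	completion_init(&dev_a);
	completion_init(&dev_b);

	pthread_create(&t, NULL, dev_b_handler, NULL);

	printf("A: doing A's work\n");
	complete_all_fake(&dev_a);

	wait_for_completion_fake(&dev_b);
	pthread_join(t, NULL);
	return 0;
}

Signalling the completion on every path, as the patch does via the Complete: label even for syscore devices, is what keeps a waiter like wait_for_completion_fake() from blocking forever when a device is skipped.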
