Message-Id: <1406106694-3306-1-git-send-email-tianyu.lan@intel.com>
Date:	Wed, 23 Jul 2014 17:11:34 +0800
From:	Lan Tianyu <tianyu.lan@...el.com>
To:	rjw@...ysocki.net, len.brown@...el.com, pavel@....cz,
	peterz@...radead.org, toshi.kani@...com, mingo@...nel.org,
	akpm@...ux-foundation.org, todd.e.brandt@...ux.intel.com,
	fabf@...net.be, srivatsa.bhat@...ux.vnet.ibm.com,
	tianyu.lan@...el.com, ego@...ux.vnet.ibm.com
Cc:	rafael.j.wysocki@...el.com, linux-kernel@...r.kernel.org,
	linux-pm@...r.kernel.org
Subject: [RFC PATCH] PM/CPU: Enable nonboot cpus in parallel with device resume

Currently all nonboot cpus are enabled serially during system resume: the
boot cpu brings up the nonboot cpus one by one and only then resumes
devices. Until devices are resumed there are few tasks for the freshly
onlined nonboot cpus to run, so their cpu time is wasted.

To accelerate S3 resume, this patch adds a new kernel config option,
PM_PARALLEL_CPU_UP_FOR_SUSPEND, which lets the boot cpu proceed to resuming
devices after bringing up just one nonboot cpu. That nonboot cpu then takes
charge of bringing up the remaining cpus, so enabling cpu2~x runs in
parallel with device resume. In tests on a laptop with 4 logical cores, the
device resume time was almost unaffected by deferring the nonboot cpu
bring-up, while its start point moved almost 30ms earlier than before.
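
For illustration only, the intended ordering is roughly the sketch below.
This is a simplified sketch of the mechanism, not the patch itself: it
assumes the kernel/cpu.c context (frozen_cpus, _cpu_up()), the function
names are made up for the example, and resume_devices() is a hypothetical
stand-in for the dpm_resume_noirq()/dpm_resume_end() work the suspend core
performs after enable_nonboot_cpus() returns; locking, tracing and error
handling are omitted.

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kthread.h>

/* Runs on the first onlined nonboot cpu, concurrently with device resume. */
static int bring_up_remaining_cpus(void *unused)
{
	int cpu;

	for_each_cpu(cpu, frozen_cpus)		/* cpu2~x */
		_cpu_up(cpu, 1);
	return 0;
}

static void parallel_resume_sketch(void)
{
	int cpu = cpumask_first(frozen_cpus);

	cpumask_clear_cpu(cpu, frozen_cpus);
	_cpu_up(cpu, 1);			/* boot cpu brings up one cpu only */

	/* Hand the remaining cpus to a kthread pinned on that cpu... */
	kthread_unpark(kthread_create_on_cpu(bring_up_remaining_cpus, NULL,
					     cpu, "async-enable-nonboot-cpus"));

	/* ...so device resume can start right away (hypothetical stand-in). */
	resume_devices();
}

Binding the helper kthread to the cpu that was just brought up keeps the
boot cpu free to start device resume immediately.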

Signed-off-by: Lan Tianyu <tianyu.lan@...el.com>
---
 kernel/cpu.c         | 82 ++++++++++++++++++++++++++++++++++++++++++++++------
 kernel/power/Kconfig | 13 +++++++++
 2 files changed, 86 insertions(+), 9 deletions(-)

diff --git a/kernel/cpu.c b/kernel/cpu.c
index a343bde..d4c1353 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -551,9 +551,27 @@ void __weak arch_enable_nonboot_cpus_end(void)
 {
 }
 
+static int _cpu_up_with_trace(int cpu)
+{
+	int error;
+
+	trace_suspend_resume(TPS("CPU_ON"), cpu, true);
+	error = _cpu_up(cpu, 1);
+	trace_suspend_resume(TPS("CPU_ON"), cpu, false);
+	if (error) {
+		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
+		return error;
+	}
+
+	pr_info("CPU%d is up\n", cpu);
+	return 0;
+}
+
+#ifndef CONFIG_PM_PARALLEL_CPU_UP_FOR_SUSPEND
+
 void __ref enable_nonboot_cpus(void)
 {
-	int cpu, error;
+	int cpu;
 
 	/* Allow everyone to use the CPU hotplug again */
 	cpu_maps_update_begin();
@@ -566,14 +584,7 @@ void __ref enable_nonboot_cpus(void)
 	arch_enable_nonboot_cpus_begin();
 
 	for_each_cpu(cpu, frozen_cpus) {
-		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
-		error = _cpu_up(cpu, 1);
-		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
-		if (!error) {
-			pr_info("CPU%d is up\n", cpu);
-			continue;
-		}
-		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
+		_cpu_up_with_trace(cpu);
 	}
 
 	arch_enable_nonboot_cpus_end();
@@ -583,6 +594,59 @@ out:
 	cpu_maps_update_done();
 }
 
+#else
+
+static int async_enable_nonboot_cpus(void *data)
+{
+	int cpu;
+
+	cpu_maps_update_begin();
+
+	for_each_cpu(cpu, frozen_cpus) {
+		_cpu_up_with_trace(cpu);
+	}
+
+	arch_enable_nonboot_cpus_end();
+	cpumask_clear(frozen_cpus);
+	cpu_maps_update_done();
+	return 0;
+}
+
+void __ref enable_nonboot_cpus(void)
+{
+	struct task_struct *tsk;
+	int cpu;
+
+	/* Allow everyone to use the CPU hotplug again */
+	cpu_maps_update_begin();
+	cpu_hotplug_disabled = 0;
+	if (cpumask_empty(frozen_cpus))
+		goto out;
+
+	arch_enable_nonboot_cpus_begin();
+
+	cpu = cpumask_first(frozen_cpus);
+	cpumask_clear_cpu(cpu, frozen_cpus);
+
+	_cpu_up_with_trace(cpu);
+
+	if (cpumask_empty(frozen_cpus)) {
+		arch_enable_nonboot_cpus_end();
+	} else {
+		tsk = kthread_create_on_cpu(async_enable_nonboot_cpus,
+				NULL, cpu, "async-enable-nonboot-cpus");
+		if (IS_ERR(tsk)) {
+			pr_err("Failed to create async enable nonboot cpus thread.\n");
+			goto out;
+		}
+
+		kthread_unpark(tsk);
+	}
+out:
+	cpu_maps_update_done();
+}
+#endif
+
 static int __init alloc_frozen_cpus(void)
 {
 	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 9a83d78..e5e6671 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -110,6 +110,19 @@ config PM_AUTOSLEEP
 	Allow the kernel to trigger a system transition into a global sleep
 	state automatically whenever there are no active wakeup sources.
 
+config PM_PARALLEL_CPU_UP_FOR_SUSPEND
+	bool "Parallel nonboot cpus bring-up during resume from suspend"
+	depends on SMP
+	depends on PM_SLEEP
+	default n
+	---help---
+	  Currently all nonboot cpus are enabled serially during system resume:
+	  the boot cpu brings up the nonboot cpus one by one and only then
+	  resumes devices, while the freshly onlined cpus have few tasks to run.
+	  To accelerate S3 resume, this option lets the boot cpu proceed to
+	  resuming devices after bringing up one nonboot cpu; that cpu then takes
+	  charge of bringing up the remaining cpus in parallel with device resume.
+
 config PM_WAKELOCKS
 	bool "User space wakeup sources interface"
 	depends on PM_SLEEP
-- 
1.8.3.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/
