Date:   Mon, 22 Aug 2022 10:15:11 +0800
From:   Pingfan Liu <kernelfans@...il.com>
To:     linux-kernel@...r.kernel.org
Cc:     Pingfan Liu <kernelfans@...il.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Steven Price <steven.price@....com>,
        Andi Kleen <ak@...ux.intel.com>,
        Frederic Weisbecker <frederic@...nel.org>,
        "Jason A. Donenfeld" <Jason@...c4.com>,
        Mark Rutland <mark.rutland@....com>
Subject: [RFC 01/10] cpu/hotplug: Make __cpuhp_kick_ap() ready for async

At present, during a kexec reboot, the teardown of CPUs cannot run in
parallel. As a first step towards parallel teardown, the initiator needs
to kick the AP threads one by one instead of waiting for each AP thread
to complete.

Change the prototype of __cpuhp_kick_ap() to cope with this demand.
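
As an illustration (not part of this patch), the intended caller pattern
could look like the sketch below. The two-pass loop is hypothetical and
elides the initiator CPU for brevity; wait_for_ap_thread() and the
per-cpu cpuhp_state already exist in kernel/cpu.c, and
cpuhp_kick_ap_work_async() is introduced by this patch:

	/* First pass: kick every AP thread without waiting on it. */
	for_each_online_cpu(cpu)
		cpuhp_kick_ap_work_async(cpu);

	/* Second pass: reap each completion one by one. */
	for_each_online_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

		/* st->bringup is false in the teardown direction */
		wait_for_ap_thread(st, st->bringup);
	}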

Signed-off-by: Pingfan Liu <kernelfans@...il.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Steven Price <steven.price@....com>
Cc: "Peter Zijlstra
Cc: Andi Kleen <ak@...ux.intel.com>
Cc: Frederic Weisbecker <frederic@...nel.org>
Cc: "Jason A. Donenfeld" <Jason@...c4.com>
Cc: Mark Rutland <mark.rutland@....com>
To: linux-kernel@...r.kernel.org
---
 kernel/cpu.c | 41 ++++++++++++++++++++++++++++++-----------
 1 file changed, 30 insertions(+), 11 deletions(-)

diff --git a/kernel/cpu.c b/kernel/cpu.c
index bbad5e375d3b..338e1d426c7e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -526,7 +526,7 @@ cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
 }
 
 /* Regular hotplug invocation of the AP hotplug thread */
-static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
+static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st, bool sync)
 {
 	if (!st->single && st->state == st->target)
 		return;
@@ -539,20 +539,22 @@ static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
 	smp_mb();
 	st->should_run = true;
 	wake_up_process(st->thread);
-	wait_for_ap_thread(st, st->bringup);
+	if (sync)
+		wait_for_ap_thread(st, st->bringup);
 }
 
 static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
-			 enum cpuhp_state target)
+		enum cpuhp_state target, bool sync)
 {
 	enum cpuhp_state prev_state;
 	int ret;
 
 	prev_state = cpuhp_set_state(cpu, st, target);
-	__cpuhp_kick_ap(st);
-	if ((ret = st->result)) {
+	__cpuhp_kick_ap(st, sync);
+	ret = st->result;
+	if (sync && ret) {
 		cpuhp_reset_state(cpu, st, prev_state);
-		__cpuhp_kick_ap(st);
+		__cpuhp_kick_ap(st, true);
 	}
 
 	return ret;
@@ -583,7 +585,7 @@ static int bringup_wait_for_ap(unsigned int cpu)
 	if (st->target <= CPUHP_AP_ONLINE_IDLE)
 		return 0;
 
-	return cpuhp_kick_ap(cpu, st, st->target);
+	return cpuhp_kick_ap(cpu, st, st->target, true);
 }
 
 static int bringup_cpu(unsigned int cpu)
@@ -835,7 +837,7 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
 	st->cb_state = state;
 	st->single = true;
 
-	__cpuhp_kick_ap(st);
+	__cpuhp_kick_ap(st, true);
 
 	/*
 	 * If we failed and did a partial, do a rollback.
@@ -844,7 +846,7 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
 		st->rollback = true;
 		st->bringup = !bringup;
 
-		__cpuhp_kick_ap(st);
+		__cpuhp_kick_ap(st, true);
 	}
 
 	/*
@@ -868,12 +870,29 @@ static int cpuhp_kick_ap_work(unsigned int cpu)
 	cpuhp_lock_release(true);
 
 	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
-	ret = cpuhp_kick_ap(cpu, st, st->target);
+	ret = cpuhp_kick_ap(cpu, st, st->target, true);
 	trace_cpuhp_exit(cpu, st->state, prev_state, ret);
 
 	return ret;
 }
 
+/* In the async case, tracing is meaningless since the ret value is not yet available */
+static int cpuhp_kick_ap_work_async(unsigned int cpu)
+{
+	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+	int ret;
+
+	cpuhp_lock_acquire(false);
+	cpuhp_lock_release(false);
+
+	cpuhp_lock_acquire(true);
+	cpuhp_lock_release(true);
+
+	ret = cpuhp_kick_ap(cpu, st, st->target, false);
+
+	return ret;
+}
+
 static struct smp_hotplug_thread cpuhp_threads = {
 	.store			= &cpuhp_state.thread,
 	.thread_should_run	= cpuhp_should_run,
@@ -1171,7 +1190,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 	if (ret && st->state < prev_state) {
 		if (st->state == CPUHP_TEARDOWN_CPU) {
 			cpuhp_reset_state(cpu, st, prev_state);
-			__cpuhp_kick_ap(st);
+			__cpuhp_kick_ap(st, true);
 		} else {
 			WARN(1, "DEAD callback error for CPU%d", cpu);
 		}
-- 
2.31.1
