lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20260203112401.3889029-5-zhouchuyi@bytedance.com>
Date: Tue,  3 Feb 2026 19:23:54 +0800
From: "Chuyi Zhou" <zhouchuyi@...edance.com>
To: <tglx@...utronix.de>, <mingo@...hat.com>, <luto@...nel.org>, 
	<peterz@...radead.org>, <paulmck@...nel.org>, <muchun.song@...ux.dev>, 
	<bp@...en8.de>, <dave.hansen@...ux.intel.com>
Cc: <linux-kernel@...r.kernel.org>, "Chuyi Zhou" <zhouchuyi@...edance.com>
Subject: [PATCH 04/11] smp: Use on-stack cpumask in smp_call_function_many_cond

This patch uses an on-stack cpumask to replace the percpu cfd cpumask in
smp_call_function_many_cond(). alloc_cpumask_var() may fail when
CONFIG_CPUMASK_OFFSTACK is enabled. In such an extreme case, fall back to
cfd->cpumask. This is a preparation for the next patch.

Signed-off-by: Chuyi Zhou <zhouchuyi@...edance.com>
---
 kernel/smp.c | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/kernel/smp.c b/kernel/smp.c
index f572716c3c7d..35948afced2e 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -805,11 +805,17 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 	int cpu, last_cpu, this_cpu = smp_processor_id();
 	struct call_function_data *cfd;
 	bool wait = scf_flags & SCF_WAIT;
+	bool preemptible_wait = true;
+	cpumask_var_t cpumask_stack;
+	struct cpumask *cpumask;
 	int nr_cpus = 0;
 	bool run_remote = false;
 
 	lockdep_assert_preemption_disabled();
 
+	if (!alloc_cpumask_var(&cpumask_stack, GFP_ATOMIC))
+		preemptible_wait = false;
+
 	/*
 	 * Can deadlock when called with interrupts disabled.
 	 * We allow cpu's that are not yet online though, as no one else can
@@ -831,15 +837,18 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 	/* Check if we need remote execution, i.e., any CPU excluding this one. */
 	if (cpumask_any_and_but(mask, cpu_online_mask, this_cpu) < nr_cpu_ids) {
 		cfd = this_cpu_ptr(&cfd_data);
-		cpumask_and(cfd->cpumask, mask, cpu_online_mask);
-		__cpumask_clear_cpu(this_cpu, cfd->cpumask);
+
+		cpumask = preemptible_wait ? cpumask_stack : cfd->cpumask;
+
+		cpumask_and(cpumask, mask, cpu_online_mask);
+		__cpumask_clear_cpu(this_cpu, cpumask);
 
 		cpumask_clear(cfd->cpumask_ipi);
-		for_each_cpu(cpu, cfd->cpumask) {
+		for_each_cpu(cpu, cpumask) {
 			call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
 
 			if (cond_func && !cond_func(cpu, info)) {
-				__cpumask_clear_cpu(cpu, cfd->cpumask);
+				__cpumask_clear_cpu(cpu, cpumask);
 				continue;
 			}
 
@@ -890,13 +899,16 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 	}
 
 	if (run_remote && wait) {
-		for_each_cpu(cpu, cfd->cpumask) {
+		for_each_cpu(cpu, cpumask) {
 			call_single_data_t *csd;
 
 			csd = per_cpu_ptr(cfd->csd, cpu);
 			csd_lock_wait(csd);
 		}
 	}
+
+	if (preemptible_wait)
+		free_cpumask_var(cpumask_stack);
 }
 
 /**
-- 
2.20.1

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ