lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Thu, 14 Apr 2022 22:47:01 -0400
From:   Donghai Qiao <dqiao@...hat.com>
To:     akpm@...ux-foundation.org, sfr@...b.auug.org.au, arnd@...db.de,
        peterz@...radead.org, heying24@...wei.com,
        andriy.shevchenko@...ux.intel.com, axboe@...nel.dk,
        rdunlap@...radead.org, tglx@...utronix.de, gor@...ux.ibm.com
Cc:     donghai.w.qiao@...il.com, linux-kernel@...r.kernel.org,
        Donghai Qiao <dqiao@...hat.com>
Subject: [PATCH 11/11] smp: modify up.c to adopt the same format of cross CPU call.

Since smp.c has been changed to use the new interface, up.c should
be changed to use the uniprocessor version of the cross-CPU call as well.

Also clean up the dead code that was left over after applying the
preceding patches of this patch set.

Signed-off-by: Donghai Qiao <dqiao@...hat.com>
---
 include/linux/smp.h |  7 ------
 kernel/up.c         | 56 +++++++++++++++++++++++++++++++++------------
 2 files changed, 42 insertions(+), 21 deletions(-)

diff --git a/include/linux/smp.h b/include/linux/smp.h
index f2e6c7a1be3d..1e29527123f8 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -202,9 +202,6 @@ extern void __smp_call_single_queue(int cpu, struct llist_node *node);
 /* total number of cpus in this system (may exceed NR_CPUS) */
 extern unsigned int total_cpus;
 
-int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
-			     int wait);
-
 /*
  * Cpus stopping functions in panic. All have default weak definitions.
  * Architecture-dependent code may override them.
@@ -290,13 +287,9 @@ static inline void smp_send_stop(void) { }
 static inline void up_smp_call_function(smp_call_func_t func, void *info)
 {
 }
-#define smp_call_function(func, info, wait) \
-			(up_smp_call_function(func, info))
 
 static inline void smp_send_reschedule(int cpu) { }
 #define smp_prepare_boot_cpu()			do {} while (0)
-#define smp_call_function_many(mask, func, info, wait) \
-			(up_smp_call_function(func, info))
 static inline void call_function_init(void) { }
 
 static inline void kick_all_cpus_sync(void) {  }
diff --git a/kernel/up.c b/kernel/up.c
index a38b8b095251..92c62c677e52 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -9,8 +9,7 @@
 #include <linux/smp.h>
 #include <linux/hypervisor.h>
 
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-				int wait)
+int smp_xcall(int cpu, void (*func) (void *info), void *info, unsigned int type)
 {
 	unsigned long flags;
 
@@ -23,37 +22,66 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 
 	return 0;
 }
-EXPORT_SYMBOL(smp_call_function_single);
+EXPORT_SYMBOL(smp_xcall);
 
-int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
+int smp_xcall_cond(int cpu, smp_call_func_t func, void *info,
+		   smp_cond_func_t cond_func, unsigned int type)
+{
+	int ret = 0;
+
+	preempt_disable();
+	if (!cond_func || cond_func(0, info))
+		ret = smp_xcall(cpu, func, info, type);
+
+	preempt_enable();
+
+	return ret;
+}
+EXPORT_SYMBOL(smp_xcall_cond);
+
+void smp_xcall_mask(const struct cpumask *mask, smp_call_func_t func, void *info, unsigned int type)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
-	csd->func(csd->info);
-	local_irq_restore(flags);
+	if (!cpumask_test_cpu(0, mask))
+		return;
+
+	preempt_disable();
+	smp_xcall(0, func, info, type);
+	preempt_enable();
+}
+EXPORT_SYMBOL(smp_xcall_mask);
+
+int smp_xcall_private(int cpu, struct __call_single_data *csd, unsigned int type)
+{
+	preempt_disable();
+
+	if (csd->func != NULL)
+		smp_xcall(cpu, csd->func, csd->info, type);
+
+	preempt_enable();
+
 	return 0;
 }
-EXPORT_SYMBOL(smp_call_function_single_async);
+EXPORT_SYMBOL(smp_xcall_private);
 
 /*
  * Preemption is disabled here to make sure the cond_func is called under the
  * same conditions in UP and SMP.
  */
-void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
-			   void *info, bool wait, const struct cpumask *mask)
+void smp_xcall_mask_cond(const struct cpumask *mask, smp_call_func_t func,
+			 void *info, smp_cond_func_t cond_func,
+			 unsigned int type)
 {
 	unsigned long flags;
 
 	preempt_disable();
 	if ((!cond_func || cond_func(0, info)) && cpumask_test_cpu(0, mask)) {
-		local_irq_save(flags);
-		func(info);
-		local_irq_restore(flags);
+		smp_xcall(0, func, info, type);
 	}
 	preempt_enable();
 }
-EXPORT_SYMBOL(on_each_cpu_cond_mask);
+EXPORT_SYMBOL(smp_xcall_mask_cond);
 
 int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
 {
-- 
2.27.0

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ