[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20220519204943.1079578-6-dqiao@redhat.com>
Date: Thu, 19 May 2022 16:49:37 -0400
From: Donghai Qiao <dqiao@...hat.com>
To: akpm@...ux-foundation.org, sfr@...b.auug.org.au, arnd@...db.de,
peterz@...radead.org, heying24@...wei.com,
andriy.shevchenko@...ux.intel.com, axboe@...nel.dk,
rdunlap@...radead.org, tglx@...utronix.de, gor@...ux.ibm.com
Cc: donghai.w.qiao@...il.com, linux-kernel@...r.kernel.org,
Donghai Qiao <dqiao@...hat.com>
Subject: [PATCH v4 05/11] smp: replace smp_call_function_single_async with smp_call_csd
Replace smp_call_function_single_async() with smp_call_csd() and extend
it to support synchronous calls on one CPU with a preallocated csd structure.
Signed-off-by: Donghai Qiao <dqiao@...hat.com>
---
v1 -> v2: removed 'x' from the function names and changed XCALL to
SMP_CALL in the new macros
v2 -> v3: Changed the call of smp_call_private() to smp_call_csd()
include/linux/smp.h | 3 +-
kernel/smp.c | 157 ++++++++++++++++++++------------------------
2 files changed, 75 insertions(+), 85 deletions(-)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 06a20454fd53..b4885e45690b 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -193,7 +193,8 @@ int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
void *info, bool wait, const struct cpumask *mask);
-int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
+#define smp_call_function_single_async(cpu, csd) \
+ smp_call_csd(cpu, csd, SMP_CALL_TYPE_ASYNC)
/*
* Cpus stopping functions in panic. All have default weak definitions.
diff --git a/kernel/smp.c b/kernel/smp.c
index 8fdea9547502..f08135ad70e3 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -444,41 +444,6 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
send_call_function_single_ipi(cpu);
}
-/*
- * Insert a previously allocated call_single_data_t element
- * for execution on the given CPU. data must already have
- * ->func, ->info, and ->flags set.
- */
-static int generic_exec_single(int cpu, struct __call_single_data *csd)
-{
- if (cpu == smp_processor_id()) {
- smp_call_func_t func = csd->func;
- void *info = csd->info;
- unsigned long flags;
-
- /*
- * We can unlock early even for the synchronous on-stack case,
- * since we're doing this from the same CPU..
- */
- csd_lock_record(csd);
- csd_unlock(csd);
- local_irq_save(flags);
- func(info);
- csd_lock_record(NULL);
- local_irq_restore(flags);
- return 0;
- }
-
- if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
- csd_unlock(csd);
- return -ENXIO;
- }
-
- __smp_call_single_queue(cpu, &csd->node.llist);
-
- return 0;
-}
-
/**
* generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
*
@@ -676,52 +641,6 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
}
EXPORT_SYMBOL(smp_call_function_single);
-/**
- * smp_call_function_single_async() - Run an asynchronous function on a
- * specific CPU.
- * @cpu: The CPU to run on.
- * @csd: Pre-allocated and setup data structure
- *
- * Like smp_call_function_single(), but the call is asynchonous and
- * can thus be done from contexts with disabled interrupts.
- *
- * The caller passes his own pre-allocated data structure
- * (ie: embedded in an object) and is responsible for synchronizing it
- * such that the IPIs performed on the @csd are strictly serialized.
- *
- * If the function is called with one csd which has not yet been
- * processed by previous call to smp_call_function_single_async(), the
- * function will return immediately with -EBUSY showing that the csd
- * object is still in progress.
- *
- * NOTE: Be careful, there is unfortunately no current debugging facility to
- * validate the correctness of this serialization.
- *
- * Return: %0 on success or negative errno value on error
- */
-int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
-{
- int err = 0;
-
- preempt_disable();
-
- if (csd->node.u_flags & CSD_FLAG_LOCK) {
- err = -EBUSY;
- goto out;
- }
-
- csd->node.u_flags = CSD_FLAG_LOCK;
- smp_wmb();
-
- err = generic_exec_single(cpu, csd);
-
-out:
- preempt_enable();
-
- return err;
-}
-EXPORT_SYMBOL_GPL(smp_call_function_single_async);
-
/*
* smp_call_function_any - Run a function on any of the given cpus
* @mask: The mask of cpus it can run on.
@@ -1304,15 +1223,85 @@ EXPORT_SYMBOL(smp_call_mask_cond);
* Because of that, this function can be used from the contexts with disabled
* interrupts.
*
- * Parameters
+ * The bit CSD_FLAG_LOCK will be set to csd->node.u_flags only if the call
+ * is made as type CSD_TYPE_SYNC or CSD_TYPE_ASYNC.
*
+ * Parameters
* cpu: Must be a positive value less than nr_cpu_id.
* csd: The private csd provided by the callers.
- *
* Others: see smp_call().
+ *
+ * Return: %0 on success or negative errno value on error.
+ *
+ * The following comments are from smp_call_function_single_async():
+ *
+ * The call is asynchronous and can thus be done from contexts with
+ * disabled interrupts. If the function is called with one csd which
+ * has not yet been processed by previous call, the function will
+ * return immediately with -EBUSY showing that the csd object is
+ * still in progress.
+ *
+ * NOTE: Be careful, there is unfortunately no current debugging
+ * facility to validate the correctness of this serialization.
*/
int smp_call_csd(int cpu, call_single_data_t *csd, unsigned int flags)
{
- return 0;
+ int err = 0;
+
+ if ((unsigned int)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
+ pr_warn("cpu ID must be a positive number < nr_cpu_ids and must be currently online\n");
+ return -EINVAL;
+ }
+
+ if (csd == NULL) {
+ pr_warn("csd must not be NULL\n");
+ return -EINVAL;
+ }
+
+ preempt_disable();
+ if (csd->node.u_flags & CSD_FLAG_LOCK) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * CSD_FLAG_LOCK is set for CSD_TYPE_SYNC or CSD_TYPE_ASYNC only.
+ */
+ if ((flags & ~(CSD_TYPE_SYNC | CSD_TYPE_ASYNC)) == 0)
+ csd->node.u_flags = CSD_FLAG_LOCK | flags;
+ else
+ csd->node.u_flags = flags;
+
+ if (cpu == smp_processor_id()) {
+ smp_call_func_t func = csd->func;
+ void *info = csd->info;
+ unsigned long flags;
+
+ /*
+ * We can unlock early even for the synchronous on-stack case,
+ * since we're doing this from the same CPU..
+ */
+ csd_lock_record(csd);
+ csd_unlock(csd);
+ local_irq_save(flags);
+ func(info);
+ csd_lock_record(NULL);
+ local_irq_restore(flags);
+ goto out;
+ }
+
+ /*
+ * Ensure the flags are visible before the csd
+ * goes to the queue.
+ */
+ smp_wmb();
+
+ __smp_call_single_queue(cpu, &csd->node.llist);
+
+ if (flags & CSD_TYPE_SYNC)
+ csd_lock_wait(csd);
+out:
+ preempt_enable();
+ return err;
}
EXPORT_SYMBOL(smp_call_csd);
--
2.27.0
Powered by blists - more mailing lists