Message-Id: <20250407180838.42877-6-andybnac@gmail.com>
Date: Tue, 8 Apr 2025 02:08:30 +0800
From: Andy Chiu <andybnac@...il.com>
To: linux-riscv@...ts.infradead.org,
alexghiti@...osinc.com,
palmer@...belt.com
Cc: Andy Chiu <andy.chiu@...ive.com>,
linux-kernel@...r.kernel.org,
linux-trace-kernel@...r.kernel.org,
Mark Rutland <mark.rutland@....com>,
Alexandre Ghiti <alex@...ti.fr>,
bjorn@...osinc.com,
puranjay12@...il.com,
paul.walmsley@...ive.com,
greentime.hu@...ive.com,
nick.hu@...ive.com,
nylon.chen@...ive.com,
eric.lin@...ive.com,
vicent.chen@...ive.com,
zong.li@...ive.com,
yongxuan.wang@...ive.com,
samuel.holland@...ive.com,
olivia.chu@...ive.com,
c2232430@...il.com
Subject: [PATCH v4 06/12] riscv: ftrace: do not use stop_machine to update code
From: Andy Chiu <andy.chiu@...ive.com>
Now it is safe to remove the dependency on stop_machine() for patching
code in ftrace.
Signed-off-by: Andy Chiu <andy.chiu@...ive.com>
---
Changelog v4:
- assume ftrace_update_ftrace_func is always called with irqs enabled
---
arch/riscv/kernel/ftrace.c | 64 ++++++--------------------------------
1 file changed, 10 insertions(+), 54 deletions(-)
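
For readers unfamiliar with the mechanism being removed: the deleted
__ftrace_modify_code() below implements a stop_machine()-style rendezvous in
which the last CPU to arrive performs the patch and then releases the other
CPUs, which spin on a shared counter and finally flush their local icache.
The sketch below is a userspace analogue of that counter rendezvous, not
kernel code; NTHREADS, do_patch(), patched_value and the pthread scaffolding
are illustrative stand-ins only:

/*
 * Userspace sketch of the rendezvous that __ftrace_modify_code() performed
 * under stop_machine(): the last thread to arrive does the patch, then
 * release-increments the counter so the spinning threads may continue.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4                     /* stands in for num_online_cpus() */

static atomic_int cpu_count;           /* mirrors param->cpu_count        */
static int patched_value;              /* stands in for the patched text  */

static void do_patch(void)             /* stands in for ftrace_modify_all_code() */
{
	patched_value = 42;
}

static void *modify_code(void *arg)
{
	(void)arg;

	if (atomic_fetch_add(&cpu_count, 1) + 1 == NTHREADS) {
		do_patch();
		/* release pairs with the acquire loads in the spinners */
		atomic_fetch_add_explicit(&cpu_count, 1, memory_order_release);
	} else {
		while (atomic_load_explicit(&cpu_count, memory_order_acquire) <= NTHREADS)
			;		/* cpu_relax() in the kernel */
		/* here the kernel would local_flush_icache_all() */
	}
	return NULL;
}

int main(void)
{
	pthread_t t[NTHREADS];

	for (int i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], NULL, modify_code, NULL);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);

	printf("patched_value = %d\n", patched_value);
	return 0;
}

With this patch the rendezvous goes away entirely: patching happens under
text_mutex with FTRACE_MAY_SLEEP set, and remote harts are synchronized via
flush_icache_all() and the IPI issued from ftrace_update_ftrace_func()
instead.
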
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index cf78eef073a0..aca1a322e0aa 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -24,23 +24,13 @@ unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip)
}
#ifdef CONFIG_DYNAMIC_FTRACE
-void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
+void arch_ftrace_update_code(int command)
{
mutex_lock(&text_mutex);
-
- /*
- * The code sequences we use for ftrace can't be patched while the
- * kernel is running, so we need to use stop_machine() to modify them
- * for now. This doesn't play nice with text_mutex, we use this flag
- * to elide the check.
- */
- riscv_patch_in_stop_machine = true;
-}
-
-void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
-{
- riscv_patch_in_stop_machine = false;
+ command |= FTRACE_MAY_SLEEP;
+ ftrace_modify_all_code(command);
mutex_unlock(&text_mutex);
+ flush_icache_all();
}
static int __ftrace_modify_call(unsigned long source, unsigned long target, bool validate)
@@ -129,51 +119,17 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
* before the write to function_trace_op later in the generic ftrace.
* If the sequence is not enforced, then an old ftrace_call_dest may
* race loading a new function_trace_op set in ftrace_modify_all_code
- *
- * If we are in stop_machine, then we don't need to call remote fence
- * as there is no concurrent read-side of ftrace_call_dest.
*/
smp_wmb();
- if (!irqs_disabled())
- smp_call_function(ftrace_sync_ipi, NULL, 1);
- return 0;
-}
-
-struct ftrace_modify_param {
- int command;
- atomic_t cpu_count;
-};
-
-static int __ftrace_modify_code(void *data)
-{
- struct ftrace_modify_param *param = data;
-
- if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
- ftrace_modify_all_code(param->command);
- /*
- * Make sure the patching store is effective *before* we
- * increment the counter which releases all waiting CPUs
- * by using the release variant of atomic increment. The
- * release pairs with the call to local_flush_icache_all()
- * on the waiting CPU.
- */
- atomic_inc_return_release(&param->cpu_count);
- } else {
- while (atomic_read(&param->cpu_count) <= num_online_cpus())
- cpu_relax();
-
- local_flush_icache_all();
- }
-
+ /*
+ * Updating ftrace does not take the stop_machine() path, so irqs should not
+ * be disabled.
+ */
+ WARN_ON(irqs_disabled());
+ smp_call_function(ftrace_sync_ipi, NULL, 1);
return 0;
}
-void arch_ftrace_update_code(int command)
-{
- struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };
-
- stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
-}
#endif
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
--
2.39.3 (Apple Git-145)