Message-ID: <20250815061824.765906-8-dongml2@chinatelecom.cn>
Date: Fri, 15 Aug 2025 14:18:24 +0800
From: Menglong Dong <menglong8.dong@...il.com>
To: ast@...nel.org
Cc: daniel@...earbox.net,
	john.fastabend@...il.com,
	andrii@...nel.org,
	martin.lau@...ux.dev,
	eddyz87@...il.com,
	song@...nel.org,
	yonghong.song@...ux.dev,
	kpsingh@...nel.org,
	sdf@...ichev.me,
	haoluo@...gle.com,
	jolsa@...nel.org,
	bpf@...r.kernel.org,
	linux-kernel@...r.kernel.org
Subject: [PATCH bpf-next 7/7] bpf: use rcu_migrate_* for trampoline.c

Replace migrate_disable()/migrate_enable() with
rcu_migrate_disable()/rcu_migrate_enable() in trampoline.c to obtain
better performance when PREEMPT_RCU is not enabled.
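
For context, a minimal sketch of what such helpers could look like under
the assumption motivating this change (the actual definitions are
introduced earlier in this series and may differ): when PREEMPT_RCU is
not enabled, rcu_read_lock() disables preemption, which already prevents
migration, so a separate migrate_disable() inside an RCU read-side
critical section is redundant.

	/* Illustrative sketch only, not the definitions from this series. */
	#ifdef CONFIG_PREEMPT_RCU
	static inline void rcu_migrate_disable(void)
	{
		/* Preemptible RCU: migration must be disabled explicitly. */
		migrate_disable();
	}
	static inline void rcu_migrate_enable(void)
	{
		migrate_enable();
	}
	#else
	static inline void rcu_migrate_disable(void)
	{
		/* Non-preemptible RCU: rcu_read_lock() already pins the CPU. */
	}
	static inline void rcu_migrate_enable(void) { }
	#endif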

Signed-off-by: Menglong Dong <dongml2@...natelecom.cn>
---
 kernel/bpf/trampoline.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 0e364614c3a2..a0608152c394 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -900,7 +900,7 @@ static u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog, struct bpf_tram
 	__acquires(RCU)
 {
 	rcu_read_lock();
-	migrate_disable();
+	rcu_migrate_disable();
 
 	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
@@ -949,7 +949,7 @@ static void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
 
 	update_prog_stats(prog, start);
 	this_cpu_dec(*(prog->active));
-	migrate_enable();
+	rcu_migrate_enable();
 	rcu_read_unlock();
 }
 
@@ -961,7 +961,7 @@ static u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
 	 * programs, not the shims.
 	 */
 	rcu_read_lock();
-	migrate_disable();
+	rcu_migrate_disable();
 
 	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
@@ -974,7 +974,7 @@ static void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
 {
 	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
 
-	migrate_enable();
+	rcu_migrate_enable();
 	rcu_read_unlock();
 }
 
@@ -1034,7 +1034,7 @@ static u64 notrace __bpf_prog_enter(struct bpf_prog *prog,
 	__acquires(RCU)
 {
 	rcu_read_lock();
-	migrate_disable();
+	rcu_migrate_disable();
 
 	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
@@ -1048,7 +1048,7 @@ static void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start,
 	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
 
 	update_prog_stats(prog, start);
-	migrate_enable();
+	rcu_migrate_enable();
 	rcu_read_unlock();
 }
 
-- 
2.50.1

