lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [day] [month] [year] [list]
Message-ID: <tencent_B4B492A164C782F98C9C2E607E74B638FC0A@qq.com>
Date: Mon, 12 Jan 2026 14:49:53 +0800
From: wujing <realwujing@...com>
To: Alexei Starovoitov <ast@...nel.org>,
	Daniel Borkmann <daniel@...earbox.net>,
	Andrii Nakryiko <andrii@...nel.org>
Cc: John Fastabend <john.fastabend@...il.com>,
	bpf@...r.kernel.org,
	linux-kernel@...r.kernel.org,
	wujing <realwujing@...com>,
	Qiliang Yuan <yuanql9@...natelecom.cn>
Subject: [PATCH] bpf/verifier: implement slab cache for verifier state list

The BPF verifier's state exploration logic in is_state_visited()
frequently allocates and deallocates 'struct bpf_verifier_state_list'
nodes to track explored states and prune the search space.

Currently, these allocations use generic kzalloc(), which can lead to
unnecessary memory fragmentation and performance overhead when
verifying high-complexity BPF programs with thousands of potential
states.

Introduce a dedicated slab cache, 'bpf_verifier_state_list', to
manage these allocations more efficiently. This provides better
allocation speed, reduced fragmentation, and improved cache locality
during the verification process.

Summary of changes:
- Define global 'bpf_verifier_state_list_cachep'.
- Initialize the cache via late_initcall() in bpf_verifier_init().
- Use kmem_cache_zalloc() in is_state_visited() to allocate new states.
- Replace kfree() with kmem_cache_free() in maybe_free_verifier_state(),
  is_state_visited() error paths, and free_states().

Signed-off-by: wujing <realwujing@...com>
Signed-off-by: Qiliang Yuan <yuanql9@...natelecom.cn>
---
 kernel/bpf/verifier.c | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 169845710c7e..5c1be0cae4c2 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -46,6 +46,7 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
 
 struct bpf_mem_alloc bpf_global_percpu_ma;
 static bool bpf_global_percpu_ma_set;
+static struct kmem_cache *bpf_verifier_state_list_cachep;
 
 /* bpf_check() is a static code analyzer that walks eBPF program
  * instruction by instruction and updates register/stack state.
@@ -1711,7 +1712,7 @@ static void maybe_free_verifier_state(struct bpf_verifier_env *env,
 			loop_entry_sl->state.used_as_loop_entry--;
 		list_del(&sl->node);
 		free_verifier_state(&sl->state, false);
-		kfree(sl);
+		kmem_cache_free(bpf_verifier_state_list_cachep, sl);
 		env->free_list_size--;
 		sl = loop_entry_sl;
 	}
@@ -19282,7 +19283,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 	 * When looping the sl->state.branches will be > 0 and this state
 	 * will not be considered for equivalence until branches == 0.
 	 */
-	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
+	new_sl = kmem_cache_zalloc(bpf_verifier_state_list_cachep, GFP_KERNEL);
 	if (!new_sl)
 		return -ENOMEM;
 	env->total_states++;
@@ -19300,7 +19301,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 	err = copy_verifier_state(new, cur);
 	if (err) {
 		free_verifier_state(new, false);
-		kfree(new_sl);
+		kmem_cache_free(bpf_verifier_state_list_cachep, new_sl);
 		return err;
 	}
 	new->insn_idx = insn_idx;
@@ -22666,7 +22667,7 @@ static void free_states(struct bpf_verifier_env *env)
 	list_for_each_safe(pos, tmp, &env->free_list) {
 		sl = container_of(pos, struct bpf_verifier_state_list, node);
 		free_verifier_state(&sl->state, false);
-		kfree(sl);
+		kmem_cache_free(bpf_verifier_state_list_cachep, sl);
 	}
 	INIT_LIST_HEAD(&env->free_list);
 
@@ -22679,7 +22680,7 @@ static void free_states(struct bpf_verifier_env *env)
 		list_for_each_safe(pos, tmp, head) {
 			sl = container_of(pos, struct bpf_verifier_state_list, node);
 			free_verifier_state(&sl->state, false);
-			kfree(sl);
+			kmem_cache_free(bpf_verifier_state_list_cachep, sl);
 		}
 		INIT_LIST_HEAD(&env->explored_states[i]);
 	}
@@ -24199,3 +24200,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
 	kvfree(env);
 	return ret;
 }
+
+static int __init bpf_verifier_init(void)
+{
+	bpf_verifier_state_list_cachep = kmem_cache_create("bpf_verifier_state_list",
+							   sizeof(struct bpf_verifier_state_list),
+							   0, SLAB_PANIC, NULL);
+	return 0;
+}
+late_initcall(bpf_verifier_init);
-- 
2.43.0


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ