Message-ID: <tencent_0074C23A28B59EA264C502FA3C9EF6622A0A@qq.com>
Date: Mon, 12 Jan 2026 20:24:47 +0800
From: wujing <realwujing@...com>
To: ast@...nel.org,
daniel@...earbox.net,
andrii@...nel.org
Cc: john.fastabend@...il.com,
martin.lau@...ux.dev,
eddyz87@...il.com,
song@...nel.org,
yonghong.song@...ux.dev,
kpsingh@...nel.org,
sdf@...ichev.me,
haoluo@...gle.com,
jolsa@...nel.org,
bpf@...r.kernel.org,
linux-kernel@...r.kernel.org,
wujing <realwujing@...com>,
Qiliang Yuan <yuanql9@...natelecom.cn>
Subject: [PATCH] bpf/verifier: implement slab cache for verifier state list

The BPF verifier's state exploration logic in is_state_visited()
frequently allocates and frees 'struct bpf_verifier_state_list'
nodes to track explored states and prune the search space.

Currently, these allocations go through the generic kzalloc(), which
can cause unnecessary memory fragmentation and allocation overhead
when verifying complex BPF programs that explore thousands of
candidate states.

Introduce a dedicated slab cache, 'bpf_verifier_state_list', to manage
these allocations more efficiently. A dedicated cache gives faster
allocation, reduced fragmentation, and better cache locality during
verification.
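
The pattern adopted is the standard kmem_cache lifecycle: create the
cache once at boot, then draw zeroed objects from it and hand them
back when done. As a minimal, self-contained illustration (the 'foo'
names below are placeholders, not verifier code; the patch itself
operates on struct bpf_verifier_state_list):

  #include <linux/init.h>
  #include <linux/slab.h>

  struct foo { int x; };

  static struct kmem_cache *foo_cachep;

  static int __init foo_cache_init(void)
  {
          /* One cache sized for exactly this object type. SLAB_PANIC
           * aborts boot if creation fails, so callers never need to
           * check the cache pointer for NULL. */
          foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
                                         0, SLAB_PANIC, NULL);
          return 0;
  }
  late_initcall(foo_cache_init);

  static struct foo *foo_alloc(void)
  {
          /* Zeroed object from the dedicated cache; the drop-in
           * equivalent of kzalloc(sizeof(struct foo),
           * GFP_KERNEL_ACCOUNT). */
          return kmem_cache_zalloc(foo_cachep, GFP_KERNEL_ACCOUNT);
  }

  static void foo_free(struct foo *f)
  {
          /* Return the object to the cache it was allocated from. */
          kmem_cache_free(foo_cachep, f);
  }

SLAB_PANIC matches what the patch uses below; code that can fail
gracefully would instead check kmem_cache_create()'s return value for
NULL.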

Summary of changes:
- Define a global 'bpf_verifier_state_list_cachep' cache pointer.
- Initialize the cache via late_initcall() in bpf_verifier_init().
- Use kmem_cache_zalloc() in is_state_visited() to allocate new states.
- Replace kfree() with kmem_cache_free() in maybe_free_verifier_state(),
is_state_visited() error paths, and free_states().

Signed-off-by: wujing <realwujing@...com>
Signed-off-by: Qiliang Yuan <yuanql9@...natelecom.cn>
---
kernel/bpf/verifier.c | 22 ++++++++++++++++------
1 file changed, 16 insertions(+), 6 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f0ca69f888fa..681e35fa5a0f 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -52,6 +52,7 @@ enum bpf_features {
struct bpf_mem_alloc bpf_global_percpu_ma;
static bool bpf_global_percpu_ma_set;
+static struct kmem_cache *bpf_verifier_state_list_cachep;
/* bpf_check() is a static code analyzer that walks eBPF program
* instruction by instruction and updates register/stack state.
@@ -1718,7 +1719,7 @@ static void maybe_free_verifier_state(struct bpf_verifier_env *env,
return;
list_del(&sl->node);
free_verifier_state(&sl->state, false);
- kfree(sl);
+ kmem_cache_free(bpf_verifier_state_list_cachep, sl);
env->free_list_size--;
}
@@ -20023,7 +20024,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
* When looping the sl->state.branches will be > 0 and this state
* will not be considered for equivalence until branches == 0.
*/
- new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL_ACCOUNT);
+ new_sl = kmem_cache_zalloc(bpf_verifier_state_list_cachep, GFP_KERNEL_ACCOUNT);
if (!new_sl)
return -ENOMEM;
env->total_states++;
@@ -20041,7 +20042,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
err = copy_verifier_state(new, cur);
if (err) {
free_verifier_state(new, false);
- kfree(new_sl);
+ kmem_cache_free(bpf_verifier_state_list_cachep, new_sl);
return err;
}
new->insn_idx = insn_idx;
@@ -20051,7 +20052,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
err = maybe_enter_scc(env, new);
if (err) {
free_verifier_state(new, false);
- kfree(new_sl);
+ kmem_cache_free(bpf_verifier_state_list_cachep, new_sl);
return err;
}
@@ -23711,7 +23712,7 @@ static void free_states(struct bpf_verifier_env *env)
list_for_each_safe(pos, tmp, &env->free_list) {
sl = container_of(pos, struct bpf_verifier_state_list, node);
free_verifier_state(&sl->state, false);
- kfree(sl);
+ kmem_cache_free(bpf_verifier_state_list_cachep, sl);
}
INIT_LIST_HEAD(&env->free_list);
@@ -23734,7 +23735,7 @@ static void free_states(struct bpf_verifier_env *env)
list_for_each_safe(pos, tmp, head) {
sl = container_of(pos, struct bpf_verifier_state_list, node);
free_verifier_state(&sl->state, false);
- kfree(sl);
+ kmem_cache_free(bpf_verifier_state_list_cachep, sl);
}
INIT_LIST_HEAD(&env->explored_states[i]);
}
@@ -25396,3 +25397,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
kvfree(env);
return ret;
}
+
+static int __init bpf_verifier_init(void)
+{
+ bpf_verifier_state_list_cachep = kmem_cache_create("bpf_verifier_state_list",
+ sizeof(struct bpf_verifier_state_list),
+ 0, SLAB_PANIC, NULL);
+ return 0;
+}
+late_initcall(bpf_verifier_init);
--
2.43.0