From 5e44ec29f5aa037ba8d1188ccee456ab3996d03e Mon Sep 17 00:00:00 2001
From: Hou Tao
Date: Wed, 21 Jun 2023 17:17:52 +0800
Subject: [PATCH] bpf: Check leaked objects

---
 kernel/bpf/memalloc.c | 48 ++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 45 insertions(+), 3 deletions(-)

diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 3081d06a434c..2bdb894392c5 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -565,8 +565,50 @@ static void drain_mem_cache(struct bpf_mem_cache *c)
 	free_all(llist_del_all(&c->waiting_for_gp), percpu);
 }
 
-static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
+static void check_mem_cache(struct bpf_mem_cache *c, bool direct)
+{
+	if (!llist_empty(&c->free_by_rcu_ttrace))
+		pr_warn("leak: free_by_rcu_ttrace %d\n", direct);
+	if (!llist_empty(&c->waiting_for_gp_ttrace))
+		pr_warn("leak: waiting_for_gp_ttrace %d\n", direct);
+	if (!llist_empty(&c->free_llist))
+		pr_warn("leak: free_llist %d\n", direct);
+	if (!llist_empty(&c->free_llist_extra))
+		pr_warn("leak: free_llist_extra %d\n", direct);
+	if (!llist_empty(&c->free_by_rcu))
+		pr_warn("leak: free_by_rcu %d\n", direct);
+	if (!llist_empty(&c->free_llist_extra_rcu))
+		pr_warn("leak: free_llist_extra_rcu %d\n", direct);
+	if (!llist_empty(&c->waiting_for_gp))
+		pr_warn("leak: waiting_for_gp %d\n", direct);
+}
+
+static void check_leaked_objs(struct bpf_mem_alloc *ma, bool direct)
 {
+	struct bpf_mem_caches *cc;
+	struct bpf_mem_cache *c;
+	int cpu, i;
+
+	if (ma->cache) {
+		for_each_possible_cpu(cpu) {
+			c = per_cpu_ptr(ma->cache, cpu);
+			check_mem_cache(c, direct);
+		}
+	}
+	if (ma->caches) {
+		for_each_possible_cpu(cpu) {
+			cc = per_cpu_ptr(ma->caches, cpu);
+			for (i = 0; i < NUM_CACHES; i++) {
+				c = &cc->cache[i];
+				check_mem_cache(c, direct);
+			}
+		}
+	}
+}
+
+static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma, bool direct)
+{
+	check_leaked_objs(ma, direct);
 	free_percpu(ma->cache);
 	free_percpu(ma->caches);
 	ma->cache = NULL;
@@ -589,7 +631,7 @@ static void free_mem_alloc(struct bpf_mem_alloc *ma)
 	rcu_barrier_tasks_trace(); /* wait for __free_rcu */
 	if (!rcu_trace_implies_rcu_gp())
 		rcu_barrier();
-	free_mem_alloc_no_barrier(ma);
+	free_mem_alloc_no_barrier(ma, false);
 }
 
 static void free_mem_alloc_deferred(struct work_struct *work)
@@ -608,7 +650,7 @@ static void destroy_mem_alloc(struct bpf_mem_alloc *ma, int rcu_in_progress)
 		/* Fast path. No callbacks are pending, hence no need to do
 		 * rcu_barrier-s.
 		 */
-		free_mem_alloc_no_barrier(ma);
+		free_mem_alloc_no_barrier(ma, true);
 		return;
 	}
 
-- 
2.39.2