[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <ZN3YeyMkgEg1IoKP@krava>
Date: Thu, 17 Aug 2023 10:21:15 +0200
From: Jiri Olsa <olsajiri@...il.com>
To: Rong Tao <rtoax@...mail.com>
Cc: sdf@...gle.com, daniel@...earbox.net, andrii@...nel.org,
rongtao@...tc.cn, Alexei Starovoitov <ast@...nel.org>,
Martin KaFai Lau <martin.lau@...ux.dev>,
Song Liu <song@...nel.org>,
Yonghong Song <yonghong.song@...ux.dev>,
John Fastabend <john.fastabend@...il.com>,
KP Singh <kpsingh@...nel.org>, Hao Luo <haoluo@...gle.com>,
Mykola Lysenko <mykolal@...com>, Shuah Khan <shuah@...nel.org>,
"open list:BPF [GENERAL] (Safe Dynamic Programs and Tools)"
<bpf@...r.kernel.org>,
"open list:KERNEL SELFTEST FRAMEWORK"
<linux-kselftest@...r.kernel.org>,
open list <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH bpf-next v5] selftests/bpf: trace_helpers.c: optimize
kallsyms cache
On Thu, Aug 17, 2023 at 01:03:45PM +0800, Rong Tao wrote:
> From: Rong Tao <rongtao@...tc.cn>
>
> Static ksyms often have problems because the number of symbols exceeds the
> MAX_SYMS limit. Like changing the MAX_SYMS from 300000 to 400000 in
> commit e76a014334a6 ("selftests/bpf: Bump and validate MAX_SYMS") solves
> the problem somewhat, but it's not the perfect way.
>
> This commit uses dynamic memory allocation, which completely solves the
> problem caused by the limitation of the number of kallsyms.
>
> Acked-by: Stanislav Fomichev <sdf@...gle.com>
> Signed-off-by: Rong Tao <rongtao@...tc.cn>
> ---
> v5: Release the allocated memory in load_kallsyms_refresh() upon error,
> given it's dynamically allocated.
> v4: https://lore.kernel.org/lkml/tencent_59C74613113F0C728524B2A82FE5540A5E09@qq.com/
> Make sure most cases we don't need the realloc() path to begin with,
> and check strdup() return value.
> v3: https://lore.kernel.org/lkml/tencent_50B4B2622FE7546A5FF9464310650C008509@qq.com/
> Do not use structs and judge ksyms__add_symbol function return value.
> v2: https://lore.kernel.org/lkml/tencent_B655EE5E5D463110D70CD2846AB3262EED09@qq.com/
> Do the usual len/capacity scheme here to amortize the cost of realloc, and
> don't free symbols.
> v1: https://lore.kernel.org/lkml/tencent_AB461510B10CD484E0B2F62E3754165F2909@qq.com/
> ---
> tools/testing/selftests/bpf/trace_helpers.c | 62 +++++++++++++++++----
> 1 file changed, 52 insertions(+), 10 deletions(-)
>
> diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
> index f83d9f65c65b..0053ba22f0cb 100644
> --- a/tools/testing/selftests/bpf/trace_helpers.c
> +++ b/tools/testing/selftests/bpf/trace_helpers.c
> @@ -18,10 +18,47 @@
> #define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe"
> #define DEBUGFS_PIPE "/sys/kernel/debug/tracing/trace_pipe"
>
> -#define MAX_SYMS 400000
> -static struct ksym syms[MAX_SYMS];
> +static struct ksym *syms;
> +static int sym_cap;
> static int sym_cnt;
>
> +static int ksyms__add_symbol(const char *name, unsigned long addr)
> +{
> + void *tmp;
> + unsigned int new_cap;
> +
> + if (sym_cnt + 1 > sym_cap) {
> + new_cap = sym_cap * 4 / 3;
> + tmp = realloc(syms, sizeof(struct ksym) * new_cap);
> + if (!tmp)
> + return -ENOMEM;
> + syms = tmp;
> + sym_cap = new_cap;
> + }
sorry I did not notice earlier, but we have a helper for realloc:
libbpf_ensure_mem
check its usage, for example in prog_tests/kprobe_multi_test.c
> +
> + tmp = strdup(name);
> + if (!tmp)
> + return -ENOMEM;
> + syms[sym_cnt].addr = addr;
> + syms[sym_cnt].name = tmp;
> +
> + sym_cnt++;
> +
> + return 0;
> +}
> +
> +static void ksyms__free(void)
> +{
> + unsigned int i;
> +
> + if (!syms)
> + return;
> +
> + for (i = 0; i < sym_cnt; i++)
> + free(syms[i].name);
> + free(syms);
> +}
> +
> static int ksym_cmp(const void *p1, const void *p2)
> {
> return ((struct ksym *)p1)->addr - ((struct ksym *)p2)->addr;
> @@ -33,9 +70,14 @@ int load_kallsyms_refresh(void)
> char func[256], buf[256];
> char symbol;
> void *addr;
> - int i = 0;
> + int ret;
>
> + /* Make sure most cases we don't need the realloc() path to begin with */
> + sym_cap = 400000;
> sym_cnt = 0;
> + syms = malloc(sizeof(struct ksym) * sym_cap);
> + if (!syms)
> + return -ENOMEM;
libbpf_ensure_mem will also take care of the first allocation and the capacity increase
jirka
>
> f = fopen("/proc/kallsyms", "r");
> if (!f)
> @@ -46,17 +88,17 @@ int load_kallsyms_refresh(void)
> break;
> if (!addr)
> continue;
> - if (i >= MAX_SYMS)
> - return -EFBIG;
> -
> - syms[i].addr = (long) addr;
> - syms[i].name = strdup(func);
> - i++;
> + ret = ksyms__add_symbol(func, (unsigned long)addr);
> + if (ret)
> + goto error;
> }
> fclose(f);
> - sym_cnt = i;
> qsort(syms, sym_cnt, sizeof(struct ksym), ksym_cmp);
> return 0;
> +
> +error:
> + ksyms__free();
> + return ret;
> }
>
> int load_kallsyms(void)
> --
> 2.39.3
>
>
Powered by blists - more mailing lists