Message-ID: <20190228185749.GJ9508@kernel.org>
Date: Thu, 28 Feb 2019 15:57:49 -0300
From: Arnaldo Carvalho de Melo <arnaldo.melo@...il.com>
To: Andrii Nakryiko <andriin@...com>
Cc: andrii.nakryiko@...il.com, kernel-team@...com, ast@...com,
netdev@...r.kernel.org, bpf@...r.kernel.org, daniel@...earbox.net
Subject: Re: [PATCH bpf-next 3/5] btf: allow to customize dedup hash table size
On Wed, Feb 27, 2019 at 02:46:39PM -0800, Andrii Nakryiko wrote:
> The default dedup table size (16K) is good enough for most binaries,
> even typical vmlinux images. But there are binaries with a huge number
> of BTF types (e.g., allyesconfig variants of the kernel) that benefit
> from a bigger dedup table, which lowers the number of unnecessary hash
> collisions. Tools like pahole can thus tune this parameter to reach
> optimal performance.
>
> This change also serves the double purpose of letting tests force hash
> collisions in order to exercise some corner cases; this is used in a
> follow-up patch.
>
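As an aside, a caller like pahole would then presumably tune this
roughly as below (just a sketch against the API in this patch; the
1 << 20 size is an illustrative guess for an allyesconfig vmlinux, not
a measured value):

	struct btf_dedup_opts opts = {
		/* non-power-of-two values get rounded up; 0 would mean
		 * the 16K default
		 */
		.dedup_table_size = 1 << 20,
		.dont_resolve_fwds = false,
	};
	int err = btf__dedup(btf, btf_ext, &opts);

	if (err)
		fprintf(stderr, "btf__dedup failed: %d\n", err);
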
> Signed-off-by: Andrii Nakryiko <andriin@...com>
> ---
> tools/lib/bpf/btf.c | 43 ++++++++++++++++++++++++++-----------------
> tools/lib/bpf/btf.h | 1 +
> 2 files changed, 27 insertions(+), 17 deletions(-)
>
> diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
> index 68b50e9bbde1..6bbb710216e6 100644
> --- a/tools/lib/bpf/btf.c
> +++ b/tools/lib/bpf/btf.c
> @@ -1070,8 +1070,7 @@ int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
> return err;
> }
>
> -#define BTF_DEDUP_TABLE_SIZE_LOG 14
> -#define BTF_DEDUP_TABLE_MOD ((1 << BTF_DEDUP_TABLE_SIZE_LOG) - 1)
> +#define BTF_DEDUP_TABLE_DEFAULT_SIZE (1 << 14)
> #define BTF_UNPROCESSED_ID ((__u32)-1)
> #define BTF_IN_PROGRESS_ID ((__u32)-2)
>
> @@ -1128,18 +1127,21 @@ static inline __u32 hash_combine(__u32 h, __u32 value)
> #undef GOLDEN_RATIO_PRIME
> }
>
> -#define for_each_hash_node(table, hash, node) \
> - for (node = table[hash & BTF_DEDUP_TABLE_MOD]; node; node = node->next)
> +#define for_each_dedup_cand(d, hash, node) \
> + for (node = d->dedup_table[hash & (d->opts.dedup_table_size - 1)]; \
> + node; \
> + node = node->next)
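(For readers following along: the hash & (size - 1) masking here is
what requires the table size to be a power of two -- it only matches
hash % size in that case. E.g. with the 16K default:

	/* 16384 - 1 == 0x3fff, so the mask keeps the low 14 bits of
	 * the hash -- equivalent to hash % 16384, minus the division
	 */
	bucket = hash & (16384 - 1);

hence the rounding-up done in btf_dedup_new() further down.)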
>
> static int btf_dedup_table_add(struct btf_dedup *d, __u32 hash, __u32 type_id)
> {
> struct btf_dedup_node *node = malloc(sizeof(struct btf_dedup_node));
> + int bucket = hash & (d->opts.dedup_table_size - 1);
>
> if (!node)
> return -ENOMEM;
> node->type_id = type_id;
> - node->next = d->dedup_table[hash & BTF_DEDUP_TABLE_MOD];
> - d->dedup_table[hash & BTF_DEDUP_TABLE_MOD] = node;
> + node->next = d->dedup_table[bucket];
> + d->dedup_table[bucket] = node;
> return 0;
> }
>
> @@ -1177,7 +1179,7 @@ static void btf_dedup_table_free(struct btf_dedup *d)
> if (!d->dedup_table)
> return;
>
> - for (i = 0; i < (1 << BTF_DEDUP_TABLE_SIZE_LOG); i++) {
> + for (i = 0; i < d->opts.dedup_table_size; i++) {
> while (d->dedup_table[i]) {
> tmp = d->dedup_table[i];
> d->dedup_table[i] = tmp->next;
> @@ -1221,10 +1223,19 @@ static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
> if (!d)
> return ERR_PTR(-ENOMEM);
>
> + d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
Is the above line a leftover from some other patch? It doesn't seem
related to this one.

The rest seems ok.
> + /* ensure table size is power of two and limit to 2G */
> + d->opts.dedup_table_size = opts && opts->dedup_table_size
> + ? opts->dedup_table_size
> + : BTF_DEDUP_TABLE_DEFAULT_SIZE;
> + for (i = 0; i < 31 && (1 << i) < d->opts.dedup_table_size; i++)
> + ;
> + d->opts.dedup_table_size = 1 << i;
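Spelling out the rounding for myself: the loop picks the smallest power
of two >= the requested size, capping at 1 << 31 (the 2G limit from the
comment). The same logic standalone, with a hypothetical requested
size:

	unsigned int requested = 5000, size;
	int i;

	for (i = 0; i < 31 && (1 << i) < requested; i++)
		;
	size = 1 << i;	/* 8192, the smallest power of two >= 5000 */
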
> +
> d->btf = btf;
> d->btf_ext = btf_ext;
>
> - d->dedup_table = calloc(1 << BTF_DEDUP_TABLE_SIZE_LOG,
> + d->dedup_table = calloc(d->opts.dedup_table_size,
> sizeof(struct btf_dedup_node *));
> if (!d->dedup_table) {
> err = -ENOMEM;
> @@ -1249,8 +1260,6 @@ static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
> for (i = 0; i <= btf->nr_types; i++)
> d->hypot_map[i] = BTF_UNPROCESSED_ID;
>
> - d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
> -
> done:
> if (err) {
> btf_dedup_free(d);
> @@ -1824,7 +1833,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
>
> case BTF_KIND_INT:
> h = btf_hash_int(t);
> - for_each_hash_node(d->dedup_table, h, cand_node) {
> + for_each_dedup_cand(d, h, cand_node) {
> cand = d->btf->types[cand_node->type_id];
> if (btf_equal_int(t, cand)) {
> new_id = cand_node->type_id;
> @@ -1835,7 +1844,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
>
> case BTF_KIND_ENUM:
> h = btf_hash_enum(t);
> - for_each_hash_node(d->dedup_table, h, cand_node) {
> + for_each_dedup_cand(d, h, cand_node) {
> cand = d->btf->types[cand_node->type_id];
> if (btf_equal_enum(t, cand)) {
> new_id = cand_node->type_id;
> @@ -1846,7 +1855,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
>
> case BTF_KIND_FWD:
> h = btf_hash_common(t);
> - for_each_hash_node(d->dedup_table, h, cand_node) {
> + for_each_dedup_cand(d, h, cand_node) {
> cand = d->btf->types[cand_node->type_id];
> if (btf_equal_common(t, cand)) {
> new_id = cand_node->type_id;
> @@ -2263,7 +2272,7 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
> return 0;
>
> h = btf_hash_struct(t);
> - for_each_hash_node(d->dedup_table, h, cand_node) {
> + for_each_dedup_cand(d, h, cand_node) {
> int eq;
>
> btf_dedup_clear_hypot_map(d);
> @@ -2349,7 +2358,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
> t->type = ref_type_id;
>
> h = btf_hash_common(t);
> - for_each_hash_node(d->dedup_table, h, cand_node) {
> + for_each_dedup_cand(d, h, cand_node) {
> cand = d->btf->types[cand_node->type_id];
> if (btf_equal_common(t, cand)) {
> new_id = cand_node->type_id;
> @@ -2372,7 +2381,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
> info->index_type = ref_type_id;
>
> h = btf_hash_array(t);
> - for_each_hash_node(d->dedup_table, h, cand_node) {
> + for_each_dedup_cand(d, h, cand_node) {
> cand = d->btf->types[cand_node->type_id];
> if (btf_equal_array(t, cand)) {
> new_id = cand_node->type_id;
> @@ -2403,7 +2412,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
> }
>
> h = btf_hash_fnproto(t);
> - for_each_hash_node(d->dedup_table, h, cand_node) {
> + for_each_dedup_cand(d, h, cand_node) {
> cand = d->btf->types[cand_node->type_id];
> if (btf_equal_fnproto(t, cand)) {
> new_id = cand_node->type_id;
> diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
> index b60bb7cf5fff..28a1e1e59861 100644
> --- a/tools/lib/bpf/btf.h
> +++ b/tools/lib/bpf/btf.h
> @@ -90,6 +90,7 @@ LIBBPF_API __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
> LIBBPF_API __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
>
> struct btf_dedup_opts {
> + unsigned int dedup_table_size;
> bool dont_resolve_fwds;
> };
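FWIW, with this scheme a zero-initialized opts (or a NULL opts pointer,
per btf_dedup_new() above) keeps the current behavior, i.e.:

	struct btf_dedup_opts opts = {};	/* 16K table, fwds resolved */
	int err = btf__dedup(btf, btf_ext, &opts);
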
>
> --
> 2.17.1
--
- Arnaldo