Message-ID: <20210709015119.l5kxp5kao24bjft7@kafai-mbp.dhcp.thefacebook.com>
Date: Thu, 8 Jul 2021 18:51:19 -0700
From: Martin KaFai Lau <kafai@...com>
To: Alexei Starovoitov <alexei.starovoitov@...il.com>
CC: <davem@...emloft.net>, <daniel@...earbox.net>, <andrii@...nel.org>,
<netdev@...r.kernel.org>, <bpf@...r.kernel.org>,
<kernel-team@...com>
Subject: Re: [PATCH v5 bpf-next 04/11] bpf: Add map side support for bpf
timers.
On Wed, Jul 07, 2021 at 06:18:26PM -0700, Alexei Starovoitov wrote:
[ ... ]
> +static void array_map_free_timers(struct bpf_map *map)
> +{
> + struct bpf_array *array = container_of(map, struct bpf_array, map);
> + int i;
> +
> + if (likely(!map_value_has_timer(map)))
> + return;
> +
> + for (i = 0; i < array->map.max_entries; i++)
> + bpf_timer_cancel_and_free(array->value + array->elem_size * i +
> + map->timer_off);
> +}
> +
> /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
> static void array_map_free(struct bpf_map *map)
> {
> @@ -382,6 +402,7 @@ static void array_map_free(struct bpf_map *map)
> if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
> bpf_array_free_percpu(array);
>
> + array_map_free_timers(map);
array_map_free() is called when map->refcnt has reached 0.
By then, map->usercnt should already have reached 0 and
array_map_free_timers() should already have been called,
so there is no need to call it here again? The same goes for hashtab.
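Roughly, the two release paths look like this (my paraphrase of the
existing kernel/bpf/syscall.c code, not part of this patch, so details
may differ):

/* usercnt-zero path: runs first, via bpf_map_put_with_uref() */
static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		/* timers are expected to be cancelled and freed here,
		 * through ->map_release_uref()
		 */
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

/* refcnt can only reach zero after usercnt did, and that path ends up
 * in ->map_free() (e.g. array_map_free()), so freeing the timers again
 * there looks redundant.
 */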
[ ... ]
> diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
> index cb4b72997d9b..7780131f710e 100644
> --- a/kernel/bpf/btf.c
> +++ b/kernel/bpf/btf.c
> @@ -3046,43 +3046,92 @@ static void btf_struct_log(struct btf_verifier_env *env,
> btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
> }
>
> -/* find 'struct bpf_spin_lock' in map value.
> - * return >= 0 offset if found
> - * and < 0 in case of error
> - */
> -int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t)
> +static int btf_find_struct_field(const struct btf *btf, const struct btf_type *t,
> + const char *name, int sz, int align)
> {
> const struct btf_member *member;
> u32 i, off = -ENOENT;
>
> - if (!__btf_type_is_struct(t))
> - return -EINVAL;
> -
> for_each_member(i, t, member) {
> const struct btf_type *member_type = btf_type_by_id(btf,
> member->type);
> if (!__btf_type_is_struct(member_type))
> continue;
> - if (member_type->size != sizeof(struct bpf_spin_lock))
> + if (member_type->size != sz)
> continue;
> - if (strcmp(__btf_name_by_offset(btf, member_type->name_off),
> - "bpf_spin_lock"))
> + if (strcmp(__btf_name_by_offset(btf, member_type->name_off), name))
> continue;
> if (off != -ENOENT)
> - /* only one 'struct bpf_spin_lock' is allowed */
> + /* only one such field is allowed */
> return -E2BIG;
> off = btf_member_bit_offset(t, member);
> if (off % 8)
> /* valid C code cannot generate such BTF */
> return -EINVAL;
> off /= 8;
> - if (off % __alignof__(struct bpf_spin_lock))
> - /* valid struct bpf_spin_lock will be 4 byte aligned */
> + if (off % align)
> + return -EINVAL;
> + }
> + return off;
> +}
> +
> +static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
> + const char *name, int sz, int align)
> +{
> + const struct btf_var_secinfo *vsi;
> + u32 i, off = -ENOENT;
> +
> + for_each_vsi(i, t, vsi) {
> + const struct btf_type *var = btf_type_by_id(btf, vsi->type);
> + const struct btf_type *var_type = btf_type_by_id(btf, var->type);
> +
> + if (!__btf_type_is_struct(var_type))
> + continue;
> + if (var_type->size != sz)
> + continue;
> + if (vsi->size != sz)
> + continue;
> + if (strcmp(__btf_name_by_offset(btf, var_type->name_off), name))
> + continue;
> + if (off != -ENOENT)
> + /* only one such field is allowed */
> + return -E2BIG;
> + off = vsi->offset;
> + if (off % align)
> return -EINVAL;
> }
> return off;
> }
>
> +static int btf_find_field(const struct btf *btf, const struct btf_type *t,
> + const char *name, int sz, int align)
> +{
> +
> + if (__btf_type_is_struct(t))
> + return btf_find_struct_field(btf, t, name, sz, align);
> + else if (btf_type_is_datasec(t))
> + return btf_find_datasec_var(btf, t, name, sz, align);
iiuc, a global struct bpf_timer is not supported. I am not sure
why it needs to find the timer in a datasec here. Or is it meant to
error out and give a verifier log? I don't see where the verifier
reports that error, though.
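For context, what I mean by a global timer is a BPF-prog-side
declaration like the hypothetical snippet below (my own example, not
from the patch); it becomes a VAR in a DATASEC such as ".bss", which is
backed by an internal array map:

/* hypothetical BPF program snippet */
struct bpf_timer glob_timer;	/* VAR in the ".bss" DATASEC,
				 * backed by an array map
				 */

If such a declaration is meant to be rejected, an explicit verifier
message would help.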
> +static void htab_free_malloced_timers(struct bpf_htab *htab)
> +{
> + int i;
> +
> + rcu_read_lock();
> + for (i = 0; i < htab->n_buckets; i++) {
> + struct hlist_nulls_head *head = select_bucket(htab, i);
> + struct hlist_nulls_node *n;
> + struct htab_elem *l;
> +
> + hlist_nulls_for_each_entry(l, n, head, hash_node)
need the _rcu() variant here.
Maybe put rcu_read_lock()/unlock() inside the loop and do a
cond_resched() between buckets in case the hashtab is large.
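i.e. drop the outer rcu_read_lock() and do something like the sketch
below (just to show the shape I mean; the timer-free body is elided):

	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_nulls_head *head = select_bucket(htab, i);
		struct hlist_nulls_node *n;
		struct htab_elem *l;

		rcu_read_lock();
		hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) {
			/* cancel and free this element's timer,
			 * as in the patch
			 */
		}
		rcu_read_unlock();
		cond_resched();	/* the table may be large */
	}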