Message-ID: <20220604063556.qyhsssqgp2stw73q@kafai-mbp>
Date:   Fri, 3 Jun 2022 23:35:56 -0700
From:   Martin KaFai Lau <kafai@...com>
To:     Stanislav Fomichev <sdf@...gle.com>
Cc:     netdev@...r.kernel.org, bpf@...r.kernel.org, ast@...nel.org,
        daniel@...earbox.net, andrii@...nel.org
Subject: Re: [PATCH bpf-next v8 04/11] bpf: minimize number of allocated lsm
 slots per program

On Wed, Jun 01, 2022 at 12:02:11PM -0700, Stanislav Fomichev wrote:
> diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c
> index 0f72020bfdcf..83aa431dd52e 100644
> --- a/kernel/bpf/bpf_lsm.c
> +++ b/kernel/bpf/bpf_lsm.c
> @@ -69,11 +69,6 @@ void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
>  		*bpf_func = __cgroup_bpf_run_lsm_current;
>  }
>  
> -int bpf_lsm_hook_idx(u32 btf_id)
> -{
> -	return btf_id_set_index(&bpf_lsm_hooks, btf_id);
The implementation of btf_id_set_index() added in patch 3
should also be removed.

> -}
> -
>  int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
>  			const struct bpf_prog *prog)
>  {
> diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
> index 66b644a76a69..a27a6a7bd852 100644
> --- a/kernel/bpf/cgroup.c
> +++ b/kernel/bpf/cgroup.c
> @@ -129,12 +129,46 @@ unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
>  }
>  
>  #ifdef CONFIG_BPF_LSM
> +u32 cgroup_lsm_atype_btf_id[CGROUP_LSM_NUM];
This array can be static; it is only used in this file.

> +
>  static enum cgroup_bpf_attach_type
>  bpf_cgroup_atype_find(enum bpf_attach_type attach_type, u32 attach_btf_id)
>  {
> +	int i;
> +
> +	lockdep_assert_held(&cgroup_mutex);
> +
>  	if (attach_type != BPF_LSM_CGROUP)
>  		return to_cgroup_bpf_attach_type(attach_type);
> -	return CGROUP_LSM_START + bpf_lsm_hook_idx(attach_btf_id);
> +
> +	for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype_btf_id); i++)
> +		if (cgroup_lsm_atype_btf_id[i] == attach_btf_id)
> +			return CGROUP_LSM_START + i;
> +
> +	for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype_btf_id); i++)
> +		if (cgroup_lsm_atype_btf_id[i] == 0)
> +			return CGROUP_LSM_START + i;
> +
> +	return -E2BIG;
> +
> +}
> +
> +static void bpf_cgroup_atype_alloc(u32 attach_btf_id, int cgroup_atype)
> +{
> +	int i = cgroup_atype - CGROUP_LSM_START;
> +
> +	lockdep_assert_held(&cgroup_mutex);
> +
> +	cgroup_lsm_atype_btf_id[i] = attach_btf_id;
> +}
> +
> +void bpf_cgroup_atype_free(int cgroup_atype)
> +{
> +	int i = cgroup_atype - CGROUP_LSM_START;
> +
> +	mutex_lock(&cgroup_mutex);
> +	cgroup_lsm_atype_btf_id[i] = 0;
I think holding cgroup_mutex in __cgroup_bpf_attach() and
bpf_cgroup_atype_free() is not enough.

Consider a case where __cgroup_bpf_attach() runs first and
bpf_trampoline_link_cgroup_shim() cannot find the shim_link because it
has already been unlinked while its shim_prog is still sitting on
bpf_prog_free_deferred()'s death row, so the attach reuses the same
atype slot.  When bpf_prog_free_deferred() eventually runs, its
bpf_cgroup_atype_free() zeroes a slot that the new attachment is still
using.

A refcnt is still needed.  It would be better to put the btf_id and
the refcnt together in one struct instead of having two arrays, like

struct cgroup_lsm_atype {
       u32 attach_btf_id;
       u32 refcnt;
};
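
Something along these lines (just a rough sketch; the get/put names
are made up):

static struct cgroup_lsm_atype cgroup_lsm_atype[CGROUP_LSM_NUM];

void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype)
{
	int i = cgroup_atype - CGROUP_LSM_START;

	lockdep_assert_held(&cgroup_mutex);

	cgroup_lsm_atype[i].attach_btf_id = attach_btf_id;
	cgroup_lsm_atype[i].refcnt++;
}

void bpf_cgroup_atype_put(int cgroup_atype)
{
	int i = cgroup_atype - CGROUP_LSM_START;

	mutex_lock(&cgroup_mutex);
	if (!--cgroup_lsm_atype[i].refcnt)
		cgroup_lsm_atype[i].attach_btf_id = 0;
	mutex_unlock(&cgroup_mutex);
}

bpf_cgroup_atype_find() can then stay mostly as is, since a slot's
attach_btf_id only goes back to 0 after the last shim_prog holding it
has been freed.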

> +	mutex_unlock(&cgroup_mutex);
>  }
>  #else
>  static enum cgroup_bpf_attach_type
> @@ -144,6 +178,14 @@ bpf_cgroup_atype_find(enum bpf_attach_type attach_type, u32 attach_btf_id)
>  		return to_cgroup_bpf_attach_type(attach_type);
>  	return -EOPNOTSUPP;
>  }
> +
> +static void bpf_cgroup_atype_alloc(u32 attach_btf_id, int cgroup_atype)
> +{
> +}
> +
> +void bpf_cgroup_atype_free(int cgroup_atype)
> +{
> +}
>  #endif /* CONFIG_BPF_LSM */
>  
>  void cgroup_bpf_offline(struct cgroup *cgrp)
> @@ -659,6 +701,7 @@ static int __cgroup_bpf_attach(struct cgroup *cgrp,
>  		err = bpf_trampoline_link_cgroup_shim(new_prog, &tgt_info, atype);
>  		if (err)
>  			goto cleanup;
> +		bpf_cgroup_atype_alloc(new_prog->aux->attach_btf_id, atype);
This atype alloc (or refcnt inc) should be done in
cgroup_shim_alloc() instead, so that the shim_prog is the one holding
the refcnt on the atype.  If the "!old_prog" check above needs to be
removed, per the earlier comment on patch 3, bumping the atype refcnt
here would be wrong.
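
i.e. roughly like this (hand-wavy sketch; assuming cgroup_shim_alloc()
gets the cgroup_atype passed in, and reusing the made-up get() name
from the sketch above):

static struct bpf_prog *cgroup_shim_alloc(const struct bpf_prog *prog,
					  bpf_func_t bpf_func,
					  int cgroup_atype)
{
	struct bpf_prog *shim_prog;

	/* ... existing shim_prog allocation and setup ... */

	/* The shim_prog owns the atype slot; the slot is only put back
	 * from bpf_prog_free_deferred() when the shim_prog itself is
	 * finally freed.
	 */
	shim_prog->aux->cgroup_atype = cgroup_atype;
	bpf_cgroup_atype_get(prog->aux->attach_btf_id, cgroup_atype);

	return shim_prog;
}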

>  	}
>  
>  	err = update_effective_progs(cgrp, atype);
> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
> index 091ee210842f..224bb4d4fe4e 100644
> --- a/kernel/bpf/core.c
> +++ b/kernel/bpf/core.c
> @@ -107,6 +107,9 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
>  	fp->aux->prog = fp;
>  	fp->jit_requested = ebpf_jit_enabled();
>  	fp->blinding_requested = bpf_jit_blinding_enabled(fp);
> +#ifdef CONFIG_BPF_LSM
I don't think this is needed.

> +	aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
> +#endif
>  
>  	INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
>  	mutex_init(&fp->aux->used_maps_mutex);
> @@ -2558,6 +2561,10 @@ static void bpf_prog_free_deferred(struct work_struct *work)
>  	aux = container_of(work, struct bpf_prog_aux, work);
>  #ifdef CONFIG_BPF_SYSCALL
>  	bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
> +#endif
> +#ifdef CONFIG_BPF_LSM
Same here.

> +	if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
> +		bpf_cgroup_atype_free(aux->cgroup_atype);
>  #endif
>  	bpf_free_used_maps(aux);
>  	bpf_free_used_btfs(aux);
> -- 
> 2.36.1.255.ge46751e96f-goog
> 
