[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20220617005829.66pboow5uubbrdcu@kafai-mbp>
Date: Thu, 16 Jun 2022 17:58:29 -0700
From: Martin KaFai Lau <kafai@...com>
To: Stanislav Fomichev <sdf@...gle.com>
Cc: netdev@...r.kernel.org, bpf@...r.kernel.org, ast@...nel.org,
daniel@...earbox.net, andrii@...nel.org
Subject: Re: [PATCH bpf-next v9 05/10] bpf: implement BPF_PROG_QUERY for
BPF_LSM_CGROUP
On Fri, Jun 10, 2022 at 09:57:58AM -0700, Stanislav Fomichev wrote:
> diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
> index ba402d50e130..c869317479ec 100644
> --- a/kernel/bpf/cgroup.c
> +++ b/kernel/bpf/cgroup.c
> @@ -1029,57 +1029,92 @@ static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
> static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
> union bpf_attr __user *uattr)
> {
> + __u32 __user *prog_attach_flags = u64_to_user_ptr(attr->query.prog_attach_flags);
> __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
> enum bpf_attach_type type = attr->query.attach_type;
> + enum cgroup_bpf_attach_type from_atype, to_atype;
> enum cgroup_bpf_attach_type atype;
> struct bpf_prog_array *effective;
> struct hlist_head *progs;
> struct bpf_prog *prog;
> int cnt, ret = 0, i;
> + int total_cnt = 0;
> u32 flags;
>
> - atype = to_cgroup_bpf_attach_type(type);
> - if (atype < 0)
> - return -EINVAL;
> + if (type == BPF_LSM_CGROUP) {
> + if (attr->query.prog_cnt && prog_ids && !prog_attach_flags)
> + return -EINVAL;
>
> - progs = &cgrp->bpf.progs[atype];
> - flags = cgrp->bpf.flags[atype];
> + from_atype = CGROUP_LSM_START;
> + to_atype = CGROUP_LSM_END;
> + flags = 0;
> + } else {
> + from_atype = to_cgroup_bpf_attach_type(type);
> + if (from_atype < 0)
> + return -EINVAL;
> + to_atype = from_atype;
> + flags = cgrp->bpf.flags[from_atype];
> + }
>
> - effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
> - lockdep_is_held(&cgroup_mutex));
> + for (atype = from_atype; atype <= to_atype; atype++) {
> + progs = &cgrp->bpf.progs[atype];
nit: Move the 'progs = ...' assignment into the 'else {}' branch below, since only the non-effective path uses it.
>
> - if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
> - cnt = bpf_prog_array_length(effective);
> - else
> - cnt = prog_list_length(progs);
> + if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
> + effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
> + lockdep_is_held(&cgroup_mutex));
> + total_cnt += bpf_prog_array_length(effective);
> + } else {
> + total_cnt += prog_list_length(progs);
> + }
> + }
>
> if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
> return -EFAULT;
> - if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
> + if (copy_to_user(&uattr->query.prog_cnt, &total_cnt, sizeof(total_cnt)))
> return -EFAULT;
> - if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
> + if (attr->query.prog_cnt == 0 || !prog_ids || !total_cnt)
> /* return early if user requested only program count + flags */
> return 0;
> - if (attr->query.prog_cnt < cnt) {
> - cnt = attr->query.prog_cnt;
> +
> + if (attr->query.prog_cnt < total_cnt) {
> + total_cnt = attr->query.prog_cnt;
> ret = -ENOSPC;
> }
>
> - if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
> - return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
> - } else {
> - struct bpf_prog_list *pl;
> - u32 id;
> + for (atype = from_atype; atype <= to_atype && total_cnt; atype++) {
> + progs = &cgrp->bpf.progs[atype];
same here.
> + flags = cgrp->bpf.flags[atype];
and the 'flags = ...' assignment can be moved into the 'if (prog_attach_flags) {}' block, where it is actually used.
Others lgtm.
Reviewed-by: Martin KaFai Lau <kafai@...com>
>
> - i = 0;
> - hlist_for_each_entry(pl, progs, node) {
> - prog = prog_list_prog(pl);
> - id = prog->aux->id;
> - if (copy_to_user(prog_ids + i, &id, sizeof(id)))
> - return -EFAULT;
> - if (++i == cnt)
> - break;
> + if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
> + effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
> + lockdep_is_held(&cgroup_mutex));
> + cnt = min_t(int, bpf_prog_array_length(effective), total_cnt);
> + ret = bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
> + } else {
> + struct bpf_prog_list *pl;
> + u32 id;
> +
> + cnt = min_t(int, prog_list_length(progs), total_cnt);
> + i = 0;
> + hlist_for_each_entry(pl, progs, node) {
> + prog = prog_list_prog(pl);
> + id = prog->aux->id;
> + if (copy_to_user(prog_ids + i, &id, sizeof(id)))
> + return -EFAULT;
> + if (++i == cnt)
> + break;
> + }
> }
> +
> + if (prog_attach_flags) {
> + for (i = 0; i < cnt; i++)
> + if (copy_to_user(prog_attach_flags + i, &flags, sizeof(flags)))
> + return -EFAULT;
> + prog_attach_flags += cnt;
> + }
> +
> + prog_ids += cnt;
> + total_cnt -= cnt;
> }
> return ret;
> }
Powered by blists - more mailing lists