Date:   Mon, 23 May 2022 16:33:47 -0700
From:   Andrii Nakryiko <andrii.nakryiko@...il.com>
To:     Stanislav Fomichev <sdf@...gle.com>
Cc:     Networking <netdev@...r.kernel.org>, bpf <bpf@...r.kernel.org>,
        Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>,
        Andrii Nakryiko <andrii@...nel.org>
Subject: Re: [PATCH bpf-next v7 11/11] selftests/bpf: verify lsm_cgroup struct sock access

On Wed, May 18, 2022 at 3:56 PM Stanislav Fomichev <sdf@...gle.com> wrote:
>
> sk_priority & sk_mark are writable, the rest is readonly.
>
> One interesting thing here is that the verifier doesn't
> really force me to add NULL checks anywhere :-/
>
> Signed-off-by: Stanislav Fomichev <sdf@...gle.com>
> ---
>  .../selftests/bpf/prog_tests/lsm_cgroup.c     | 69 +++++++++++++++++++
>  1 file changed, 69 insertions(+)
>
> diff --git a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
> index 29292ec40343..64b6830e03f5 100644
> --- a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
> +++ b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
> @@ -270,8 +270,77 @@ static void test_lsm_cgroup_functional(void)
>         lsm_cgroup__destroy(skel);
>  }
>
> +static int field_offset(const char *type, const char *field)
> +{
> +       const struct btf_member *memb;
> +       const struct btf_type *tp;
> +       const char *name;
> +       struct btf *btf;
> +       int btf_id;
> +       int i;
> +
> +       btf = btf__load_vmlinux_btf();
> +       if (!btf)
> +               return -1;
> +
> +       btf_id = btf__find_by_name_kind(btf, type, BTF_KIND_STRUCT);
> +       if (btf_id < 0)
> +               return -1;
> +
> +       tp = btf__type_by_id(btf, btf_id);
> +       memb = btf_members(tp);
> +
> +       for (i = 0; i < btf_vlen(tp); i++) {
> +               name = btf__name_by_offset(btf,
> +                                          memb->name_off);
> +               if (strcmp(field, name) == 0)
> +                       return memb->offset / 8;
> +               memb++;
> +       }
> +
> +       return -1;
> +}
> +
> +static bool sk_writable_field(const char *type, const char *field, int size)
> +{
> +       LIBBPF_OPTS(bpf_prog_load_opts, opts,
> +                   .expected_attach_type = BPF_LSM_CGROUP);
> +       struct bpf_insn insns[] = {
> +               /* r1 = *(u64 *)(r1 + 0) */
> +               BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
> +               /* r1 = *(u64 *)(r1 + offsetof(struct socket, sk)) */
> +               BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, field_offset("socket", "sk")),
> +               /* r2 = *(u64 *)(r1 + offsetof(struct sock, <field>)) */
> +               BPF_LDX_MEM(size, BPF_REG_2, BPF_REG_1, field_offset(type, field)),
> +               /* *(u64 *)(r1 + offsetof(struct sock, <field>)) = r2 */
> +               BPF_STX_MEM(size, BPF_REG_1, BPF_REG_2, field_offset(type, field)),
> +               BPF_MOV64_IMM(BPF_REG_0, 1),
> +               BPF_EXIT_INSN(),
> +       };
> +       int fd;

This is really not much better than test_verifier assembly. What I had
in mind when I suggested using test_progs was that you'd have normal C
source code for the BPF part, something like this:

__u64 tmp;

SEC("?lsm_cgroup/socket_bind")
int BPF_PROG(access1_bad, struct socket *sock, struct sockaddr *address,
             int addrlen)
{
    /* write the field back to itself to exercise write access */
    *(volatile u16 *)&sock->sk->__sk_common.skc_family =
        *(volatile u16 *)&sock->sk->__sk_common.skc_family;
    return 0;
}

SEC("?lsm_cgroup/socket_bind")
int BPF_PROG(access2_bad, struct socket *sock, struct sockaddr *address,
             int addrlen)
{
    *(volatile u64 *)&sock->sk->sk_sndtimeo =
        *(volatile u64 *)&sock->sk->sk_sndtimeo;
    return 0;
}

and so on. From user space you'd load just one of those accessX_bad
programs at a time (note the SEC("?") prefix).
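
For the user-space side it would then be roughly something like the
below (just a sketch, untested; it assumes the programs above live in
the existing lsm_cgroup skeleton and the names are only illustrative):

static void test_access1_bad(void)
{
    struct lsm_cgroup *skel;
    int err;

    /* SEC("?...") programs are not auto-loaded, so open the skeleton
     * and enable only the one program under test
     */
    skel = lsm_cgroup__open();
    if (!ASSERT_OK_PTR(skel, "open"))
        return;

    bpf_program__set_autoload(skel->progs.access1_bad, true);

    /* the verifier should reject the write to a read-only field */
    err = lsm_cgroup__load(skel);
    ASSERT_ERR(err, "load access1_bad");

    lsm_cgroup__destroy(skel);
}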


But having said that, what you did is pretty self-contained, so not
too bad. It's just not what I was suggesting :)

> +
> +       opts.attach_btf_id = libbpf_find_vmlinux_btf_id("socket_post_create",
> +                                                       opts.expected_attach_type);
> +
> +       fd = bpf_prog_load(BPF_PROG_TYPE_LSM, NULL, "GPL", insns, ARRAY_SIZE(insns), &opts);
> +       if (fd >= 0)
> +               close(fd);
> +       return fd >= 0;
> +}
> +
> +static void test_lsm_cgroup_access(void)
> +{
> +       ASSERT_FALSE(sk_writable_field("sock_common", "skc_family", BPF_H), "skc_family");
> +       ASSERT_FALSE(sk_writable_field("sock", "sk_sndtimeo", BPF_DW), "sk_sndtimeo");
> +       ASSERT_TRUE(sk_writable_field("sock", "sk_priority", BPF_W), "sk_priority");
> +       ASSERT_TRUE(sk_writable_field("sock", "sk_mark", BPF_W), "sk_mark");
> +       ASSERT_FALSE(sk_writable_field("sock", "sk_pacing_rate", BPF_DW), "sk_pacing_rate");
> +}
> +
>  void test_lsm_cgroup(void)
>  {
>         if (test__start_subtest("functional"))
>                 test_lsm_cgroup_functional();
> +       if (test__start_subtest("access"))
> +               test_lsm_cgroup_access();
>  }
> --
> 2.36.1.124.g0e6072fb45-goog
>
