lists.openwall.net | lists / announce owl-users owl-dev john-users john-dev passwdqc-users yescrypt popa3d-users / oss-security kernel-hardening musl sabotage tlsify passwords / crypt-dev xvendor / Bugtraq Full-Disclosure linux-kernel linux-netdev linux-ext4 linux-hardening linux-cve-announce PHC | |
Open Source and information security mailing list archives
Message-Id: <20190412100018.20852-1-alban@kinvolk.io> Date: Fri, 12 Apr 2019 12:00:17 +0200 From: Alban Crequy <alban.crequy@...il.com> To: john.fastabend@...il.com, ast@...nel.org, daniel@...earbox.net Cc: bpf@...r.kernel.org, netdev@...r.kernel.org, linux-kernel@...r.kernel.org, alban@...volk.io, iago@...volk.io Subject: [PATCH bpf-next v1 1/2] bpf: sock ops: add netns in bpf context From: Alban Crequy <alban@...volk.io> sockops programs can now access the network namespace inode via (struct bpf_sock_ops)->netns. This can be useful to apply different policies on different network namespaces. In the unlikely case where network namespaces are not compiled in (CONFIG_NET_NS=n), the verifier will not allow access to ->netns. Signed-off-by: Alban Crequy <alban@...volk.io> --- include/uapi/linux/bpf.h | 1 + net/core/filter.c | 40 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 31a27dd337dc..5afaab25f205 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3069,6 +3069,7 @@ struct bpf_sock_ops { __u32 sk_txhash; __u64 bytes_received; __u64 bytes_acked; + __u64 netns; }; /* Definitions for bpf_sock_ops_cb_flags */ diff --git a/net/core/filter.c b/net/core/filter.c index 22eb2edf5573..f5e75b6fecb2 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -6758,6 +6758,14 @@ static bool sock_ops_is_valid_access(int off, int size, } } else { switch (off) { + case offsetof(struct bpf_sock_ops, netns): +#ifdef CONFIG_NET_NS + if (size != sizeof(__u64)) + return false; +#else + return false; +#endif + break; case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received, bytes_acked): if (size != sizeof(__u64)) @@ -7908,6 +7916,38 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash, struct sock, type); break; + + case offsetof(struct bpf_sock_ops, netns): +#ifdef CONFIG_NET_NS + /* Loading: 
sk_ops->sk->__sk_common.skc_net.net->ns.inum + * Type: (struct bpf_sock_ops_kern *) + * ->(struct sock *) + * ->(struct sock_common) + * .possible_net_t + * .(struct net *) + * ->(struct ns_common) + * .(unsigned int) + */ + BUILD_BUG_ON(offsetof(struct sock, __sk_common) != 0); + BUILD_BUG_ON(offsetof(possible_net_t, net) != 0); + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( + struct bpf_sock_ops_kern, sk), + si->dst_reg, si->src_reg, + offsetof(struct bpf_sock_ops_kern, sk)); + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( + possible_net_t, net), + si->dst_reg, si->dst_reg, + offsetof(struct sock_common, skc_net)); + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( + struct ns_common, inum), + si->dst_reg, si->dst_reg, + offsetof(struct net, ns) + + offsetof(struct ns_common, inum)); +#else + *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); +#endif + break; + } return insn - insn_buf; } -- 2.20.1
Powered by blists - more mailing lists