Message-Id: <20180912003640.28316-8-joe@wand.net.nz>
Date: Tue, 11 Sep 2018 17:36:36 -0700
From: Joe Stringer <joe@...d.net.nz>
To: daniel@...earbox.net
Cc: netdev@...r.kernel.org, ast@...nel.org, john.fastabend@...il.com,
tgraf@...g.ch, kafai@...com, nitin.hande@...il.com,
mauricio.vasquez@...ito.it
Subject: [PATCH bpf-next 07/11] bpf: Add helper to retrieve socket in BPF
This patch adds new BPF helper functions, bpf_sk_lookup_tcp() and
bpf_sk_lookup_udp(), which allow BPF programs to find out if there is a
socket listening on this host and return a socket pointer which the
BPF program can then access to determine, for instance, whether to
forward or drop traffic. bpf_sk_lookup_xxx() may take a reference on
the socket, so when a BPF program makes use of these helpers, it must
subsequently pass the returned pointer into the newly added
bpf_sk_release() to return the reference.
By way of example, the following pseudocode would filter inbound
connections in a TC classifier program if there is no corresponding
service listening for the traffic:

  struct bpf_sock_tuple tuple;
  struct bpf_sock *sk;

  populate_tuple(ctx, &tuple); // Extract the 5-tuple from the packet
  sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof tuple, netns, 0);
  if (!sk) {
    // Couldn't find a socket listening for this traffic. Drop.
    return TC_ACT_SHOT;
  }
  bpf_sk_release(sk, 0);
  return TC_ACT_OK;
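
As a more concrete, purely illustrative sketch, the pseudocode above
might be fleshed out roughly as follows for IPv4/TCP traffic in a TC
classifier. This example is not part of the series: the program and
section names are made up, it ignores IPv4 options and any non-IPv4 or
non-TCP traffic, and it passes netns_id 0 to look up in the caller's
own network namespace.

  /* Hypothetical example only -- not included in this series. */
  #include <linux/bpf.h>
  #include <linux/if_ether.h>
  #include <linux/in.h>
  #include <linux/ip.h>
  #include <linux/pkt_cls.h>
  #include <linux/tcp.h>
  #include <sys/socket.h>
  #include "bpf_helpers.h"
  #include "bpf_endian.h"

  SEC("classifier")
  int drop_unbound_tcp(struct __sk_buff *skb)
  {
      void *data_end = (void *)(long)skb->data_end;
      void *data = (void *)(long)skb->data;
      struct bpf_sock_tuple tuple = {};
      struct ethhdr *eth = data;
      struct bpf_sock *sk;
      struct tcphdr *tcph;
      struct iphdr *iph;

      if ((void *)(eth + 1) > data_end || eth->h_proto != bpf_htons(ETH_P_IP))
          return TC_ACT_OK;

      iph = (void *)(eth + 1);
      if ((void *)(iph + 1) > data_end || iph->protocol != IPPROTO_TCP)
          return TC_ACT_OK;

      /* Assumes no IPv4 options, for brevity. */
      tcph = (void *)(iph + 1);
      if ((void *)(tcph + 1) > data_end)
          return TC_ACT_OK;

      /* Addresses and ports are kept in network byte order. */
      tuple.saddr.ipv4 = iph->saddr;
      tuple.daddr.ipv4 = iph->daddr;
      tuple.sport = tcph->source;
      tuple.dport = tcph->dest;
      tuple.family = AF_INET;

      /* netns_id 0: look up in the caller's own network namespace. */
      sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
      if (!sk)
          return TC_ACT_SHOT;

      /* The verifier requires the acquired reference to be released. */
      bpf_sk_release(sk, 0);
      return TC_ACT_OK;
  }

  char _license[] SEC("license") = "GPL";

The same flow applies to bpf_sk_lookup_udp() for UDP traffic.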
Signed-off-by: Joe Stringer <joe@...d.net.nz>
---
 include/uapi/linux/bpf.h                  |  54 +++++++-
 kernel/bpf/verifier.c                     |   8 +-
 net/core/filter.c                         | 145 ++++++++++++++++++++++
 tools/include/uapi/linux/bpf.h            |  54 +++++++-
 tools/testing/selftests/bpf/bpf_helpers.h |  12 ++
 5 files changed, 270 insertions(+), 3 deletions(-)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 66917a4eba27..8ed6e293113f 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -2141,6 +2141,41 @@ union bpf_attr {
* request in the skb.
* Return
* 0 on success, or a negative error in case of failure.
+ *
+ * struct bpf_sock *bpf_sk_lookup_tcp(ctx, tuple, tuple_size, netns, flags)
+ * Description
+ * Look for a TCP socket matching 'tuple'. The return value must
+ * be checked, and if non-NULL, released via bpf_sk_release().
+ * @ctx: pointer to ctx
+ * @tuple: pointer to struct bpf_sock_tuple
+ * @tuple_size: size of the tuple
+ * @netns: network namespace id
+ * @flags: flags value
+ * Return
+ * pointer to a socket on success, or
+ * NULL in case of failure
+ *
+ * struct bpf_sock *bpf_sk_lookup_udp(ctx, tuple, tuple_size, netns, flags)
+ * Description
+ * Look for a UDP socket matching 'tuple'. The return value must
+ * be checked, and if non-NULL, released via bpf_sk_release().
+ * @ctx: pointer to ctx
+ * @tuple: pointer to struct bpf_sock_tuple
+ * @tuple_size: size of the tuple
+ * @netns: network namespace id
+ * @flags: flags value
+ * Return
+ * pointer to a socket on success, or
+ * NULL in case of failure
+ *
+ * int bpf_sk_release(sock, flags)
+ * Description
+ * Release the reference held by 'sock'.
+ * @sock: Pointer reference to release. Must be found via
+ * bpf_sk_lookup_xxx().
+ * @flags: flags value
+ * Return
+ * 0 on success, or a negative error in case of failure.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2226,7 +2261,10 @@ union bpf_attr {
FN(get_current_cgroup_id), \
FN(get_local_storage), \
FN(sk_select_reuseport), \
- FN(skb_ancestor_cgroup_id),
+ FN(skb_ancestor_cgroup_id), \
+ FN(sk_lookup_tcp), \
+ FN(sk_lookup_udp), \
+ FN(sk_release),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -2395,6 +2433,20 @@ struct bpf_sock {
*/
};
+struct bpf_sock_tuple {
+ union {
+ __be32 ipv6[4];
+ __be32 ipv4;
+ } saddr;
+ union {
+ __be32 ipv6[4];
+ __be32 ipv4;
+ } daddr;
+ __be16 sport;
+ __be16 dport;
+ __u8 family;
+};
+
#define XDP_PACKET_HEADROOM 256
/* User return codes for XDP prog type.
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 67c62ef67d37..37feedaaa1c3 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -153,6 +153,12 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
* PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
* passes through a NULL-check conditional. For the branch wherein the state is
* changed to CONST_IMM, the verifier releases the reference.
+ *
+ * For each helper function that allocates a reference, such as
+ * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
+ * bpf_sk_release(). When a reference type passes into the release function,
+ * the verifier also releases the reference. If any unchecked or unreleased
+ * reference remains at the end of the program, the verifier rejects it.
*/
/* verifier_state + insn_idx are pushed to stack when branch is encountered */
@@ -300,7 +306,7 @@ static bool arg_type_is_refcounted(enum bpf_arg_type type)
*/
static bool is_release_function(enum bpf_func_id func_id)
{
- return false;
+ return func_id == BPF_FUNC_sk_release;
}
/* string representation of 'enum bpf_reg_type' */
diff --git a/net/core/filter.c b/net/core/filter.c
index 23e6e5202138..b092cae8efd5 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -58,13 +58,17 @@
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
+#include <net/udp.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
+#include <net/inet_hashtables.h>
+#include <net/inet6_hashtables.h>
#include <net/ip_fib.h>
#include <net/flow.h>
#include <net/arp.h>
#include <net/ipv6.h>
+#include <net/net_namespace.h>
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>
@@ -4811,6 +4815,135 @@ static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
};
#endif /* CONFIG_IPV6_SEG6_BPF */
+static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
+ struct sk_buff *skb, u8 proto)
+{
+ int dif = skb->dev->ifindex;
+ bool refcounted = false;
+ struct sock *sk = NULL;
+
+ if (tuple->family == AF_INET) {
+ int sdif = inet_sdif(skb);
+
+ if (proto == IPPROTO_TCP)
+ sk = __inet_lookup(net, &tcp_hashinfo, skb, 0,
+ tuple->saddr.ipv4, tuple->sport,
+ tuple->daddr.ipv4, tuple->dport,
+ dif, sdif, &refcounted);
+ else
+ sk = __udp4_lib_lookup(net,
+ tuple->saddr.ipv4, tuple->sport,
+ tuple->daddr.ipv4, tuple->dport,
+ dif, sdif, &udp_table, skb);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ struct in6_addr *src6 = (struct in6_addr *)&tuple->saddr.ipv6;
+ struct in6_addr *dst6 = (struct in6_addr *)&tuple->daddr.ipv6;
+ int sdif = inet6_sdif(skb);
+
+ if (proto == IPPROTO_TCP)
+ sk = __inet6_lookup(net, &tcp_hashinfo, skb, 0,
+ src6, tuple->sport,
+ dst6, tuple->dport,
+ dif, sdif, &refcounted);
+ else
+ sk = __udp6_lib_lookup(net, src6, tuple->sport,
+ dst6, tuple->dport,
+ dif, sdif, &udp_table, skb);
+ }
+#endif
+
+ if (unlikely(sk && !refcounted && !sock_flag(sk, SOCK_RCU_FREE))) {
+ WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
+ sk = NULL;
+ }
+ return sk;
+}
+
+/* bpf_sk_lookup performs the core lookup for different types of sockets,
+ * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE.
+ * Returns the socket as an 'unsigned long' to simplify the casting in the
+ * callers to satisfy BPF_CALL declarations.
+ */
+static unsigned long
+bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ u8 proto, u32 netns_id, u64 flags)
+{
+ struct net *caller_net = dev_net(skb->dev);
+ struct sock *sk = NULL;
+ struct net *net;
+
+ if (unlikely(len != sizeof(struct bpf_sock_tuple) || flags ||
+ (tuple->family != AF_INET && tuple->family != AF_INET6)))
+ goto out;
+
+ if (netns_id)
+ net = get_net_ns_by_id(caller_net, netns_id);
+ else
+ net = caller_net;
+ if (unlikely(!net))
+ goto out;
+ sk = sk_lookup(net, tuple, skb, proto);
+ put_net(net);
+
+ if (sk)
+ sk = sk_to_full_sk(sk);
+out:
+ return (unsigned long) sk;
+}
+
+BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
+ struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
+{
+ return bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP, netns_id, flags);
+}
+
+static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
+ .func = bpf_sk_lookup_tcp,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+ .arg4_type = ARG_ANYTHING,
+ .arg5_type = ARG_ANYTHING,
+};
+
+BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb,
+ struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
+{
+ return bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP, netns_id, flags);
+}
+
+static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
+ .func = bpf_sk_lookup_udp,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+ .arg4_type = ARG_ANYTHING,
+ .arg5_type = ARG_ANYTHING,
+};
+
+BPF_CALL_2(bpf_sk_release, struct sock *, sk, u64, flags)
+{
+ if (!sock_flag(sk, SOCK_RCU_FREE))
+ sock_gen_put(sk);
+
+ if (unlikely(flags))
+ return -EINVAL;
+ return 0;
+}
+
+static const struct bpf_func_proto bpf_sk_release_proto = {
+ .func = bpf_sk_release,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_SOCKET,
+ .arg2_type = ARG_ANYTHING,
+};
+
bool bpf_helper_changes_pkt_data(void *func)
{
if (func == bpf_skb_vlan_push ||
@@ -5017,6 +5150,12 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_skb_ancestor_cgroup_id:
return &bpf_skb_ancestor_cgroup_id_proto;
#endif
+ case BPF_FUNC_sk_lookup_tcp:
+ return &bpf_sk_lookup_tcp_proto;
+ case BPF_FUNC_sk_lookup_udp:
+ return &bpf_sk_lookup_udp_proto;
+ case BPF_FUNC_sk_release:
+ return &bpf_sk_release_proto;
default:
return bpf_base_func_proto(func_id);
}
@@ -5117,6 +5256,12 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_sk_redirect_hash_proto;
case BPF_FUNC_get_local_storage:
return &bpf_get_local_storage_proto;
+ case BPF_FUNC_sk_lookup_tcp:
+ return &bpf_sk_lookup_tcp_proto;
+ case BPF_FUNC_sk_lookup_udp:
+ return &bpf_sk_lookup_udp_proto;
+ case BPF_FUNC_sk_release:
+ return &bpf_sk_release_proto;
default:
return bpf_base_func_proto(func_id);
}
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 66917a4eba27..8ed6e293113f 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -2141,6 +2141,41 @@ union bpf_attr {
* request in the skb.
* Return
* 0 on success, or a negative error in case of failure.
+ *
+ * struct bpf_sock *bpf_sk_lookup_tcp(ctx, tuple, tuple_size, netns, flags)
+ * Description
+ * Look for a TCP socket matching 'tuple'. The return value must
+ * be checked, and if non-NULL, released via bpf_sk_release().
+ * @ctx: pointer to ctx
+ * @tuple: pointer to struct bpf_sock_tuple
+ * @tuple_size: size of the tuple
+ * @netns: network namespace id
+ * @flags: flags value
+ * Return
+ * pointer to a socket on success, or
+ * NULL in case of failure
+ *
+ * struct bpf_sock *bpf_sk_lookup_udp(ctx, tuple, tuple_size, netns, flags)
+ * Description
+ * Look for a UDP socket matching 'tuple'. The return value must
+ * be checked, and if non-NULL, released via bpf_sk_release().
+ * @ctx: pointer to ctx
+ * @tuple: pointer to struct bpf_sock_tuple
+ * @tuple_size: size of the tuple
+ * @netns: network namespace id
+ * @flags: flags value
+ * Return
+ * pointer to a socket on success, or
+ * NULL in case of failure
+ *
+ * int bpf_sk_release(sock, flags)
+ * Description
+ * Release the reference held by 'sock'.
+ * @sock: Pointer reference to release. Must be found via
+ * bpf_sk_lookup_xxx().
+ * @flags: flags value
+ * Return
+ * 0 on success, or a negative error in case of failure.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2226,7 +2261,10 @@ union bpf_attr {
FN(get_current_cgroup_id), \
FN(get_local_storage), \
FN(sk_select_reuseport), \
- FN(skb_ancestor_cgroup_id),
+ FN(skb_ancestor_cgroup_id), \
+ FN(sk_lookup_tcp), \
+ FN(sk_lookup_udp), \
+ FN(sk_release),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -2395,6 +2433,20 @@ struct bpf_sock {
*/
};
+struct bpf_sock_tuple {
+ union {
+ __be32 ipv6[4];
+ __be32 ipv4;
+ } saddr;
+ union {
+ __be32 ipv6[4];
+ __be32 ipv4;
+ } daddr;
+ __be16 sport;
+ __be16 dport;
+ __u8 family;
+};
+
#define XDP_PACKET_HEADROOM 256
/* User return codes for XDP prog type.
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index e4be7730222d..88ce00c3aa0f 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -143,6 +143,18 @@ static unsigned long long (*bpf_skb_cgroup_id)(void *ctx) =
(void *) BPF_FUNC_skb_cgroup_id;
static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) =
(void *) BPF_FUNC_skb_ancestor_cgroup_id;
+static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
+ struct bpf_sock_tuple *tuple,
+ int size, unsigned int netns_id,
+ unsigned long long flags) =
+ (void *) BPF_FUNC_sk_lookup_tcp;
+static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
+ struct bpf_sock_tuple *tuple,
+ int size, unsigned int netns_id,
+ unsigned long long flags) =
+ (void *) BPF_FUNC_sk_lookup_udp;
+static int (*bpf_sk_release)(struct bpf_sock *sk, unsigned long long flags) =
+ (void *) BPF_FUNC_sk_release;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
--
2.17.1