Message-Id: <20210216105713.45052-5-lmb@cloudflare.com>
Date: Tue, 16 Feb 2021 10:57:09 +0000
From: Lorenz Bauer <lmb@...udflare.com>
To: ast@...nel.org, daniel@...earbox.net, andrii@...nel.org,
jakub@...udflare.com
Cc: kernel-team@...udflare.com, bpf@...r.kernel.org,
netdev@...r.kernel.org, Lorenz Bauer <lmb@...udflare.com>
Subject: [PATCH bpf-next 4/8] bpf: add PROG_TEST_RUN support for sk_lookup programs
Allow passing (multiple) sk_lookup programs to PROG_TEST_RUN. User
space provides the full bpf_sk_lookup struct as the context. Since the
context includes a socket pointer that can't be exposed to user space,
we define that PROG_TEST_RUN returns the cookie of the selected socket,
or zero, in place of the socket pointer.

We don't support testing programs that select a reuseport socket, since
this would mean running another (unrelated) BPF program from the
sk_lookup test handler.
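
For illustration, a minimal user-space sketch of how such a test run
could be driven (not part of this patch; it assumes a libbpf version
that provides bpf_prog_test_run_opts, and the function name, addresses
and ports below are made up):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Run the sk_lookup program behind prog_fd against a synthetic TCP
 * lookup for 127.0.0.1:8080 and print the cookie of the socket it
 * selected, or zero if it didn't select one.
 */
static int run_sk_lookup_test(int prog_fd)
{
	struct bpf_sk_lookup ctx = {
		.family      = AF_INET,
		.protocol    = IPPROTO_TCP,
		.remote_ip4  = htonl(0x7f000001), /* network byte order */
		.remote_port = htons(12345),      /* network byte order */
		.local_ip4   = htonl(0x7f000001),
		.local_port  = 8080,              /* host byte order */
	};
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.ctx_in = &ctx,
		.ctx_size_in = sizeof(ctx),
		.ctx_out = &ctx,
		.ctx_size_out = sizeof(ctx),
	);
	int err;

	err = bpf_prog_test_run_opts(prog_fd, &opts);
	if (err)
		return err;

	/* The kernel writes the selected socket's cookie (or zero) back
	 * into the context in place of the socket pointer.
	 */
	printf("verdict %u, selected cookie %llu\n",
	       opts.retval, (unsigned long long)ctx.cookie);
	return 0;
}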
Signed-off-by: Lorenz Bauer <lmb@...udflare.com>
---
 include/linux/bpf.h            | 10 ++++
 include/uapi/linux/bpf.h       |  5 +-
 net/bpf/test_run.c             | 93 ++++++++++++++++++++++++++++++++++
 net/core/filter.c              |  1 +
 tools/include/uapi/linux/bpf.h |  5 +-
 5 files changed, 112 insertions(+), 2 deletions(-)
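
As additional context (not part of the patch): the kind of sk_lookup
program this interface exercises typically steers a lookup to a socket
pulled from a SOCKMAP. A rough sketch, with a made-up map name and
port, following the usual bpf_map_lookup_elem + bpf_sk_assign +
bpf_sk_release pattern:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical one-slot SOCKMAP holding the socket to steer to. */
struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} target_map SEC(".maps");

SEC("sk_lookup")
int select_sock(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	__u32 key = 0;
	long err;

	/* Only steer lookups for a hypothetical service port. */
	if (ctx->local_port != 8080)
		return SK_PASS;

	sk = bpf_map_lookup_elem(&target_map, &key);
	if (!sk)
		return SK_DROP;

	err = bpf_sk_assign(ctx, sk, 0);
	bpf_sk_release(sk);
	return err ? SK_DROP : SK_PASS;
}

char _license[] SEC("license") = "GPL";

Run through PROG_TEST_RUN, user space would then see the cookie of the
socket stored in target_map when local_port is 8080, and zero otherwise.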
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 67c21c8ba7cc..d251db1354ec 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1472,6 +1472,9 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr);
+int bpf_prog_test_run_sk_lookup(struct bpf_prog_array *progs,
+				const union bpf_attr *kattr,
+				union bpf_attr __user *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info);
@@ -1672,6 +1675,13 @@ static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
return -ENOTSUPP;
}
+static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog_array *progs,
+					       const union bpf_attr *kattr,
+					       union bpf_attr __user *uattr)
+{
+	return -ENOTSUPP;
+}
+
static inline void bpf_map_put(struct bpf_map *map)
{
}
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index b37a0f39b95f..078ad0b8d1a7 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -5210,7 +5210,10 @@ struct bpf_pidns_info {
/* User accessible data for SK_LOOKUP programs. Add new fields at the end. */
struct bpf_sk_lookup {
-	__bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
+	union {
+		__bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
+		__u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */
+	};
__u32 family; /* Protocol family (AF_INET, AF_INET6) */
__u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 33bd2f67e259..932c8e036b0a 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -10,8 +10,10 @@
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
+#include <net/net_namespace.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
+#include <linux/sock_diag.h>
#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>
@@ -781,3 +783,94 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
kfree(data);
return ret;
}
+
+int bpf_prog_test_run_sk_lookup(struct bpf_prog_array *progs, const union bpf_attr *kattr,
+				union bpf_attr __user *uattr)
+{
+	struct test_timer t = { NO_PREEMPT };
+	struct bpf_sk_lookup_kern ctx = {};
+	u32 repeat = kattr->test.repeat;
+	struct bpf_sk_lookup *user_ctx;
+	u32 retval, duration;
+	int ret = -EINVAL;
+
+	if (bpf_prog_array_length(progs) >= BPF_SK_LOOKUP_MAX_PROGS)
+		return -E2BIG;
+
+	if (kattr->test.flags || kattr->test.cpu)
+		return -EINVAL;
+
+	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
+	    kattr->test.data_size_out)
+		return -EINVAL;
+
+	if (!repeat)
+		repeat = 1;
+
+	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
+	if (IS_ERR(user_ctx))
+		return PTR_ERR(user_ctx);
+
+	if (!user_ctx)
+		return -EINVAL;
+
+	if (user_ctx->sk)
+		goto out;
+
+	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
+		goto out;
+
+	if (user_ctx->local_port > U16_MAX || user_ctx->remote_port > U16_MAX) {
+		ret = -ERANGE;
+		goto out;
+	}
+
+	ctx.family = user_ctx->family;
+	ctx.protocol = user_ctx->protocol;
+	ctx.dport = user_ctx->local_port;
+	ctx.sport = user_ctx->remote_port;
+
+	switch (ctx.family) {
+	case AF_INET:
+		ctx.v4.daddr = user_ctx->local_ip4;
+		ctx.v4.saddr = user_ctx->remote_ip4;
+		break;
+
+#if IS_ENABLED(CONFIG_IPV6)
+	case AF_INET6:
+		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
+		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
+		break;
+#endif
+
+	default:
+		ret = -EAFNOSUPPORT;
+		goto out;
+	}
+
+	while (t_check(&t, repeat, &ret, &duration)) {
+		ctx.selected_sk = NULL;
+		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, BPF_PROG_RUN);
+	}
+
+	if (ret < 0)
+		goto out;
+
+	user_ctx->cookie = 0;
+	if (ctx.selected_sk) {
+		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
+			ret = -EOPNOTSUPP;
+			goto out;
+		}
+
+		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
+	}
+
+	ret = bpf_test_finish(kattr, uattr, NULL, 0, retval, duration);
+	if (!ret)
+		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));
+
+out:
+	kfree(user_ctx);
+	return ret;
+}
diff --git a/net/core/filter.c b/net/core/filter.c
index 7059cf604d94..978cea941268 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -10451,6 +10451,7 @@ static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type,
}
const struct bpf_prog_ops sk_lookup_prog_ops = {
+	.test_run_array = bpf_prog_test_run_sk_lookup,
};
const struct bpf_verifier_ops sk_lookup_verifier_ops = {
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index b37a0f39b95f..078ad0b8d1a7 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -5210,7 +5210,10 @@ struct bpf_pidns_info {
/* User accessible data for SK_LOOKUP programs. Add new fields at the end. */
struct bpf_sk_lookup {
-	__bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
+	union {
+		__bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
+		__u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */
+	};
__u32 family; /* Protocol family (AF_INET, AF_INET6) */
__u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
--
2.27.0