[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20231205013420.88067-2-kuniyu@amazon.com>
Date: Tue, 5 Dec 2023 10:34:18 +0900
From: Kuniyuki Iwashima <kuniyu@...zon.com>
To: Eric Dumazet <edumazet@...gle.com>, Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>, Andrii Nakryiko <andrii@...nel.org>,
Martin KaFai Lau <martin.lau@...ux.dev>
CC: Kuniyuki Iwashima <kuniyu@...zon.com>, Kuniyuki Iwashima
<kuni1840@...il.com>, <bpf@...r.kernel.org>, <netdev@...r.kernel.org>
Subject: [PATCH v4 bpf-next 1/3] bpf: tcp: Handle BPF SYN Cookie in cookie_v[46]_check().
We will support arbitrary SYN Cookie with BPF in the following
patch.
If BPF prog validates ACK and kfunc allocates reqsk, it will
be carried to cookie_v[46]_check() as skb->sk. Then, we call
cookie_bpf_check() to validate the configuration passed to kfunc.
First, we clear skb->sk, skb->destructor, and req->rsk_listener,
so that we do not hold a refcnt for the reqsk and the listener.
See the following patch for details.
Then, we parse TCP options to check if tstamp_ok set by the kfunc is
inconsistent with the timestamp option actually present in the packet.
If it is invalid, we increment LINUX_MIB_SYNCOOKIESFAILED and send
RST. If tstamp_ok is valid, we increment LINUX_MIB_SYNCOOKIESRECV.
After that, we check sack_ok and wscale_ok with corresponding
sysctl knobs. If the test fails, we send RST but do not increment
LINUX_MIB_SYNCOOKIESFAILED. This behaviour is the same as the
non-BPF cookie handling in cookie_tcp_check().
Finally, we finish initialisation for the remaining fields with
cookie_tcp_reqsk_init().
Signed-off-by: Kuniyuki Iwashima <kuniyu@...zon.com>
---
include/net/tcp.h | 21 +++++++++++++++
net/ipv4/syncookies.c | 62 +++++++++++++++++++++++++++++++++++++++++--
net/ipv6/syncookies.c | 9 +++++--
3 files changed, 88 insertions(+), 4 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 973555cb1d3f..842791997f30 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -590,6 +590,27 @@ static inline bool cookie_ecn_ok(const struct net *net, const struct dst_entry *
dst_feature(dst, RTAX_FEATURE_ECN);
}
+#if IS_ENABLED(CONFIG_BPF)
+static inline bool cookie_bpf_ok(struct sk_buff *skb)
+{
+ return skb->sk;
+}
+
+struct request_sock *cookie_bpf_check(struct net *net, struct sock *sk,
+ struct sk_buff *skb);
+#else
+static inline bool cookie_bpf_ok(struct sk_buff *skb)
+{
+ return false;
+}
+
+static inline struct request_sock *cookie_bpf_check(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ return NULL;
+}
+#endif
+
/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 61f1c96cfe63..0f9c3aed2014 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -304,6 +304,59 @@ static int cookie_tcp_reqsk_init(struct sock *sk, struct sk_buff *skb,
return 0;
}
+#if IS_ENABLED(CONFIG_BPF)
+struct request_sock *cookie_bpf_check(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ struct request_sock *req = inet_reqsk(skb->sk);
+ struct inet_request_sock *ireq = inet_rsk(req);
+ struct tcp_request_sock *treq = tcp_rsk(req);
+ struct tcp_options_received tcp_opt;
+ int ret;
+
+ skb->sk = NULL;
+ skb->destructor = NULL;
+ req->rsk_listener = NULL;
+
+ memset(&tcp_opt, 0, sizeof(tcp_opt));
+ tcp_parse_options(net, skb, &tcp_opt, 0, NULL);
+
+ if (ireq->tstamp_ok ^ tcp_opt.saw_tstamp) {
+ __NET_INC_STATS(net, LINUX_MIB_SYNCOOKIESFAILED);
+ goto reset;
+ }
+
+ __NET_INC_STATS(net, LINUX_MIB_SYNCOOKIESRECV);
+
+ if (ireq->tstamp_ok) {
+ if (!READ_ONCE(net->ipv4.sysctl_tcp_timestamps))
+ goto reset;
+
+ req->ts_recent = tcp_opt.rcv_tsval;
+ treq->ts_off = tcp_opt.rcv_tsecr - tcp_ns_to_ts(false, tcp_clock_ns());
+ }
+
+ if (ireq->sack_ok && !READ_ONCE(net->ipv4.sysctl_tcp_sack))
+ goto reset;
+
+ if (ireq->wscale_ok && !READ_ONCE(net->ipv4.sysctl_tcp_window_scaling))
+ goto reset;
+
+ ret = cookie_tcp_reqsk_init(sk, skb, req);
+ if (ret) {
+ reqsk_free(req);
+ req = NULL;
+ }
+
+ return req;
+
+reset:
+ reqsk_free(req);
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(cookie_bpf_check);
+#endif
+
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
struct sock *sk, struct sk_buff *skb,
struct tcp_options_received *tcp_opt,
@@ -404,7 +457,11 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
!th->ack || th->rst)
goto out;
- req = cookie_tcp_check(net, sk, skb);
+ if (cookie_bpf_ok(skb))
+ req = cookie_bpf_check(net, sk, skb);
+ else
+ req = cookie_tcp_check(net, sk, skb);
+
if (IS_ERR(req))
goto out;
if (!req)
@@ -454,7 +511,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
ireq->wscale_ok, &rcv_wscale,
dst_metric(&rt->dst, RTAX_INITRWND));
- ireq->rcv_wscale = rcv_wscale;
+ if (!req->syncookie)
+ ireq->rcv_wscale = rcv_wscale;
ireq->ecn_ok &= cookie_ecn_ok(net, &rt->dst);
ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst);
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index c8d2ca27220c..24224138ba1a 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -182,7 +182,11 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
!th->ack || th->rst)
goto out;
- req = cookie_tcp_check(net, sk, skb);
+ if (cookie_bpf_ok(skb))
+ req = cookie_bpf_check(net, sk, skb);
+ else
+ req = cookie_tcp_check(net, sk, skb);
+
if (IS_ERR(req))
goto out;
if (!req)
@@ -247,7 +251,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
ireq->wscale_ok, &rcv_wscale,
dst_metric(dst, RTAX_INITRWND));
- ireq->rcv_wscale = rcv_wscale;
+ if (!req->syncookie)
+ ireq->rcv_wscale = rcv_wscale;
ireq->ecn_ok &= cookie_ecn_ok(net, dst);
ret = tcp_get_cookie_sock(sk, skb, req, dst);
--
2.30.2
Powered by blists - more mailing lists