Message-Id: <20250121012901.87763-5-kerneljasonxing@gmail.com>
Date: Tue, 21 Jan 2025 09:28:52 +0800
From: Jason Xing <kerneljasonxing@...il.com>
To: davem@...emloft.net,
edumazet@...gle.com,
kuba@...nel.org,
pabeni@...hat.com,
dsahern@...nel.org,
willemdebruijn.kernel@...il.com,
willemb@...gle.com,
ast@...nel.org,
daniel@...earbox.net,
andrii@...nel.org,
martin.lau@...ux.dev,
eddyz87@...il.com,
song@...nel.org,
yonghong.song@...ux.dev,
john.fastabend@...il.com,
kpsingh@...nel.org,
sdf@...ichev.me,
haoluo@...gle.com,
jolsa@...nel.org,
horms@...nel.org
Cc: bpf@...r.kernel.org,
netdev@...r.kernel.org,
Jason Xing <kerneljasonxing@...il.com>
Subject: [RFC PATCH net-next v6 04/13] bpf: stop UDP sock accessing TCP fields in sock_op BPF CALLs

In the next round, we will support the UDP proto for the SO_TIMESTAMPING
bpf extension, so we need to make sure a UDP socket cannot trigger the
safety problems that arise when it accesses TCP-only fields.

The added checks fall into two groups:
1. add a TCP protocol check (see the sketch below the list)
2. add a sock op check
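
For context, the first group guards helpers that use tcp_sk(), which is
only a cast: without a protocol check, a UDP socket would be misread as
a TCP socket and memory beyond struct udp_sock could be read or written.
A minimal sketch of the pattern (illustrative only, the function name is
made up and not part of this patch):

/* Illustrative sketch: the protocol check must come before any
 * tcp_sk() dereference, as bpf_sock_ops_cb_flags_set() now does.
 */
static int example_set_cb_flags(struct sock *sk, int val)
{
	if (sk->sk_protocol != IPPROTO_TCP)	/* group 1: protocol check */
		return -EINVAL;

	tcp_sk(sk)->bpf_sock_ops_cb_flags = val;	/* safe after the check */
	return 0;
}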
Signed-off-by: Jason Xing <kerneljasonxing@...il.com>
---
 net/core/filter.c | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)
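
A BPF-side view of the change (hypothetical test program, not part of
this patch): with the added protocol check, a sockops program calling
bpf_sock_ops_cb_flags_set() on a non-TCP socket now gets -EINVAL
instead of silently writing into tcp_sock-only fields.

/* Hypothetical sockops program exercising the new check. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("sockops")
int sockops_prog(struct bpf_sock_ops *skops)
{
	/* After this patch the helper itself rejects non-TCP sockets
	 * with -EINVAL, so UDP callbacks added later in this series
	 * cannot reach tcp_sk() through it.
	 */
	bpf_sock_ops_cb_flags_set(skops, BPF_SOCK_OPS_RTT_CB_FLAG);
	return 1;
}

char _license[] SEC("license") = "GPL";
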
diff --git a/net/core/filter.c b/net/core/filter.c
index fdd305b4cfbb..934431886876 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -5523,6 +5523,11 @@ static int __bpf_setsockopt(struct sock *sk, int level, int optname,
 	return -EINVAL;
 }
 
+static bool is_locked_tcp_sock_ops(struct bpf_sock_ops_kern *bpf_sock)
+{
+	return bpf_sock->op <= BPF_SOCK_OPS_WRITE_HDR_OPT_CB;
+}
+
 static int _bpf_setsockopt(struct sock *sk, int level, int optname,
 			   char *optval, int optlen)
 {
@@ -5673,7 +5678,12 @@ static const struct bpf_func_proto bpf_sock_addr_getsockopt_proto = {
 BPF_CALL_5(bpf_sock_ops_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
 	   int, level, int, optname, char *, optval, int, optlen)
 {
-	return _bpf_setsockopt(bpf_sock->sk, level, optname, optval, optlen);
+	struct sock *sk = bpf_sock->sk;
+
+	if (is_locked_tcp_sock_ops(bpf_sock) && sk_fullsock(sk))
+		sock_owned_by_me(sk);
+
+	return __bpf_setsockopt(sk, level, optname, optval, optlen);
 }
 
 static const struct bpf_func_proto bpf_sock_ops_setsockopt_proto = {
@@ -5759,6 +5769,7 @@ BPF_CALL_5(bpf_sock_ops_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
 	   int, level, int, optname, char *, optval, int, optlen)
 {
 	if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP &&
+	    bpf_sock->sk->sk_protocol == IPPROTO_TCP &&
 	    optname >= TCP_BPF_SYN && optname <= TCP_BPF_SYN_MAC) {
 		int ret, copy_len = 0;
 		const u8 *start;
@@ -5800,7 +5811,8 @@ BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
 	struct sock *sk = bpf_sock->sk;
 	int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
 
-	if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
+	if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk) ||
+	    sk->sk_protocol != IPPROTO_TCP)
 		return -EINVAL;
 
 	tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
@@ -7609,6 +7621,9 @@ BPF_CALL_4(bpf_sock_ops_load_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
 	u8 search_kind, search_len, copy_len, magic_len;
 	int ret;
 
+	if (!is_locked_tcp_sock_ops(bpf_sock))
+		return -EOPNOTSUPP;
+
 	/* 2 byte is the minimal option len except TCPOPT_NOP and
 	 * TCPOPT_EOL which are useless for the bpf prog to learn
 	 * and this helper disallow loading them also.
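
The sock op check relies on the layout of the sock_ops op enum in
include/uapi/linux/bpf.h: every pre-existing TCP callback is numbered
at or before BPF_SOCK_OPS_WRITE_HDR_OPT_CB, so the ops added later in
this series for UDP timestamping fall outside the range that
is_locked_tcp_sock_ops() accepts. Abbreviated excerpt for reference:

enum {
	BPF_SOCK_OPS_VOID,
	BPF_SOCK_OPS_TIMEOUT_INIT,
	/* ... */
	BPF_SOCK_OPS_PARSE_HDR_OPT_CB,
	BPF_SOCK_OPS_HDR_OPT_LEN_CB,
	BPF_SOCK_OPS_WRITE_HDR_OPT_CB,	/* last op accepted by the check */
};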
--
2.43.5