Message-ID: <ca852e76-2627-4e07-8005-34168271bf12@linux.dev>
Date: Wed, 15 Jan 2025 13:48:37 -0800
From: Martin KaFai Lau <martin.lau@...ux.dev>
To: Jason Xing <kerneljasonxing@...il.com>
Cc: davem@...emloft.net, edumazet@...gle.com, kuba@...nel.org,
 pabeni@...hat.com, dsahern@...nel.org, willemdebruijn.kernel@...il.com,
 willemb@...gle.com, ast@...nel.org, daniel@...earbox.net, andrii@...nel.org,
 eddyz87@...il.com, song@...nel.org, yonghong.song@...ux.dev,
 john.fastabend@...il.com, kpsingh@...nel.org, sdf@...ichev.me,
 haoluo@...gle.com, jolsa@...nel.org, horms@...nel.org, bpf@...r.kernel.org,
 netdev@...r.kernel.org
Subject: Re: [PATCH net-next v5 05/15] net-timestamp: add strict check in some
 BPF calls

On 1/12/25 3:37 AM, Jason Xing wrote:
> In the next round, we will support the UDP proto for SO_TIMESTAMPING
> bpf extension, so we need to ensure there is no safety problem.
> 
> Signed-off-by: Jason Xing <kerneljasonxing@...il.com>
> ---
>   net/core/filter.c | 9 +++++++--
>   1 file changed, 7 insertions(+), 2 deletions(-)
> 
> diff --git a/net/core/filter.c b/net/core/filter.c
> index 0e915268db5f..517f09aabc92 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -5571,7 +5571,7 @@ static int __bpf_getsockopt(struct sock *sk, int level, int optname,
>   static int _bpf_getsockopt(struct sock *sk, int level, int optname,
>   			   char *optval, int optlen)
>   {
> -	if (sk_fullsock(sk))
> +	if (sk_fullsock(sk) && optname != SK_BPF_CB_FLAGS)
>   		sock_owned_by_me(sk);
>   	return __bpf_getsockopt(sk, level, optname, optval, optlen);
>   }
> @@ -5776,6 +5776,7 @@ BPF_CALL_5(bpf_sock_ops_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
>   	   int, level, int, optname, char *, optval, int, optlen)
>   {
>   	if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP &&
> +	    bpf_sock->sk->sk_protocol == IPPROTO_TCP &&
>   	    optname >= TCP_BPF_SYN && optname <= TCP_BPF_SYN_MAC) {
>   		int ret, copy_len = 0;
>   		const u8 *start;
> @@ -5817,7 +5818,8 @@ BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
>   	struct sock *sk = bpf_sock->sk;
>   	int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
>   
> -	if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
> +	if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk) ||
> +	    sk->sk_protocol != IPPROTO_TCP)
>   		return -EINVAL;
>   
>   	tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
> @@ -7626,6 +7628,9 @@ BPF_CALL_4(bpf_sock_ops_load_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
>   	u8 search_kind, search_len, copy_len, magic_len;
>   	int ret;
>   
> +	if (bpf_sock->op != SK_BPF_CB_FLAGS)

SK_BPF_CB_FLAGS is not an op enum value, so the check is incorrect. It breaks
the existing tests:

./test_progs -t tcp_hdr_options
WARNING! Selftests relying on bpf_testmod.ko will be skipped.
#402/1   tcp_hdr_options/simple_estab:FAIL
#402/2   tcp_hdr_options/no_exprm_estab:FAIL
#402/3   tcp_hdr_options/syncookie_estab:FAIL
#402/4   tcp_hdr_options/fastopen_estab:FAIL
#402/5   tcp_hdr_options/fin:FAIL
#402/6   tcp_hdr_options/misc:FAIL
#402     tcp_hdr_options:FAIL
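
For reference, bpf_sock->op only ever holds values from the BPF_SOCK_OPS_*
enum in include/uapi/linux/bpf.h. A minimal sketch of the mismatch (where the
new ops land in the enum is my assumption about this set):

enum {
	BPF_SOCK_OPS_VOID,
	BPF_SOCK_OPS_TIMEOUT_INIT,
	/* ... */
	BPF_SOCK_OPS_WRITE_HDR_OPT_CB,
	/* I assume the new tstamp callback ops get appended here */
};

/* SK_BPF_CB_FLAGS is a bpf_{get,set}sockopt() optname, not an op, so
 * "bpf_sock->op != SK_BPF_CB_FLAGS" compares values from two unrelated
 * namespaces and ends up rejecting every legitimate op.
 */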


Many of the changes in this set are in bpf, and the newly added selftest is
also a bpf prog, so all bpf selftests should be run before posting
(see Documentation/bpf/bpf_devel_QA.rst).
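
i.e. roughly (the doc has the exact steps):

  $ cd tools/testing/selftests/bpf
  $ make
  $ sudo ./test_progs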

The bpf CI can automatically pick the set up, and you would get an automated
email on a breakage like this, if the set is tagged for bpf-next. We can figure
out where to land the set later (bpf-next/net or net-next/main) when it is ready.

All these changes also need a test in selftests/bpf. For example, I expect a
test ensuring that calling these bpf helpers from the new tstamp callback
returns a negative errno value.
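
A rough sketch of the bpf prog side, with BPF_SOCK_OPS_TS_SND_CB standing in
as a hypothetical name for the new tstamp callback op (the userspace side
would then assert on the recorded value, e.g. -EOPNOTSUPP):

/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

int load_hdr_opt_ret;

SEC("sockops")
int tstamp_cb(struct bpf_sock_ops *skops)
{
	char opt[4] = {};

	/* Helpers that assume a locked tcp_sock should fail when called
	 * from the tstamp callback.  The opt payload does not matter
	 * here; the op check should reject the call first.
	 */
	if (skops->op == BPF_SOCK_OPS_TS_SND_CB)	/* hypothetical */
		load_hdr_opt_ret = bpf_load_hdr_opt(skops, opt,
						    sizeof(opt), 0);
	return 1;
}

char _license[] SEC("license") = "GPL";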

For patch 4 and patch 5, I would suggest keeping it simple and only checking
bpf_sock->op in the helpers that assume a tcp_sock and/or a locked sk.
Something like this on top of your patch. Untested:

diff --git i/net/core/filter.c w/net/core/filter.c
index 517f09aabc92..ccb13b61c528 100644
--- i/net/core/filter.c
+++ w/net/core/filter.c
@@ -7620,6 +7620,11 @@ static const u8 *bpf_search_tcp_opt(const u8 *op, const u8 *opend,
  	return ERR_PTR(-ENOMSG);
  }

+static bool is_locked_tcp_sock_ops(struct bpf_sock_ops_kern *bpf_sock)
+{
+	return bpf_sock->op <= BPF_SOCK_OPS_WRITE_HDR_OPT_CB;
+}
+
  BPF_CALL_4(bpf_sock_ops_load_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
  	   void *, search_res, u32, len, u64, flags)
  {
@@ -7628,8 +7633,8 @@ BPF_CALL_4(bpf_sock_ops_load_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
  	u8 search_kind, search_len, copy_len, magic_len;
  	int ret;

-	if (bpf_sock->op != SK_BPF_CB_FLAGS)
-		return -EINVAL;
+	if (!is_locked_tcp_sock_ops(bpf_sock))
+		return -EOPNOTSUPP;

  	/* 2 byte is the minimal option len except TCPOPT_NOP and
  	 * TCPOPT_EOL which are useless for the bpf prog to learn
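
(The "<=" relies on the new tstamp callback ops being appended to the op enum
after BPF_SOCK_OPS_WRITE_HDR_OPT_CB, so that everything up to and including it
keeps running under a locked tcp_sock. Please double check that ordering holds
in this set.)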


> +		return -EINVAL;
> +
>   	/* 2 byte is the minimal option len except TCPOPT_NOP and
>   	 * TCPOPT_EOL which are useless for the bpf prog to learn
>   	 * and this helper disallow loading them also.

