Date:   Wed, 19 Aug 2020 14:22:55 -0700
From:   John Fastabend <john.fastabend@...il.com>
To:     Lorenz Bauer <lmb@...udflare.com>, jakub@...udflare.com,
        john.fastabend@...il.com, Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>,
        Lorenz Bauer <lmb@...udflare.com>,
        "David S. Miller" <davem@...emloft.net>,
        Jakub Kicinski <kuba@...nel.org>
Cc:     kernel-team@...udflare.com, netdev@...r.kernel.org,
        bpf@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: RE: [PATCH bpf-next 5/6] bpf: sockmap: allow update from BPF

Lorenz Bauer wrote:
> Allow calling bpf_map_update_elem on sockmap and sockhash from a BPF
> context. The synchronization required for this is a bit fiddly: we
> need to prevent the socket from changing its state while we add it
> to the sockmap, since we rely on getting a callback via
> sk_prot->unhash. However, we can't just lock_sock like in
> sock_map_sk_acquire because that might sleep. So instead we disable
> softirq processing and use bh_lock_sock to prevent further
> modification.
> 
> Signed-off-by: Lorenz Bauer <lmb@...udflare.com>
> ---
>  kernel/bpf/verifier.c |  6 ++++--
>  net/core/sock_map.c   | 24 ++++++++++++++++++++++++
>  2 files changed, 28 insertions(+), 2 deletions(-)
> 
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 47f9b94bb9d4..421fccf18dea 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -4254,7 +4254,8 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
>  		    func_id != BPF_FUNC_map_delete_elem &&
>  		    func_id != BPF_FUNC_msg_redirect_map &&
>  		    func_id != BPF_FUNC_sk_select_reuseport &&
> -		    func_id != BPF_FUNC_map_lookup_elem)
> +		    func_id != BPF_FUNC_map_lookup_elem &&
> +		    func_id != BPF_FUNC_map_update_elem)
>  			goto error;
>  		break;
>  	case BPF_MAP_TYPE_SOCKHASH:
> @@ -4263,7 +4264,8 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
>  		    func_id != BPF_FUNC_map_delete_elem &&
>  		    func_id != BPF_FUNC_msg_redirect_hash &&
>  		    func_id != BPF_FUNC_sk_select_reuseport &&
> -		    func_id != BPF_FUNC_map_lookup_elem)
> +		    func_id != BPF_FUNC_map_lookup_elem &&
> +		    func_id != BPF_FUNC_map_update_elem)

I lost track of a detail here: map_lookup_elem should return
PTR_TO_MAP_VALUE_OR_NULL, but if we want to feed that back into
map_update_elem() we need it to return PTR_TO_SOCKET_OR_NULL, and
then presumably a NULL check to get a PTR_TO_SOCKET type as
expected.

Can we use the same logic for the expected arg (previous patch) on
the ret_type? Or did I miss it? :/ Need some coffee I guess.
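
To make that concrete, something like this hypothetical snippet (map
name, section and keys all made up, untested) is what the types would
need to support:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 2);
	__type(key, __u32);
	__type(value, __u64);
} copy_map SEC(".maps");

SEC("classifier")
int copy_slot(struct __sk_buff *skb)
{
	__u32 src = 0, dst = 1;
	struct bpf_sock *sk;

	sk = bpf_map_lookup_elem(&copy_map, &src);
	if (!sk)	/* NULL check should promote *_OR_NULL to PTR_TO_SOCKET */
		return TC_ACT_OK;

	/* store the same socket under a second key */
	bpf_map_update_elem(&copy_map, &dst, sk, BPF_ANY);
	return TC_ACT_OK;
}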

>  			goto error;
>  		break;
>  	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
> diff --git a/net/core/sock_map.c b/net/core/sock_map.c
> index 018367fb889f..b2c886c34566 100644
> --- a/net/core/sock_map.c
> +++ b/net/core/sock_map.c
> @@ -603,6 +603,28 @@ int sock_map_update_elem_sys(struct bpf_map *map, void *key,
>  	return ret;
>  }
>  
> +static int sock_map_update_elem(struct bpf_map *map, void *key,
> +				void *value, u64 flags)
> +{
> +	struct sock *sk = (struct sock *)value;
> +	int ret;
> +
> +	if (!sock_map_sk_is_suitable(sk))
> +		return -EOPNOTSUPP;
> +
> +	local_bh_disable();
> +	bh_lock_sock(sk);

How do we ensure we are not being called from some context which
already holds bh_lock_sock()? It seems we can call map_update_elem()
from any context: kprobes, tc, xdp, etc.
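
Just to spell out the hazard I'm worried about, a hypothetical (and
deliberately broken) tracing program: tcp_data_queue() runs in softirq
with bh_lock_sock(sk) already held, so the update below would spin on
a lock we already own:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, 1024);
	__type(key, __u32);
	__type(value, __u64);
} watched SEC(".maps");

SEC("kprobe/tcp_data_queue")
int BPF_KPROBE(on_data_queue, struct sock *sk)
{
	__u32 key = 0;

	/* would reach bh_lock_sock(sk) while it is already held */
	bpf_map_update_elem(&watched, &key, sk, BPF_ANY);
	return 0;
}

(Whether the verifier would even accept a raw kprobe socket pointer as
the value here is a separate question, but you get the idea.)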

> +	if (!sock_map_sk_state_allowed(sk))
> +		ret = -EOPNOTSUPP;
> +	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
> +		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
> +	else
> +		ret = sock_hash_update_common(map, key, sk, flags);
> +	bh_unlock_sock(sk);
> +	local_bh_enable();
> +	return ret;
> +}
> +
>  BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
>  	   struct bpf_map *, map, void *, key, u64, flags)
>  {
> @@ -687,6 +709,7 @@ const struct bpf_map_ops sock_map_ops = {
>  	.map_free		= sock_map_free,
>  	.map_get_next_key	= sock_map_get_next_key,
>  	.map_lookup_elem_sys_only = sock_map_lookup_sys,
> +	.map_update_elem	= sock_map_update_elem,
>  	.map_delete_elem	= sock_map_delete_elem,
>  	.map_lookup_elem	= sock_map_lookup,
>  	.map_release_uref	= sock_map_release_progs,
> @@ -1180,6 +1203,7 @@ const struct bpf_map_ops sock_hash_ops = {
>  	.map_alloc		= sock_hash_alloc,
>  	.map_free		= sock_hash_free,
>  	.map_get_next_key	= sock_hash_get_next_key,
> +	.map_update_elem	= sock_map_update_elem,
>  	.map_delete_elem	= sock_hash_delete_elem,
>  	.map_lookup_elem	= sock_hash_lookup,
>  	.map_lookup_elem_sys_only = sock_hash_lookup_sys,
> -- 
> 2.25.1
> 

