Message-Id: <20200122130549.832236-3-jakub@cloudflare.com>
Date: Wed, 22 Jan 2020 14:05:39 +0100
From: Jakub Sitnicki <jakub@...udflare.com>
To: bpf@...r.kernel.org
Cc: netdev@...r.kernel.org, kernel-team@...udflare.com,
Alexei Starovoitov <alexei.starovoitov@...il.com>,
Daniel Borkmann <daniel@...earbox.net>,
John Fastabend <john.fastabend@...il.com>,
Lorenz Bauer <lmb@...udflare.com>, Martin Lau <kafai@...com>
Subject: [PATCH bpf-next v3 02/12] net, sk_msg: Annotate lockless access to sk_prot on clone

The sk_msg and ULP frameworks override the protocol callbacks pointer in
sk->sk_prot, while TCP accesses it locklessly when cloning the listening
socket, that is, with neither sk_lock nor sk_callback_lock held.

Once we enable the use of listening sockets with sockmap (and hence
sk_msg), there will be shared access to sk->sk_prot if a socket gets
cloned while it is being inserted into or deleted from the sockmap on
another CPU:

Read side:

tcp_v4_rcv
  sk = __inet_lookup_skb(...)
  tcp_check_req(sk)
    inet_csk(sk)->icsk_af_ops->syn_recv_sock
      tcp_v4_syn_recv_sock
        tcp_create_openreq_child
          inet_csk_clone_lock
            sk_clone_lock
              READ_ONCE(sk->sk_prot)

Write side:

sock_map_ops->map_update_elem
  sock_map_update_elem
    sock_map_update_common
      sock_map_link_no_progs
        tcp_bpf_init
          tcp_bpf_update_sk_prot
            sk_psock_update_proto
              WRITE_ONCE(sk->sk_prot, ops)

sock_map_ops->map_delete_elem
  sock_map_delete_elem
    __sock_map_delete
      sock_map_unref
        sk_psock_put
          sk_psock_drop
            sk_psock_restore_proto
              tcp_update_ulp
                WRITE_ONCE(sk->sk_prot, proto)

Mark the shared access with READ_ONCE/WRITE_ONCE annotations.
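
For anyone who wants to poke at the pattern outside the kernel, below is a
minimal userspace sketch of the same access shape, not kernel code: one
thread keeps republishing a shared callbacks pointer (standing in for the
sockmap update/delete path), while another thread snapshots and dereferences
it without taking any lock (standing in for the clone path). C11 relaxed
atomics approximate READ_ONCE/WRITE_ONCE here, and all names in it
(proto_ops, update_proto, clone_sock) are made up for the illustration.

/* Userspace model only: _Atomic loads/stores with memory_order_relaxed
 * stand in for the kernel's READ_ONCE()/WRITE_ONCE().
 * Build with: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct proto_ops {
	const char *name;
};

static struct proto_ops base_ops = { .name = "tcp" };
static struct proto_ops bpf_ops  = { .name = "tcp_bpf" };

/* Shared pointer, analogous to sk->sk_prot. */
static _Atomic(struct proto_ops *) sk_prot = &base_ops;

/* "Write side": sockmap update/delete swapping the callbacks pointer. */
static void *update_proto(void *arg)
{
	for (int i = 0; i < 100000; i++) {
		/* like WRITE_ONCE(sk->sk_prot, ops) */
		atomic_store_explicit(&sk_prot, &bpf_ops, memory_order_relaxed);
		/* like restoring the saved proto on delete */
		atomic_store_explicit(&sk_prot, &base_ops, memory_order_relaxed);
	}
	return arg;
}

/* "Read side": clone path sampling the pointer once, with no lock held. */
static void *clone_sock(void *arg)
{
	unsigned long hits = 0;

	for (int i = 0; i < 100000; i++) {
		/* like prot = READ_ONCE(sk->sk_prot); reuse the one snapshot */
		struct proto_ops *prot =
			atomic_load_explicit(&sk_prot, memory_order_relaxed);
		hits += (prot == &bpf_ops);
	}
	printf("reader observed the bpf ops %lu times\n", hits);
	return arg;
}

int main(void)
{
	pthread_t writer, reader;

	pthread_create(&writer, NULL, update_proto, NULL);
	pthread_create(&reader, NULL, clone_sock, NULL);
	pthread_join(writer, NULL);
	pthread_join(reader, NULL);
	return 0;
}

The annotations add no ordering; they keep the compiler from tearing or
re-reading sk->sk_prot and mark the data race as intentional for tools
like KCSAN.
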
Signed-off-by: Jakub Sitnicki <jakub@...udflare.com>
---
 include/linux/skmsg.h | 3 ++-
 net/core/sock.c       | 5 +++--
 net/ipv4/tcp_bpf.c    | 4 +++-
 net/ipv4/tcp_ulp.c    | 3 ++-
 net/tls/tls_main.c    | 3 ++-
 5 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 41ea1258d15e..55c834a5c25e 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -352,7 +352,8 @@ static inline void sk_psock_update_proto(struct sock *sk,
 	psock->saved_write_space = sk->sk_write_space;
 
 	psock->sk_proto = sk->sk_prot;
-	sk->sk_prot = ops;
+	/* Pairs with lockless read in sk_clone_lock() */
+	WRITE_ONCE(sk->sk_prot, ops);
 }
 
 static inline void sk_psock_restore_proto(struct sock *sk,
diff --git a/net/core/sock.c b/net/core/sock.c
index a4c8fac781ff..3953bb23f4d0 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1792,16 +1792,17 @@ static void sk_init_common(struct sock *sk)
  */
 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 {
+	struct proto *prot = READ_ONCE(sk->sk_prot);
 	struct sock *newsk;
 	bool is_charged = true;
 
-	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
+	newsk = sk_prot_alloc(prot, priority, sk->sk_family);
 	if (newsk != NULL) {
 		struct sk_filter *filter;
 
 		sock_copy(newsk, sk);
 
-		newsk->sk_prot_creator = sk->sk_prot;
+		newsk->sk_prot_creator = prot;
 
 		/* SANITY */
 		if (likely(newsk->sk_net_refcnt))
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index e38705165ac9..4f25aba44ead 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -648,8 +648,10 @@ static void tcp_bpf_reinit_sk_prot(struct sock *sk, struct sk_psock *psock)
 	/* Reinit occurs when program types change e.g. TCP_BPF_TX is removed
 	 * or added requiring sk_prot hook updates. We keep original saved
 	 * hooks in this case.
+	 *
+	 * Pairs with lockless read in sk_clone_lock().
 	 */
-	sk->sk_prot = &tcp_bpf_prots[family][config];
+	WRITE_ONCE(sk->sk_prot, &tcp_bpf_prots[family][config]);
 }
 
 static int tcp_bpf_assert_proto_ops(struct proto *ops)
diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c
index 12ab5db2b71c..acd1ea0a66f7 100644
--- a/net/ipv4/tcp_ulp.c
+++ b/net/ipv4/tcp_ulp.c
@@ -104,7 +104,8 @@ void tcp_update_ulp(struct sock *sk, struct proto *proto)
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
 	if (!icsk->icsk_ulp_ops) {
-		sk->sk_prot = proto;
+		/* Pairs with lockless read in sk_clone_lock() */
+		WRITE_ONCE(sk->sk_prot, proto);
 		return;
 	}
 
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index dac24c7aa7d4..f0748e951dea 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -740,7 +740,8 @@ static void tls_update(struct sock *sk, struct proto *p)
 	if (likely(ctx))
 		ctx->sk_proto = p;
 	else
-		sk->sk_prot = p;
+		/* Pairs with lockless read in sk_clone_lock() */
+		WRITE_ONCE(sk->sk_prot, p);
 }
 
 static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
--
2.24.1