Message-Id: <20200317170439.873532-4-jakub@cloudflare.com>
Date: Tue, 17 Mar 2020 18:04:39 +0100
From: Jakub Sitnicki <jakub@...udflare.com>
To: netdev@...r.kernel.org
Cc: kernel-team@...udflare.com,
John Fastabend <john.fastabend@...il.com>
Subject: [PATCH net-next 3/3] net/tls: Annotate access to sk_prot with READ_ONCE/WRITE_ONCE

sockmap performs lockless writes to sk->sk_prot on the following paths:

tcp_bpf_{recvmsg|sendmsg} / sock_map_unref
  sk_psock_put
    sk_psock_drop
      sk_psock_restore_proto
        WRITE_ONCE(sk->sk_prot, proto)

To prevent load/store tearing [1], and to make tooling aware of intentional
shared access [2], we need to annotate other sites that access sk_prot with
READ_ONCE/WRITE_ONCE macros.
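
For readers unfamiliar with the annotations, here is a minimal sketch of
the paired accesses (not part of the patch; identifiers are taken from the
call sites touched by this series):

	/* Writer side, e.g. sockmap restoring the original proto ops.
	 * WRITE_ONCE() stores the pointer in a single access, so a
	 * concurrent reader cannot observe a half-written value.
	 */
	WRITE_ONCE(sk->sk_prot, proto);

	/* Reader side, e.g. a fast path invoking a proto callback.
	 * READ_ONCE() loads the pointer once, in a single access,
	 * instead of letting the compiler tear or re-read the load.
	 */
	READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
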
The change was done with Coccinelle using the following semantic patch:

@@
expression E;
identifier I;
struct sock *sk;
identifier sk_prot =~ "^sk_prot$";
@@
(
 E =
-sk->sk_prot
+READ_ONCE(sk->sk_prot)
|
-sk->sk_prot = E
+WRITE_ONCE(sk->sk_prot, E)
|
-sk->sk_prot
+READ_ONCE(sk->sk_prot)
 ->I
)
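
For reference, a semantic patch like this can be applied with Coccinelle's
spatch tool. Assuming it is saved as sk_prot.cocci (an illustrative file
name, not part of this patch):

	spatch --sp-file sk_prot.cocci --in-place --dir net/tls/
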
Signed-off-by: Jakub Sitnicki <jakub@...udflare.com>
---
 net/tls/tls_device.c | 2 +-
 net/tls/tls_main.c   | 9 +++++----
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 1c5574e2e058..a562ebaaa33c 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -366,7 +366,7 @@ static int tls_do_allocation(struct sock *sk,
 	if (!offload_ctx->open_record) {
 		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
 						   sk->sk_allocation))) {
-			sk->sk_prot->enter_memory_pressure(sk);
+			READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
 			sk_stream_moderate_sndbuf(sk);
 			return -ENOMEM;
 		}
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index e7de0306a7df..156efce50dbd 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -69,7 +69,8 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx)
 {
 	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
 
-	sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
+	WRITE_ONCE(sk->sk_prot,
+		   &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);
 }
 
 int wait_on_pending_writer(struct sock *sk, long *timeo)
@@ -312,7 +313,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
 	write_lock_bh(&sk->sk_callback_lock);
 	if (free_ctx)
 		rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
-	sk->sk_prot = ctx->sk_proto;
+	WRITE_ONCE(sk->sk_prot, ctx->sk_proto);
 	if (sk->sk_write_space == tls_write_space)
 		sk->sk_write_space = ctx->sk_write_space;
 	write_unlock_bh(&sk->sk_callback_lock);
@@ -621,14 +622,14 @@ struct tls_context *tls_ctx_create(struct sock *sk)
 	mutex_init(&ctx->tx_lock);
 	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
-	ctx->sk_proto = sk->sk_prot;
+	ctx->sk_proto = READ_ONCE(sk->sk_prot);
 
 	return ctx;
 }
 
 static void tls_build_proto(struct sock *sk)
 {
 	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
-	const struct proto *prot = sk->sk_prot;
+	const struct proto *prot = READ_ONCE(sk->sk_prot);
 
 	/* Build IPv6 TLS whenever the address of tcpv6 _prot changes */
 	if (ip_ver == TLSV6 &&
--
2.24.1