Message-ID: <20230315205746.3801038-5-edumazet@google.com>
Date: Wed, 15 Mar 2023 20:57:44 +0000
From: Eric Dumazet <edumazet@...gle.com>
To: "David S . Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Cc: netdev@...r.kernel.org, eric.dumazet@...il.com,
Eric Dumazet <edumazet@...gle.com>
Subject: [PATCH net-next 4/6] tcp: annotate lockless access to sk->sk_err

tcp_poll() reads sk->sk_err without the socket lock held/owned.
We should use READ_ONCE() there, and update writers
to use WRITE_ONCE().
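
For context, a minimal user-space sketch (not part of the patch) of the
pattern being applied: the writer's store and the lockless reader's load
are both annotated so the compiler cannot tear, fuse or reload the access.
struct fake_sock, report_error() and peek_error() are made-up names, and
the two macros are simplified stand-ins for the kernel's
READ_ONCE()/WRITE_ONCE() from <linux/compiler.h>.

/* Illustrative sketch only; builds with GCC/Clang (uses typeof). */
#include <errno.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE(). */
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile typeof(x) *)&(x))

struct fake_sock {		/* hypothetical stand-in for struct sock */
	int sk_err;
};

/* Writer side: runs with the socket locked/owned (as in tcp_reset(),
 * tcp_v4_err(), ...), but the store must still be a single, non-torn
 * access because tcp_poll() loads sk_err without the lock.
 */
static void report_error(struct fake_sock *sk, int err)
{
	WRITE_ONCE(sk->sk_err, err);
}

/* Reader side: lockless, as in tcp_poll(). */
static int peek_error(struct fake_sock *sk)
{
	return READ_ONCE(sk->sk_err);
}

int main(void)
{
	struct fake_sock sk = { .sk_err = 0 };

	report_error(&sk, ECONNRESET);
	printf("sk_err = %d\n", peek_error(&sk));
	return 0;
}
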
Signed-off-by: Eric Dumazet <edumazet@...gle.com>
---
net/ipv4/tcp.c | 11 ++++++-----
net/ipv4/tcp_input.c | 6 +++---
net/ipv4/tcp_ipv4.c | 4 ++--
net/ipv4/tcp_output.c | 2 +-
net/ipv4/tcp_timer.c | 2 +-
net/ipv6/tcp_ipv6.c | 4 ++--
6 files changed, 15 insertions(+), 14 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 288693981b0060c028680033aee143466b2285e4..01569de651b65aa5641fb0c06e0fb81dc40cd85a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -589,7 +589,8 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
}
/* This barrier is coupled with smp_wmb() in tcp_reset() */
smp_rmb();
- if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
+ if (READ_ONCE(sk->sk_err) ||
+ !skb_queue_empty_lockless(&sk->sk_error_queue))
mask |= EPOLLERR;
return mask;
@@ -3094,7 +3095,7 @@ int tcp_disconnect(struct sock *sk, int flags)
if (old_state == TCP_LISTEN) {
inet_csk_listen_stop(sk);
} else if (unlikely(tp->repair)) {
- sk->sk_err = ECONNABORTED;
+ WRITE_ONCE(sk->sk_err, ECONNABORTED);
} else if (tcp_need_reset(old_state) ||
(tp->snd_nxt != tp->write_seq &&
(1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
@@ -3102,9 +3103,9 @@ int tcp_disconnect(struct sock *sk, int flags)
* states
*/
tcp_send_active_reset(sk, gfp_any());
- sk->sk_err = ECONNRESET;
+ WRITE_ONCE(sk->sk_err, ECONNRESET);
} else if (old_state == TCP_SYN_SENT)
- sk->sk_err = ECONNRESET;
+ WRITE_ONCE(sk->sk_err, ECONNRESET);
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
@@ -4692,7 +4693,7 @@ int tcp_abort(struct sock *sk, int err)
bh_lock_sock(sk);
if (!sock_flag(sk, SOCK_DEAD)) {
- sk->sk_err = err;
+ WRITE_ONCE(sk->sk_err, err);
/* This barrier is coupled with smp_rmb() in tcp_poll() */
smp_wmb();
sk_error_report(sk);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8b5b6ca6617d0f6e2b03cf7164a6e8929fc521e1..754ddbe0577f139d209032b4f15787392fe9a1d5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4322,15 +4322,15 @@ void tcp_reset(struct sock *sk, struct sk_buff *skb)
/* We want the right error as BSD sees it (and indeed as we do). */
switch (sk->sk_state) {
case TCP_SYN_SENT:
- sk->sk_err = ECONNREFUSED;
+ WRITE_ONCE(sk->sk_err, ECONNREFUSED);
break;
case TCP_CLOSE_WAIT:
- sk->sk_err = EPIPE;
+ WRITE_ONCE(sk->sk_err, EPIPE);
break;
case TCP_CLOSE:
return;
default:
- sk->sk_err = ECONNRESET;
+ WRITE_ONCE(sk->sk_err, ECONNRESET);
}
/* This barrier is coupled with smp_rmb() in tcp_poll() */
smp_wmb();
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 4f6894469b620a75963b9329fc9944d835671515..89daa6b953ff4c49284959b2500618a963685d7d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -596,7 +596,7 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);
if (!sock_owned_by_user(sk)) {
- sk->sk_err = err;
+ WRITE_ONCE(sk->sk_err, err);
sk_error_report(sk);
@@ -625,7 +625,7 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
inet = inet_sk(sk);
if (!sock_owned_by_user(sk) && inet->recverr) {
- sk->sk_err = err;
+ WRITE_ONCE(sk->sk_err, err);
sk_error_report(sk);
} else { /* Only an error on timeout */
WRITE_ONCE(sk->sk_err_soft, err);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 71d01cf3c13eb4bd3d314ef140568d2ffd6a499e..f7e00d90a7304cdbaee493d994c6ab1063392d34 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3699,7 +3699,7 @@ static void tcp_connect_init(struct sock *sk)
tp->rx_opt.rcv_wscale = rcv_wscale;
tp->rcv_ssthresh = tp->rcv_wnd;
- sk->sk_err = 0;
+ WRITE_ONCE(sk->sk_err, 0);
sock_reset_flag(sk, SOCK_DONE);
tp->snd_wnd = 0;
tcp_init_wl(tp, 0);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 8823e2182713a26fa42ce44a21b9ec7a4d7e1c73..b839c2f91292f7346f33d6dcbf597594473a5aca 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -67,7 +67,7 @@ u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
static void tcp_write_err(struct sock *sk)
{
- sk->sk_err = READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT;
+ WRITE_ONCE(sk->sk_err, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT);
sk_error_report(sk);
tcp_write_queue_purge(sk);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index dc963eebc668f7d24981de21650608a27e431d41..35cf523c9efd4370bf3110f6777592eabb15ca9e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -493,7 +493,7 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th);
if (!sock_owned_by_user(sk)) {
- sk->sk_err = err;
+ WRITE_ONCE(sk->sk_err, err);
sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
tcp_done(sk);
@@ -513,7 +513,7 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
}
if (!sock_owned_by_user(sk) && np->recverr) {
- sk->sk_err = err;
+ WRITE_ONCE(sk->sk_err, err);
sk_error_report(sk);
} else {
WRITE_ONCE(sk->sk_err_soft, err);
--
2.40.0.rc2.332.ga46443480c-goog