Message-ID: <CAL+tcoDGuVNie-mAzU6t61pQPqt24PWKP88JG-kVM6ZhttUZEg@mail.gmail.com>
Date: Fri, 18 Aug 2023 14:40:22 +0800
From: Jason Xing <kerneljasonxing@...il.com>
To: Eric Dumazet <edumazet@...gle.com>
Cc: "David S . Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>, netdev@...r.kernel.org, eric.dumazet@...il.com
Subject: Re: [PATCH net-next] net: annotate data-races around sk->sk_lingertime
On Fri, Aug 18, 2023 at 10:11 AM Eric Dumazet <edumazet@...gle.com> wrote:
>
> sk_getsockopt() runs locklessly. This means sk->sk_lingertime
> can be read while other threads are changing its value.
>
> Other reads also happen without socket lock being held,
> and must be annotated.
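
Side note for anyone following along in the archive: below is a minimal,
hypothetical sketch of the annotation pattern the patch applies. The helper
names are made up for illustration; only READ_ONCE()/WRITE_ONCE(), the
socket-lock helpers and the sk_lingertime field come from the kernel. The
writer still takes the socket lock, and the lockless reader pairs the store
with READ_ONCE() so tools like KCSAN treat the race as intentional:

#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */
#include <net/sock.h>

/* writer side: still serialized by the socket lock (hypothetical helper) */
static void example_set_linger(struct sock *sk, unsigned long timeout)
{
	lock_sock(sk);
	WRITE_ONCE(sk->sk_lingertime, timeout);	/* annotated store */
	sock_set_flag(sk, SOCK_LINGER);
	release_sock(sk);
}

/* reader side: may run without the socket lock, e.g. sk_getsockopt() */
static unsigned long example_get_linger(struct sock *sk)
{
	return READ_ONCE(sk->sk_lingertime);	/* annotated load */
}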
>
> Remove the preprocessor logic using BITS_PER_LONG; compilers
> are smart enough to figure this out by themselves.
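
And a rough, userspace-compilable illustration of why the old
#if (BITS_PER_LONG == 32) guard can go away (the constants below are
simplified assumptions, not the kernel's headers, and the function name is
made up): the kernel defines MAX_SCHEDULE_TIMEOUT as LONG_MAX, so on 64-bit
MAX_SCHEDULE_TIMEOUT / HZ is far larger than any 32-bit l_linger value, the
comparison is provably false, and the compiler drops the clamp branch; 32-bit
builds keep it:

#include <limits.h>

#define MAX_SCHEDULE_TIMEOUT	LONG_MAX	/* kernel defines it as LONG_MAX */
#define HZ			1000		/* assumed CONFIG_HZ value */

/* mirrors the new sk_setsockopt() conversion, minus the socket */
static unsigned long linger_secs_to_jiffies(unsigned int t_sec)
{
	if (t_sec >= MAX_SCHEDULE_TIMEOUT / HZ)	/* always false on 64-bit */
		return MAX_SCHEDULE_TIMEOUT;
	return (unsigned long)t_sec * HZ;
}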
>
> Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
> Signed-off-by: Eric Dumazet <edumazet@...gle.com>
Reviewed-by: Jason Xing <kerneljasonxing@...il.com>
Thanks!
> ---
> net/bluetooth/iso.c | 2 +-
> net/bluetooth/sco.c | 2 +-
> net/core/sock.c | 18 +++++++++---------
> net/sched/em_meta.c | 2 +-
> net/smc/af_smc.c | 2 +-
> 5 files changed, 13 insertions(+), 13 deletions(-)
>
> diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
> index 6b66d6a88b9a22adf208b24e0a31d6f236355d9b..3c03e49422c7519167a0a2a6f5bdc8af5b2c0cd0 100644
> --- a/net/bluetooth/iso.c
> +++ b/net/bluetooth/iso.c
> @@ -1475,7 +1475,7 @@ static int iso_sock_release(struct socket *sock)
>
> iso_sock_close(sk);
>
> - if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
> + if (sock_flag(sk, SOCK_LINGER) && READ_ONCE(sk->sk_lingertime) &&
> !(current->flags & PF_EXITING)) {
> lock_sock(sk);
> err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
> diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
> index 50ad5935ae47a31cb3d11a8b56f7d462cbaf2366..c736186aba26beadccd76c66f0af72835d740551 100644
> --- a/net/bluetooth/sco.c
> +++ b/net/bluetooth/sco.c
> @@ -1245,7 +1245,7 @@ static int sco_sock_release(struct socket *sock)
>
> sco_sock_close(sk);
>
> - if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
> + if (sock_flag(sk, SOCK_LINGER) && READ_ONCE(sk->sk_lingertime) &&
> !(current->flags & PF_EXITING)) {
> lock_sock(sk);
> err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
> diff --git a/net/core/sock.c b/net/core/sock.c
> index 22d94394335fb75f12da65368e87c5a65167cc0e..e11952aee3777a5df51abdf70d30fbd3ec3a50fc 100644
> --- a/net/core/sock.c
> +++ b/net/core/sock.c
> @@ -797,7 +797,7 @@ EXPORT_SYMBOL(sock_set_reuseport);
> void sock_no_linger(struct sock *sk)
> {
> lock_sock(sk);
> - sk->sk_lingertime = 0;
> + WRITE_ONCE(sk->sk_lingertime, 0);
> sock_set_flag(sk, SOCK_LINGER);
> release_sock(sk);
> }
> @@ -1230,15 +1230,15 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
> ret = -EFAULT;
> break;
> }
> - if (!ling.l_onoff)
> + if (!ling.l_onoff) {
> sock_reset_flag(sk, SOCK_LINGER);
> - else {
> -#if (BITS_PER_LONG == 32)
> - if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
> - sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
> + } else {
> + unsigned int t_sec = ling.l_linger;
> +
> + if (t_sec >= MAX_SCHEDULE_TIMEOUT / HZ)
> + WRITE_ONCE(sk->sk_lingertime, MAX_SCHEDULE_TIMEOUT);
> else
> -#endif
> - sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
> + WRITE_ONCE(sk->sk_lingertime, t_sec * HZ);
> sock_set_flag(sk, SOCK_LINGER);
> }
> break;
> @@ -1692,7 +1692,7 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
> case SO_LINGER:
> lv = sizeof(v.ling);
> v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
> - v.ling.l_linger = sk->sk_lingertime / HZ;
> + v.ling.l_linger = READ_ONCE(sk->sk_lingertime) / HZ;
> break;
>
> case SO_BSDCOMPAT:
> diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
> index 6fdba069f6bfd306fa68fc2e68bdcaf0cf4d4e9e..da34fd4c92695f453f1d6547c6e4e8d3afe7a116 100644
> --- a/net/sched/em_meta.c
> +++ b/net/sched/em_meta.c
> @@ -502,7 +502,7 @@ META_COLLECTOR(int_sk_lingertime)
> *err = -1;
> return;
> }
> - dst->value = sk->sk_lingertime / HZ;
> + dst->value = READ_ONCE(sk->sk_lingertime) / HZ;
> }
>
> META_COLLECTOR(int_sk_err_qlen)
> diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
> index f5834af5fad535c420381827548cecdf0d03b0d5..7c77565c39d19c7f1baf1184c4a5bf950c9cfe33 100644
> --- a/net/smc/af_smc.c
> +++ b/net/smc/af_smc.c
> @@ -1820,7 +1820,7 @@ void smc_close_non_accepted(struct sock *sk)
> lock_sock(sk);
> if (!sk->sk_lingertime)
> /* wait for peer closing */
> - sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
> + WRITE_ONCE(sk->sk_lingertime, SMC_MAX_STREAM_WAIT_TIMEOUT);
> __smc_release(smc);
> release_sock(sk);
> sock_put(sk); /* sock_hold above */
> --
> 2.42.0.rc1.204.g551eb34607-goog
>
>