Message-ID: <20230802173752.52164-1-kuniyu@amazon.com>
Date: Wed, 2 Aug 2023 10:37:52 -0700
From: Kuniyuki Iwashima <kuniyu@...zon.com>
To: <edumazet@...gle.com>
CC: <davem@...emloft.net>, <dsahern@...nel.org>, <eric.dumazet@...il.com>,
<kuba@...nel.org>, <kuniyu@...zon.com>, <netdev@...r.kernel.org>,
<pabeni@...hat.com>
Subject: Re: [PATCH net 4/6] tcp_metrics: annotate data-races around tm->tcpm_vals[]
From: Eric Dumazet <edumazet@...gle.com>
Date: Wed, 2 Aug 2023 13:14:58 +0000
> tm->tcpm_vals[] values can be read or written locklessly.
>
> Add the needed READ_ONCE()/WRITE_ONCE() annotations to document this,
> and enforce use of tcp_metric_get() and tcp_metric_set().
>
> Fixes: 51c5d0c4b169 ("tcp: Maintain dynamic metrics in local cache.")
> Signed-off-by: Eric Dumazet <edumazet@...gle.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@...zon.com>
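
For readers following along, here is a minimal userspace sketch of the
accessor pattern this patch enforces. READ_ONCE()/WRITE_ONCE() below are
simplified volatile-cast stand-ins for the real kernel macros, and the
struct and names are hypothetical, mirroring tcpm_vals[] only for
illustration (builds with GCC/Clang, which provide typeof):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins; the real kernel macros also handle
 * non-scalar sizes and KCSAN instrumentation.
 */
#define READ_ONCE(x)     (*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

enum metric_index { METRIC_RTT, METRIC_CWND, __METRIC_MAX };

struct metrics_block {
        uint32_t vals[__METRIC_MAX];
};

/* Reader side: a single non-torn load the compiler cannot
 * duplicate, fuse with another load, or hoist out of a loop.
 */
static uint32_t metric_get(const struct metrics_block *m,
                           enum metric_index idx)
{
        return READ_ONCE(m->vals[idx]);
}

/* Writer side: a single store; lockless readers observe either
 * the old or the new value, never a torn mix of both.
 */
static void metric_set(struct metrics_block *m,
                       enum metric_index idx, uint32_t val)
{
        WRITE_ONCE(m->vals[idx], val);
}

int main(void)
{
        struct metrics_block m = { .vals = { 0 } };

        metric_set(&m, METRIC_RTT, 42);
        printf("rtt=%u\n", metric_get(&m, METRIC_RTT));
        return 0;
}

The point of routing every access through the helpers, as the patch does
for tcpm_suck_dst() and tcp_metrics_fill_info(), is that a later change
cannot quietly reintroduce a plain, unannotated racy access.
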
> ---
> net/ipv4/tcp_metrics.c | 23 ++++++++++++++---------
> 1 file changed, 14 insertions(+), 9 deletions(-)
>
> diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
> index 131fa300496914f78c682182f0db480ceb71b6a0..fd4ab7a51cef210005146dfbc3235a2db717a44f 100644
> --- a/net/ipv4/tcp_metrics.c
> +++ b/net/ipv4/tcp_metrics.c
> @@ -63,17 +63,19 @@ static bool tcp_metric_locked(struct tcp_metrics_block *tm,
> return READ_ONCE(tm->tcpm_lock) & (1 << idx);
> }
>
> -static u32 tcp_metric_get(struct tcp_metrics_block *tm,
> +static u32 tcp_metric_get(const struct tcp_metrics_block *tm,
> enum tcp_metric_index idx)
> {
> - return tm->tcpm_vals[idx];
> + /* Paired with WRITE_ONCE() in tcp_metric_set() */
> + return READ_ONCE(tm->tcpm_vals[idx]);
> }
>
> static void tcp_metric_set(struct tcp_metrics_block *tm,
> enum tcp_metric_index idx,
> u32 val)
> {
> - tm->tcpm_vals[idx] = val;
> + /* Paired with READ_ONCE() in tcp_metric_get() */
> + WRITE_ONCE(tm->tcpm_vals[idx], val);
> }
>
> static bool addr_same(const struct inetpeer_addr *a,
> @@ -115,13 +117,16 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm,
> WRITE_ONCE(tm->tcpm_lock, val);
>
> msval = dst_metric_raw(dst, RTAX_RTT);
> - tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;
> + tcp_metric_set(tm, TCP_METRIC_RTT, msval * USEC_PER_MSEC);
>
> msval = dst_metric_raw(dst, RTAX_RTTVAR);
> - tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
> - tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
> - tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
> - tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
> + tcp_metric_set(tm, TCP_METRIC_RTTVAR, msval * USEC_PER_MSEC);
> + tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
> + dst_metric_raw(dst, RTAX_SSTHRESH));
> + tcp_metric_set(tm, TCP_METRIC_CWND,
> + dst_metric_raw(dst, RTAX_CWND));
> + tcp_metric_set(tm, TCP_METRIC_REORDERING,
> + dst_metric_raw(dst, RTAX_REORDERING));
> if (fastopen_clear) {
> tm->tcpm_fastopen.mss = 0;
> tm->tcpm_fastopen.syn_loss = 0;
> @@ -667,7 +672,7 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
> if (!nest)
> goto nla_put_failure;
> for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
> - u32 val = tm->tcpm_vals[i];
> + u32 val = tcp_metric_get(tm, i);
>
> if (!val)
> continue;
> --
> 2.41.0.640.ga95def55d0-goog