Message-ID: <77e06763-76a8-46ab-9a51-efc708720a98@linux.dev>
Date: Mon, 26 Jan 2026 19:17:35 +0000
From: Vadim Fedorenko <vadim.fedorenko@...ux.dev>
To: David Yang <mmyangfl@...il.com>, netdev@...r.kernel.org
Cc: Florian Fainelli <florian.fainelli@...adcom.com>,
Broadcom internal kernel review list
<bcm-kernel-feedback-list@...adcom.com>, Andrew Lunn
<andrew+netdev@...n.ch>, "David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>, linux-kernel@...r.kernel.org
Subject: Re: [RFC net-next] net: systemport: Use u64_stats_t with
u64_stats_sync properly
On 22/01/2026 20:15, David Yang wrote:
> On 64bit arches, struct u64_stats_sync is empty and provides no help
> against load/store tearing. Convert to u64_stats_t to ensure atomic
> operations.
>
> Signed-off-by: David Yang <mmyangfl@...il.com>
> ---
> RFC Comment:
>
> I couldn't find the lock associated with u64_stats_sync. Should this be
> considered an issue?
Didn't get the question. The u64_stats_sync structure has its own
synchronization (a seqcount) for the cases where atomic 64-bit
operations are not possible.
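For reference, the canonical pairing looks roughly like this (just the
generic pattern from include/linux/u64_stats_sync.h, nothing
driver-specific; syncp/stats are placeholders here):

	/* writer side, typically in the datapath */
	u64_stats_update_begin(&syncp);
	u64_stats_add(&stats->bytes, len);
	u64_stats_inc(&stats->packets);
	u64_stats_update_end(&syncp);

	/* reader side */
	do {
		start = u64_stats_fetch_begin(&syncp);
		bytes = u64_stats_read(&stats->bytes);
		packets = u64_stats_read(&stats->packets);
	} while (u64_stats_fetch_retry(&syncp, start));

On 64bit the begin/end pair compiles away and u64_stats_read() /
u64_stats_add() alone avoid tearing; on 32bit the seqcount makes the
reader retry until it observes a consistent snapshot.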
> drivers/net/ethernet/broadcom/bcmsysport.c | 56 ++++++++++++----------
> drivers/net/ethernet/broadcom/bcmsysport.h | 12 ++---
> 2 files changed, 37 insertions(+), 31 deletions(-)
>
> diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
> index bc4e1f3b3752..7017b2d2a1b5 100644
> --- a/drivers/net/ethernet/broadcom/bcmsysport.c
> +++ b/drivers/net/ethernet/broadcom/bcmsysport.c
> @@ -435,8 +435,8 @@ static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
> ring = &priv->tx_rings[q];
> do {
> start = u64_stats_fetch_begin(&priv->syncp);
> - bytes = ring->bytes;
> - packets = ring->packets;
> + bytes = u64_stats_read(&ring->bytes);
> + packets = u64_stats_read(&ring->packets);
> } while (u64_stats_fetch_retry(&priv->syncp, start));
Please look at the comment below, which refers back to this block...
>
> *tx_bytes += bytes;
> @@ -458,8 +458,10 @@ static void bcm_sysport_get_stats(struct net_device *dev,
> if (netif_running(dev)) {
> bcm_sysport_update_mib_counters(priv);
> bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
> - stats64->tx_bytes = tx_bytes;
> - stats64->tx_packets = tx_packets;
> + u64_stats_update_begin(&priv->syncp);
> + u64_stats_set(&stats64->tx_bytes, tx_bytes);
> + u64_stats_set(&stats64->tx_packets, tx_packets);
> + u64_stats_update_end(&priv->syncp);
> }
>
> for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
> @@ -482,28 +484,32 @@ static void bcm_sysport_get_stats(struct net_device *dev,
> s->type == BCM_SYSPORT_STAT_NETDEV64) {
> do {
> start = u64_stats_fetch_begin(syncp);
> - data[i] = *(u64 *)p;
> + data[i] = u64_stats_read((u64_stats_t *)p);
> } while (u64_stats_fetch_retry(syncp, start));
> } else
> data[i] = *(u32 *)p;
> j++;
> }
>
> - /* For SYSTEMPORT Lite since we have holes in our statistics, j would
> - * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
> - * needs to point to how many total statistics we have minus the
> - * number of per TX queue statistics
> - */
> - j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
> - dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
> + do {
> + /* For SYSTEMPORT Lite since we have holes in our statistics, j
> + * would be equal to BCM_SYSPORT_STATS_LEN at the end of the
> + * loop, but it needs to point to how many total statistics we
> + * have minus the number of per TX queue statistics
> + */
> + j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
> + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
>
> - for (i = 0; i < dev->num_tx_queues; i++) {
> - ring = &priv->tx_rings[i];
> - data[j] = ring->packets;
> - j++;
> - data[j] = ring->bytes;
> - j++;
> - }
> + start = u64_stats_fetch_begin(syncp);
> +
> + for (i = 0; i < dev->num_tx_queues; i++) {
> + ring = &priv->tx_rings[i];
> + data[j] = u64_stats_read(&ring->packets);
> + j++;
> + data[j] = u64_stats_read(&ring->bytes);
> + j++;
> + }
> + } while (u64_stats_fetch_retry(syncp, start));
The ring is protected by ring::lock, so there is no need to introduce
another per-device lock here. The ring statistics can be protected by
the same lock, and to improve the code ring::lock can be converted into
a seqlock_t.
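Something like this on the read side, just to illustrate (untested
sketch, assuming ring::lock is converted to a seqlock_t):

	unsigned int seq;

	do {
		seq = read_seqbegin(&ring->lock);
		packets = ring->packets;
		bytes = ring->bytes;
	} while (read_seqretry(&ring->lock, seq));

With the seqlock providing the read-side retry, the per-ring counters
can stay plain u64 and still be read tear-free on 32bit, without
touching priv->syncp at all.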
> }
>
> static void bcm_sysport_get_wol(struct net_device *dev,
> @@ -829,8 +835,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
> ndev->stats.rx_packets++;
> ndev->stats.rx_bytes += len;
> u64_stats_update_begin(&priv->syncp);
> - stats64->rx_packets++;
> - stats64->rx_bytes += len;
> + u64_stats_inc(&stats64->rx_packets);
> + u64_stats_add(&stats64->rx_bytes, len);
> u64_stats_update_end(&priv->syncp);
>
> napi_gro_receive(&priv->napi, skb);
> @@ -914,8 +920,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
> }
>
> u64_stats_update_begin(&priv->syncp);
> - ring->packets += pkts_compl;
> - ring->bytes += bytes_compl;
> + u64_stats_add(&ring->packets, pkts_compl);
> + u64_stats_add(&ring->bytes, bytes_compl);
> u64_stats_update_end(&priv->syncp);
Here again, there is no need for a device-wide lock in the hot path.
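With ring::lock as a seqlock_t, the update side stays in the reclaim
path, which already runs under that lock in its callers, e.g. (again
only a sketch; the irqsave/_bh flavour has to match whatever the driver
uses for ring::lock today, and "released"/"flags" are just placeholder
names):

	/* caller side, replacing the current spin_lock on ring->lock */
	write_seqlock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	write_sequnlock_irqrestore(&ring->lock, flags);

	/* and in __bcm_sysport_tx_reclaim() the counters are bumped
	 * under that same lock, no u64_stats_update_begin() needed:
	 */
	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;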
>
> ring->c_index = c_index;
> @@ -1857,8 +1863,8 @@ static void bcm_sysport_get_stats64(struct net_device *dev,
>
> do {
> start = u64_stats_fetch_begin(&priv->syncp);
> - stats->rx_packets = stats64->rx_packets;
> - stats->rx_bytes = stats64->rx_bytes;
> + stats->rx_packets = u64_stats_read(&stats64->rx_packets);
> + stats->rx_bytes = u64_stats_read(&stats64->rx_bytes);
> } while (u64_stats_fetch_retry(&priv->syncp, start));
> }
>
> diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
> index a34296f989f1..91b1c8293a23 100644
> --- a/drivers/net/ethernet/broadcom/bcmsysport.h
> +++ b/drivers/net/ethernet/broadcom/bcmsysport.h
> @@ -652,10 +652,10 @@ struct bcm_sysport_stats {
>
> struct bcm_sysport_stats64 {
> /* 64bit stats on 32bit/64bit Machine */
> - u64 rx_packets;
> - u64 rx_bytes;
> - u64 tx_packets;
> - u64 tx_bytes;
> + u64_stats_t rx_packets;
> + u64_stats_t rx_bytes;
> + u64_stats_t tx_packets;
> + u64_stats_t tx_bytes;
> };
>
> /* Software house keeping helper structure */
> @@ -698,8 +698,8 @@ struct bcm_sysport_tx_ring {
> unsigned int clean_index; /* Current clean index */
> struct bcm_sysport_cb *cbs; /* Transmit control blocks */
> struct bcm_sysport_priv *priv; /* private context backpointer */
> - unsigned long packets; /* packets statistics */
> - unsigned long bytes; /* bytes statistics */
> + u64_stats_t packets; /* packets statistics */
> + u64_stats_t bytes; /* bytes statistics */
> unsigned int switch_queue; /* switch port queue number */
> unsigned int switch_port; /* switch port queue number */
> bool inspect; /* inspect switch port and queue */