Message-ID: <b37ea0be-0f37-4a78-b6ce-fc49610c00cc@broadcom.com>
Date: Wed, 14 May 2025 10:45:23 +0200
From: Florian Fainelli <florian.fainelli@...adcom.com>
To: Zak Kemble <zakkemble@...il.com>, Doug Berger <opendmb@...il.com>,
Broadcom internal kernel review list
<bcm-kernel-feedback-list@...adcom.com>, Andrew Lunn
<andrew+netdev@...n.ch>, "David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>, netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH 1/3] net: bcmgenet: switch to use 64bit statistics
On 5/13/2025 4:41 PM, Zak Kemble wrote:
> Update the driver to use ndo_get_stats64, rtnl_link_stats64 and
> u64_stats_t counters for statistics.
>
> Signed-off-by: Zak Kemble <zakkemble@...il.com>
> ---
[snip]
>
> +
> +
This is unrelated to your changes.
>  static void bcmgenet_get_ethtool_stats(struct net_device *dev,
>                                         struct ethtool_stats *stats,
>                                         u64 *data)
>  {
>          struct bcmgenet_priv *priv = netdev_priv(dev);
> +        struct u64_stats_sync *syncp;
> +        struct rtnl_link_stats64 stats64;
> +        unsigned int start;
>          int i;
>
>          if (netif_running(dev))
>                  bcmgenet_update_mib_counters(priv);
>
> -        dev->netdev_ops->ndo_get_stats(dev);
> +        dev_get_stats(dev, &stats64);
>
>          for (i = 0; i < BCMGENET_STATS_LEN; i++) {
>                  const struct bcmgenet_stats *s;
>                  char *p;
>
>                  s = &bcmgenet_gstrings_stats[i];
> -                if (s->type == BCMGENET_STAT_NETDEV)
> -                        p = (char *)&dev->stats;
> +                if (s->type == BCMGENET_STAT_RTNL)
> +                        p = (char *)&stats64;
>                  else
>                          p = (char *)priv;
>                  p += s->stat_offset;
> -                if (sizeof(unsigned long) != sizeof(u32) &&
> +                if (s->type == BCMGENET_STAT_SOFT64) {
> +                        syncp = (struct u64_stats_sync *)(p - s->stat_offset +
> +                                                          s->syncp_offset);
This is a bit difficult to read, but I understand why you would want to
do something like this to avoid having to distinguish the rx stats from
the tx stats...
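Maybe something like this would be a little easier to follow (untested
sketch, just reshuffling the lines from your patch so that syncp is
derived from the base pointer before stat_offset is added):

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_RTNL)
			p = (char *)&stats64;
		else
			p = (char *)priv;

		/* same base pointer, so no need to subtract stat_offset */
		if (s->type == BCMGENET_STAT_SOFT64)
			syncp = (struct u64_stats_sync *)(p + s->syncp_offset);

		p += s->stat_offset;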
> +                        do {
> +                                start = u64_stats_fetch_begin(syncp);
> +                                data[i] = u64_stats_read((u64_stats_t *)p);
> +                        } while (u64_stats_fetch_retry(syncp, start));
> +                } else if (sizeof(unsigned long) != sizeof(u32) &&
>                      s->stat_sizeof == sizeof(unsigned long))
>                          data[i] = *(unsigned long *)p;
>                  else
> @@ -1857,6 +1881,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
>                                            struct bcmgenet_tx_ring *ring)
>  {
>          struct bcmgenet_priv *priv = netdev_priv(dev);
> +        struct bcmgenet_tx_stats64 *stats = &ring->stats64;
>          unsigned int txbds_processed = 0;
>          unsigned int bytes_compl = 0;
>          unsigned int pkts_compl = 0;
> @@ -1896,8 +1921,10 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
>          ring->free_bds += txbds_processed;
>          ring->c_index = c_index;
>
> -        ring->packets += pkts_compl;
> -        ring->bytes += bytes_compl;
> +        u64_stats_update_begin(&stats->syncp);
> +        u64_stats_add(&stats->packets, pkts_compl);
> +        u64_stats_add(&stats->bytes, bytes_compl);
> +        u64_stats_update_end(&stats->syncp);
>
>          netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->index),
>                                    pkts_compl, bytes_compl);
> @@ -1983,9 +2010,11 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
>   * the transmit checksum offsets in the descriptors
>   */
>  static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
> -                                        struct sk_buff *skb)
> +                                        struct sk_buff *skb,
> +                                        struct bcmgenet_tx_ring *ring)
>  {
>          struct bcmgenet_priv *priv = netdev_priv(dev);
> +        struct bcmgenet_tx_stats64 *stats = &ring->stats64;
>          struct status_64 *status = NULL;
>          struct sk_buff *new_skb;
>          u16 offset;
> @@ -2001,7 +2030,9 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
>                  if (!new_skb) {
>                          dev_kfree_skb_any(skb);
>                          priv->mib.tx_realloc_tsb_failed++;
> -                        dev->stats.tx_dropped++;
> +                        u64_stats_update_begin(&stats->syncp);
> +                        u64_stats_inc(&stats->dropped);
> +                        u64_stats_update_end(&stats->syncp);
>                          return NULL;
>                  }
>                  dev_consume_skb_any(skb);
> @@ -2089,7 +2120,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
>          GENET_CB(skb)->bytes_sent = skb->len;
>
>          /* add the Transmit Status Block */
> -        skb = bcmgenet_add_tsb(dev, skb);
> +        skb = bcmgenet_add_tsb(dev, skb, ring);
>          if (!skb) {
>                  ret = NETDEV_TX_OK;
>                  goto out;
> @@ -2233,6 +2264,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
>  {
>          struct bcmgenet_priv *priv = ring->priv;
>          struct net_device *dev = priv->dev;
> +        struct bcmgenet_rx_stats64 *stats = &ring->stats64;
>          struct enet_cb *cb;
>          struct sk_buff *skb;
>          u32 dma_length_status;
> @@ -2253,7 +2285,9 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
>                     DMA_P_INDEX_DISCARD_CNT_MASK;
>          if (discards > ring->old_discards) {
>                  discards = discards - ring->old_discards;
> -                ring->errors += discards;
> +                u64_stats_update_begin(&stats->syncp);
> +                u64_stats_add(&stats->errors, discards);
> +                u64_stats_update_end(&stats->syncp);
>                  ring->old_discards += discards;
Can't you fold the update into a single block?
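Roughly what I have in mind, untested and with the "errors"/"dropped"
locals being purely illustrative:

	unsigned int errors = 0, dropped = 0;
	...
	if (discards > ring->old_discards) {
		discards = discards - ring->old_discards;
		/* only accumulate into the local here */
		errors += discards;
		ring->old_discards += discards;
		...
	}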
>
>                  /* Clear HW register when we reach 75% of maximum 0xFFFF */
> @@ -2279,7 +2313,9 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
>                  skb = bcmgenet_rx_refill(priv, cb);
>
>                  if (unlikely(!skb)) {
> -                        ring->dropped++;
> +                        u64_stats_update_begin(&stats->syncp);
> +                        u64_stats_inc(&stats->dropped);
> +                        u64_stats_update_end(&stats->syncp);
>                          goto next;
Similar comment as above: this would be better done in a single
location, and the same applies to the updates further below.
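In other words, a single u64_stats_update_begin()/end() section at the
end of bcmgenet_desc_rx() could flush everything at once (again
untested, reusing the illustrative locals from the sketch above):

	u64_stats_update_begin(&stats->syncp);
	u64_stats_add(&stats->errors, errors);
	u64_stats_add(&stats->dropped, dropped);
	/* and presumably the rx packets/bytes counters as well, if they
	 * end up in the same per-ring structure
	 */
	u64_stats_update_end(&stats->syncp);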
--
Florian