[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20171102195827.GM24320@lunn.ch>
Date: Thu, 2 Nov 2017 20:58:27 +0100
From: Andrew Lunn <andrew@...n.ch>
To: Miquel Raynal <miquel.raynal@...e-electrons.com>
Cc: "David S . Miller" <davem@...emloft.net>,
Thomas Petazzoni <thomas.petazzoni@...e-electrons.com>,
Antoine Tenart <antoine.tenart@...e-electrons.com>,
Gregory Clement <gregory.clement@...e-electrons.com>,
Nadav Haklai <nadavh@...vell.com>, netdev@...r.kernel.org,
Stefan Chulski <stefanc@...vell.com>
Subject: Re: [PATCH] net: mvpp2: add ethtool GOP statistics
Hi Miquel,
> +static struct mvpp2_ethtool_statistics mvpp2_ethtool_stats[] = {
This can probably be const, and save a few bytes of RAM.
> + { MVPP2_MIB_GOOD_OCTETS_RCVD_LOW, "good_octets_received" },
> + { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
> + { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
> + { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
> + { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
> + { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
> + { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
> + { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
> + { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
> + { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
> + { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
> + { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
> + { MVPP2_MIB_GOOD_OCTETS_SENT_LOW, "good_octets_sent" },
> + { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
> + { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
> + { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
> + { MVPP2_MIB_FC_SENT, "fc_sent" },
> + { MVPP2_MIB_FC_RCVD, "fc_received" },
> + { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
> + { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
> + { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
> + { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
> + { MVPP2_MIB_JABBER_RCVD, "jabber_received" },
> + { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
> + { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
> + { MVPP2_MIB_COLLISION, "collision" },
> + { MVPP2_MIB_LATE_COLLISION, "late_collision" },
> +};
> +static void mvpp2_gather_hw_statistics(struct work_struct *work)
> +{
> + struct delayed_work *del_work = to_delayed_work(work);
> + struct mvpp2 *priv = container_of(del_work, struct mvpp2, stats_work);
> + struct mvpp2_port *port;
> + u64 *pstats;
> + int i, j;
> +
> + for (i = 0; i < priv->port_count; i++) {
> + if (!priv->port_list[i])
> + continue;
> +
> + port = priv->port_list[i];
> + pstats = port->ethtool_stats;
> + for (j = 0; j < ARRAY_SIZE(mvpp2_ethtool_stats); j++)
> + *pstats++ += mvpp2_read_count(
> + port, mvpp2_ethtool_stats[j].offset);
> + }
> +
> + /* No need to read again the counters right after this function if it
> + * was called asynchronously by the user (ie. use of ethtool).
> + */
> + cancel_delayed_work(&priv->stats_work);
> + queue_delayed_work(priv->stats_queue, &priv->stats_work,
> + MVPP2_MIB_COUNTERS_STATS_DELAY);
> +}
> +
> +static void mvpp2_ethtool_get_stats(struct net_device *dev,
> + struct ethtool_stats *stats, u64 *data)
> +{
> + struct mvpp2_port *port = netdev_priv(dev);
> +
> + /* Update statistics for all ports, copy only those actually needed */
> + mvpp2_gather_hw_statistics(&port->priv->stats_work.work);
Shouldn't there be some locking here? What if
mvpp2_gather_hw_statistics() is already running?
> @@ -7613,13 +7788,19 @@ static int mvpp2_port_probe(struct platform_device *pdev,
> port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
> }
>
> - /* Alloc per-cpu stats */
> + /* Alloc per-cpu and ethtool stats */
> port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
> if (!port->stats) {
> err = -ENOMEM;
> goto err_free_irq;
> }
>
> + port->ethtool_stats = kzalloc(sizeof(mvpp2_ethtool_stats), GFP_KERNEL);
devm_ to make the cleanup simpler?
> + /* This work recall himself within a delay. If the cancellation returned
> + * a non-zero value, it means a work is still running. In that case, use
> + * use the flush (returns when the running work will be done) and cancel
One use is enough.
> + * the new work that was just submitted to the queue but not started yet
> + * due to the delay.
> + */
> + if (!cancel_delayed_work(&priv->stats_work)) {
> + flush_workqueue(priv->stats_queue);
> + cancel_delayed_work(&priv->stats_work);
> + }
Why is cancel_delayed_work_sync() not enough?
Andrew
Powered by blists - more mailing lists