Date:   Fri, 29 Jun 2018 17:04:34 -0700
From:   Jakub Kicinski <jakub.kicinski@...ronome.com>
To:     davem@...emloft.net
Cc:     oss-drivers@...ronome.com, netdev@...r.kernel.org,
        Jakub Kicinski <jakub.kicinski@...ronome.com>
Subject: [PATCH net-next 1/9] nfp: expose ring stats of inactive rings via ethtool

After the user changes the ring count, statistics for
deactivated rings disappear from ethtool -S output.  This
causes a loss of information to the user and means that
ethtool stats may not add up to interface stats.  Always
expose counters from all the rings.  Note that we allocate
at most num_possible_cpus() rings, so the number of rings
should be reasonable.

The alternative of only listing stats for rings which were
ever in use could be confusing.
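
To illustrate the user-visible effect, a hypothetical sequence
(interface name is an example; the exact ethtool -L channel
syntax depends on how the channels are configured):

    # count the per-ring RX counters currently exposed
    ethtool -S eth0 | grep -c 'rxq_.*_pkts'

    # shrink the ring count
    ethtool -L eth0 combined 2

    # without this patch only the still-active rings are listed
    # and the counters of the deactivated rings drop out of the
    # -S output; with it, all max_r_vecs rings remain visible
    ethtool -S eth0 | grep -c 'rxq_.*_pkts'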

Signed-off-by: Jakub Kicinski <jakub.kicinski@...ronome.com>
Reviewed-by: Dirk van der Merwe <dirk.vandermerwe@...ronome.com>
---
 .../ethernet/netronome/nfp/nfp_net_ethtool.c  | 50 +++++++------------
 1 file changed, 19 insertions(+), 31 deletions(-)

diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 26d1cc4e2906..2aeb4622f1ea 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -452,7 +452,7 @@ static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
 {
 	struct nfp_net *nn = netdev_priv(netdev);
 
-	return NN_RVEC_GATHER_STATS + nn->dp.num_r_vecs * NN_RVEC_PER_Q_STATS;
+	return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS;
 }
 
 static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
@@ -460,7 +460,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
 	struct nfp_net *nn = netdev_priv(netdev);
 	int i;
 
-	for (i = 0; i < nn->dp.num_r_vecs; i++) {
+	for (i = 0; i < nn->max_r_vecs; i++) {
 		data = nfp_pr_et(data, "rvec_%u_rx_pkts", i);
 		data = nfp_pr_et(data, "rvec_%u_tx_pkts", i);
 		data = nfp_pr_et(data, "rvec_%u_tx_busy", i);
@@ -486,7 +486,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
 	u64 tmp[NN_RVEC_GATHER_STATS];
 	unsigned int i, j;
 
-	for (i = 0; i < nn->dp.num_r_vecs; i++) {
+	for (i = 0; i < nn->max_r_vecs; i++) {
 		unsigned int start;
 
 		do {
@@ -521,15 +521,13 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
 	return data;
 }
 
-static unsigned int
-nfp_vnic_get_hw_stats_count(unsigned int rx_rings, unsigned int tx_rings)
+static unsigned int nfp_vnic_get_hw_stats_count(unsigned int num_vecs)
 {
-	return NN_ET_GLOBAL_STATS_LEN + (rx_rings + tx_rings) * 2;
+	return NN_ET_GLOBAL_STATS_LEN + num_vecs * 4;
 }
 
 static u8 *
-nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings,
-			      unsigned int tx_rings, bool repr)
+nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr)
 {
 	int swap_off, i;
 
@@ -549,36 +547,29 @@ nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings,
 	for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++)
 		data = nfp_pr_et(data, nfp_net_et_stats[i].name);
 
-	for (i = 0; i < tx_rings; i++) {
-		data = nfp_pr_et(data, "txq_%u_pkts", i);
-		data = nfp_pr_et(data, "txq_%u_bytes", i);
-	}
-
-	for (i = 0; i < rx_rings; i++) {
+	for (i = 0; i < num_vecs; i++) {
 		data = nfp_pr_et(data, "rxq_%u_pkts", i);
 		data = nfp_pr_et(data, "rxq_%u_bytes", i);
+		data = nfp_pr_et(data, "txq_%u_pkts", i);
+		data = nfp_pr_et(data, "txq_%u_bytes", i);
 	}
 
 	return data;
 }
 
 static u64 *
-nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem,
-		      unsigned int rx_rings, unsigned int tx_rings)
+nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs)
 {
 	unsigned int i;
 
 	for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++)
 		*data++ = readq(mem + nfp_net_et_stats[i].off);
 
-	for (i = 0; i < tx_rings; i++) {
-		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
-		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
-	}
-
-	for (i = 0; i < rx_rings; i++) {
+	for (i = 0; i < num_vecs; i++) {
 		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
 		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
+		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
+		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
 	}
 
 	return data;
@@ -633,8 +624,7 @@ static void nfp_net_get_strings(struct net_device *netdev,
 	switch (stringset) {
 	case ETH_SS_STATS:
 		data = nfp_vnic_get_sw_stats_strings(netdev, data);
-		data = nfp_vnic_get_hw_stats_strings(data, nn->dp.num_rx_rings,
-						     nn->dp.num_tx_rings,
+		data = nfp_vnic_get_hw_stats_strings(data, nn->max_r_vecs,
 						     false);
 		data = nfp_mac_get_stats_strings(netdev, data);
 		data = nfp_app_port_get_stats_strings(nn->port, data);
@@ -649,8 +639,7 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
 	struct nfp_net *nn = netdev_priv(netdev);
 
 	data = nfp_vnic_get_sw_stats(netdev, data);
-	data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar,
-				     nn->dp.num_rx_rings, nn->dp.num_tx_rings);
+	data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, nn->max_r_vecs);
 	data = nfp_mac_get_stats(netdev, data);
 	data = nfp_app_port_get_stats(nn->port, data);
 }
@@ -662,8 +651,7 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
 	switch (sset) {
 	case ETH_SS_STATS:
 		return nfp_vnic_get_sw_stats_count(netdev) +
-		       nfp_vnic_get_hw_stats_count(nn->dp.num_rx_rings,
-						   nn->dp.num_tx_rings) +
+		       nfp_vnic_get_hw_stats_count(nn->max_r_vecs) +
 		       nfp_mac_get_stats_count(netdev) +
 		       nfp_app_port_get_stats_count(nn->port);
 	default:
@@ -679,7 +667,7 @@ static void nfp_port_get_strings(struct net_device *netdev,
 	switch (stringset) {
 	case ETH_SS_STATS:
 		if (nfp_port_is_vnic(port))
-			data = nfp_vnic_get_hw_stats_strings(data, 0, 0, true);
+			data = nfp_vnic_get_hw_stats_strings(data, 0, true);
 		else
 			data = nfp_mac_get_stats_strings(netdev, data);
 		data = nfp_app_port_get_stats_strings(port, data);
@@ -694,7 +682,7 @@ nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
 	struct nfp_port *port = nfp_port_from_netdev(netdev);
 
 	if (nfp_port_is_vnic(port))
-		data = nfp_vnic_get_hw_stats(data, port->vnic, 0, 0);
+		data = nfp_vnic_get_hw_stats(data, port->vnic, 0);
 	else
 		data = nfp_mac_get_stats(netdev, data);
 	data = nfp_app_port_get_stats(port, data);
@@ -708,7 +696,7 @@ static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
 	switch (sset) {
 	case ETH_SS_STATS:
 		if (nfp_port_is_vnic(port))
-			count = nfp_vnic_get_hw_stats_count(0, 0);
+			count = nfp_vnic_get_hw_stats_count(0);
 		else
 			count = nfp_mac_get_stats_count(netdev);
 		count += nfp_app_port_get_stats_count(port);
-- 
2.17.1
