Message-Id: <20190128234507.32028-14-jakub.kicinski@netronome.com>
Date: Mon, 28 Jan 2019 15:45:06 -0800
From: Jakub Kicinski <jakub.kicinski@...ronome.com>
To: davem@...emloft.net
Cc: oss-drivers@...ronome.com, netdev@...r.kernel.org,
jiri@...nulli.us, f.fainelli@...il.com, andrew@...n.ch,
mkubecek@...e.cz, dsahern@...il.com, simon.horman@...ronome.com,
jesse.brandeburg@...el.com, maciejromanfijalkowski@...il.com,
vasundhara-v.volam@...adcom.com, michael.chan@...adcom.com,
shalomt@...lanox.com, idosch@...lanox.com,
Jakub Kicinski <jakub.kicinski@...ronome.com>
Subject: [RFC 13/14] nfp: hstats: add a partial group of per-8021Q prio stats
The MAC maintains per-priority counts of PFC pause frames. Unfortunately,
I couldn't find counters like these in any standard, so add a partial
group with them (partial because the HW doesn't give us a "non-PFC"
pause count).
Signed-off-by: Jakub Kicinski <jakub.kicinski@...ronome.com>
---
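For reviewers, a standalone sketch of the idea behind the remap tables in
the diff: each 802.1Q priority maps to a fixed offset of an 8-byte MAC
pause-frame counter, which the driver then reads with a single readq().
Everything below is illustrative user-space C, not driver or hstats API
code -- mac_pause_read(), fake_bar and friends are made-up names, only
the RX offsets are taken from the patch itself.

/*
 * Illustration only: translate an 802.1Q priority to the per-class
 * RX PFC pause counter offset and read the 64-bit value.  The stats
 * window here is a plain zeroed array standing in for the device BAR.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* RX PFC pause-frame counter offsets, indexed by 802.1Q priority. */
static const uint32_t pause_rx_off[8] = {
        0xe0, 0xe8, 0xb0, 0xb8, 0xf0, 0xf8, 0x100, 0x108
};

/* Stand-in for readq(port->eth_stats + remap[prio]). */
static uint64_t mac_pause_read(const uint8_t *stats_base, unsigned int prio)
{
        uint64_t val;

        memcpy(&val, stats_base + pause_rx_off[prio], sizeof(val));
        return val;
}

int main(void)
{
        static uint8_t fake_bar[0x200]; /* zeroed fake stats window */
        unsigned int prio;

        for (prio = 0; prio < 8; prio++)
                printf("prio %u: RX PFC pause frames = %llu\n", prio,
                       (unsigned long long)mac_pause_read(fake_bar, prio));
        return 0;
}

The real get_stats callback does the same thing against the mapped
port->eth_stats window, picking remap_rx or remap_tx based on the
direction qualifier.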
.../net/ethernet/netronome/nfp/nfp_hstat.c | 44 +++++++++++++++++++
1 file changed, 44 insertions(+)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_hstat.c b/drivers/net/ethernet/netronome/nfp/nfp_hstat.c
index cd97cd2676f6..ced4edca8b73 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_hstat.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_hstat.c
@@ -216,6 +216,49 @@ static const struct rtnl_hstat_group nfp_hstat_tm = {
         .stats_cnt = 1,
 };
 
+static int
+nfp_hstat_mac_pp_pause(struct net_device *netdev, struct rtnl_hstat_req *req,
+                       const struct rtnl_hstat_group *grp)
+{
+        static const u32 remap_rx[] = {
+                0xe0, 0xe8, 0xb0, 0xb8, 0xf0, 0xf8, 0x100, 0x108
+        };
+        static const u32 remap_tx[] = {
+                0x1c0, 0x1c8, 0x1e0, 0x1e8, 0x1d0, 0x1d8, 0x1f0, 0x1f8
+        };
+        struct nfp_port *port;
+        const u32 *remap;
+        u8 dir, prio;
+
+        port = nfp_port_from_netdev(netdev);
+        if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
+                return -EINVAL;
+
+        prio = rtnl_hstat_qual_get(req, RTNL_HSTATS_QUAL_PRIORITY);
+        dir = rtnl_hstat_qual_get(req, RTNL_HSTATS_QUAL_DIRECTION);
+        remap = dir == IFLA_HSTATS_QUAL_DIR_RX ? remap_rx : remap_tx;
+
+        rtnl_hstat_dump(req, IFLA_HSTATS_STAT_IEEE8023_PAUSEMACCtrlFrames,
+                        readq(port->eth_stats + remap[prio]));
+        return 0;
+}
+
+static const struct rtnl_hstat_group nfp_hstat_pp_pause = {
+        .qualifiers = {
+                RTNL_HSTATS_QUALS_BASIC_BIDIR(DEV),
+                [RTNL_HSTATS_QUAL_PRIORITY] = {
+                        .max = 8,
+                },
+        },
+        .partial_flags = IFLA_HSTATS_PARTIAL_CLASSIFIER,
+
+        .get_stats = nfp_hstat_mac_pp_pause,
+        .stats = {
+                [1] = RTNL_HSTATS_STAT_IEEE8023_PAUSEMACCtrlFrames_BIT,
+        },
+        .stats_cnt = 1,
+};
+
/* NFD per-vNIC stats */
static int
nfp_hstat_vnic_nfd_basic_get(struct net_device *netdev,
@@ -402,6 +445,7 @@ int nfp_net_hstat_get_groups(const struct net_device *netdev,
         port = nfp_port_from_netdev(netdev);
         if (__nfp_port_get_eth_port(port) && port->eth_stats) {
                 rtnl_hstat_add_grp(req, &nfp_hstat_tm);
+                rtnl_hstat_add_grp(req, &nfp_hstat_pp_pause);
                 rtnl_hstat_add_grp(req, &nfp_hstat_mac);
         }
--
2.19.2