Message-Id: <20180402203120.14039-1-jakub.kicinski@netronome.com>
Date: Mon, 2 Apr 2018 13:31:20 -0700
From: Jakub Kicinski <jakub.kicinski@...ronome.com>
To: davem@...emloft.net
Cc: netdev@...r.kernel.org, oss-drivers@...ronome.com,
Jakub Kicinski <jakub.kicinski@...ronome.com>
Subject: [PATCH net-next] nfp: add a separate counter for packets with CHECKSUM_COMPLETE

We are currently counting packets with CHECKSUM_COMPLETE as
"hw_rx_csum_ok", which is confusing. Add a dedicated
"hw_rx_csum_complete" counter.
To make sure the new counter fits in the same cacheline as the
other RX counters, move the less frequently used error counter
to a different location.

Signed-off-by: Jakub Kicinski <jakub.kicinski@...ronome.com>
Reviewed-by: Dirk van der Merwe <dirk.vandermerwe@...ronome.com>
---
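Note for reviewers (not part of the commit message): the two modes these
counters now tell apart are handled differently by the stack. Below is a
minimal sketch of a driver's RX checksum decision, with hypothetical
helper and parameter names; illustrative only, not the nfp datapath:

#include <linux/skbuff.h>

static void rx_csum_sketch(struct sk_buff *skb, bool hw_verified,
			   __wsum hw_csum)
{
	if (hw_verified) {
		/* HW validated the checksum, so the stack skips its own
		 * check.  This is what "hw_rx_csum_ok" counts.
		 */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/* HW only computed the ones'-complement sum; the stack
		 * still verifies it against skb->csum.  These packets
		 * are now counted as "hw_rx_csum_complete" instead of
		 * being lumped into "hw_rx_csum_ok".
		 */
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = hw_csum;
	}
}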
drivers/net/ethernet/netronome/nfp/nfp_net.h | 4 +++-
drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 2 +-
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 16 +++++++++-------
3 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 787df47ec430..bd7d8ae31e17 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -391,6 +391,7 @@ struct nfp_net_rx_ring {
* @rx_drops: Number of packets dropped on RX due to lack of resources
* @hw_csum_rx_ok: Counter of packets where the HW checksum was OK
* @hw_csum_rx_inner_ok: Counter of packets where the inner HW checksum was OK
+ * @hw_csum_rx_complete: Counter of packets with CHECKSUM_COMPLETE reported
* @hw_csum_rx_error: Counter of packets with bad checksums
* @tx_sync: Seqlock for atomic updates of TX stats
* @tx_pkts: Number of Transmitted packets
@@ -434,7 +435,7 @@ struct nfp_net_r_vector {
u64 rx_drops;
u64 hw_csum_rx_ok;
u64 hw_csum_rx_inner_ok;
- u64 hw_csum_rx_error;
+ u64 hw_csum_rx_complete;

struct nfp_net_tx_ring *xdp_ring;
@@ -446,6 +447,7 @@ struct nfp_net_r_vector {
u64 tx_gather;
u64 tx_lso;

+ u64 hw_csum_rx_error;
u64 rx_replace_buf_alloc_fail;
u64 tx_errors;
u64 tx_busy;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 43a9c207a049..1eb6549f2a54 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1406,7 +1406,7 @@ static void nfp_net_rx_csum(struct nfp_net_dp *dp,
skb->ip_summed = meta->csum_type;
skb->csum = meta->csum;
u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->hw_csum_rx_ok++;
+ r_vec->hw_csum_rx_complete++;
u64_stats_update_end(&r_vec->rx_sync);
return;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index e1dae0616f52..c9016419bfa0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -179,7 +179,7 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9
-#define NN_RVEC_GATHER_STATS 8
+#define NN_RVEC_GATHER_STATS 9
#define NN_RVEC_PER_Q_STATS 3

static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
@@ -468,6 +468,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
data = nfp_pr_et(data, "hw_rx_csum_ok");
data = nfp_pr_et(data, "hw_rx_csum_inner_ok");
+ data = nfp_pr_et(data, "hw_rx_csum_complete");
data = nfp_pr_et(data, "hw_rx_csum_err");
data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
data = nfp_pr_et(data, "hw_tx_csum");
@@ -493,18 +494,19 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
data[0] = nn->r_vecs[i].rx_pkts;
tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
- tmp[2] = nn->r_vecs[i].hw_csum_rx_error;
- tmp[3] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
+ tmp[2] = nn->r_vecs[i].hw_csum_rx_complete;
+ tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
+ tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));

do {
start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
data[1] = nn->r_vecs[i].tx_pkts;
data[2] = nn->r_vecs[i].tx_busy;
- tmp[4] = nn->r_vecs[i].hw_csum_tx;
- tmp[5] = nn->r_vecs[i].hw_csum_tx_inner;
- tmp[6] = nn->r_vecs[i].tx_gather;
- tmp[7] = nn->r_vecs[i].tx_lso;
+ tmp[5] = nn->r_vecs[i].hw_csum_tx;
+ tmp[6] = nn->r_vecs[i].hw_csum_tx_inner;
+ tmp[7] = nn->r_vecs[i].tx_gather;
+ tmp[8] = nn->r_vecs[i].tx_lso;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));

data += NN_RVEC_PER_Q_STATS;
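Aside (not part of the patch): the read loops in the last hunk use the
kernel's u64_stats_sync so ethtool gets a consistent snapshot of 64-bit
counters that the datapath bumps without locks. A minimal sketch of the
pattern, with illustrative struct and function names:

#include <linux/u64_stats_sync.h>

struct stats_sketch {
	struct u64_stats_sync syncp;
	u64 pkts;
	u64 csum_complete;
};

/* Writer (datapath): effectively free on 64-bit, a seqcount on 32-bit. */
static void stats_sketch_inc(struct stats_sketch *s)
{
	u64_stats_update_begin(&s->syncp);
	s->pkts++;
	s->csum_complete++;
	u64_stats_update_end(&s->syncp);
}

/* Reader (e.g. ethtool): retry if a writer raced with the copy. */
static void stats_sketch_read(struct stats_sketch *s, u64 *pkts, u64 *csum)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*pkts = s->pkts;
		*csum = s->csum_complete;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}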
--
2.16.2