Message-Id: <e0c10323e86c0f5bb2b6d0d5d980b70789fa09a2.1581886691.git.lorenzo@kernel.org>
Date:   Sun, 16 Feb 2020 22:07:30 +0100
From:   Lorenzo Bianconi <lorenzo@...nel.org>
To:     netdev@...r.kernel.org
Cc:     ilias.apalodimas@...aro.org, davem@...emloft.net,
        lorenzo.bianconi@...hat.com, brouer@...hat.com
Subject: [PATCH net-next 2/5] net: mvneta: open-code stats updates for the non-xdp and tx paths

In order to avoid unnecessary instructions, open-code the per-cpu stats
updates in the mvneta_tx/mvneta_xdp_submit_frame and mvneta_rx_hwbm
routines instead of going through the mvneta_update_stats() helper.
This is a preliminary patch to add xdp support to ethtool for the
mvneta driver.

Signed-off-by: Lorenzo Bianconi <lorenzo@...nel.org>
---
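A minimal, self-contained sketch of the per-cpu stats pattern this
patch open-codes at each call site. The demo_* names below are
illustrative only and are not part of the driver:

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct demo_pcpu_stats {
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
};

/* Account one transmitted frame of 'len' bytes on the local cpu.
 * Writers only need the begin/end pair; readers (e.g. ndo_get_stats64)
 * pair u64_stats_fetch_begin() with u64_stats_fetch_retry() to obtain
 * a consistent snapshot on 32-bit hosts.
 */
static void demo_tx_account(struct demo_pcpu_stats __percpu *pcpu, u32 len)
{
	struct demo_pcpu_stats *stats = this_cpu_ptr(pcpu);

	u64_stats_update_begin(&stats->syncp);
	stats->tx_bytes += len;
	stats->tx_packets++;
	u64_stats_update_end(&stats->syncp);
}

Open-coding the update at each call site drops the function call and
the tx/rx branch that the old mvneta_update_stats(..., bool tx) helper
cost on every packet.
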
 drivers/net/ethernet/marvell/mvneta.c | 40 ++++++++++++++++-----------
 1 file changed, 24 insertions(+), 16 deletions(-)

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 7433a305e0ff..6552b84be7c9 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1945,19 +1945,13 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 }
 
 static void
-mvneta_update_stats(struct mvneta_port *pp, u32 pkts,
-		    u32 len, bool tx)
+mvneta_update_stats(struct mvneta_port *pp, u32 pkts, u32 len)
 {
 	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 
 	u64_stats_update_begin(&stats->syncp);
-	if (tx) {
-		stats->tx_packets += pkts;
-		stats->tx_bytes += len;
-	} else {
-		stats->rx_packets += pkts;
-		stats->rx_bytes += len;
-	}
+	stats->rx_packets += pkts;
+	stats->rx_bytes += len;
 	u64_stats_update_end(&stats->syncp);
 }
 
@@ -1996,6 +1990,7 @@ static int
 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
 			struct xdp_frame *xdpf, bool dma_map)
 {
+	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 	struct mvneta_tx_desc *tx_desc;
 	struct mvneta_tx_buf *buf;
 	dma_addr_t dma_addr;
@@ -2030,7 +2025,11 @@ mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
 	tx_desc->buf_phys_addr = dma_addr;
 	tx_desc->data_size = xdpf->len;
 
-	mvneta_update_stats(pp, 1, xdpf->len, true);
+	u64_stats_update_begin(&stats->syncp);
+	stats->tx_bytes += xdpf->len;
+	stats->tx_packets++;
+	u64_stats_update_end(&stats->syncp);
+
 	mvneta_txq_inc_put(txq);
 	txq->pending++;
 	txq->count++;
@@ -2189,8 +2188,7 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
 		ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp);
 		if (ret != MVNETA_XDP_PASS) {
 			mvneta_update_stats(pp, 1,
-					    xdp->data_end - xdp->data,
-					    false);
+					    xdp->data_end - xdp->data);
 			rx_desc->buf_phys_addr = 0;
 			*xdp_ret |= ret;
 			return ret;
@@ -2340,7 +2338,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 		xdp_do_flush_map();
 
 	if (rcvd_pkts)
-		mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false);
+		mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes);
 
 	/* return some buffers to hardware queue, one at a time is too slow */
 	refill = mvneta_rx_refill_queue(pp, rxq);
@@ -2470,8 +2468,14 @@ static int mvneta_rx_hwbm(struct napi_struct *napi,
 		napi_gro_receive(napi, skb);
 	}
 
-	if (rcvd_pkts)
-		mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false);
+	if (rcvd_pkts) {
+		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+		u64_stats_update_begin(&stats->syncp);
+		stats->rx_packets += rcvd_pkts;
+		stats->rx_bytes += rcvd_bytes;
+		u64_stats_update_end(&stats->syncp);
+	}
 
 	/* Update rxq management counters */
 	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
@@ -2727,6 +2731,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 out:
 	if (frags > 0) {
 		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 
 		netdev_tx_sent_queue(nq, len);
 
@@ -2740,7 +2745,10 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 		else
 			txq->pending += frags;
 
-		mvneta_update_stats(pp, 1, len, true);
+		u64_stats_update_begin(&stats->syncp);
+		stats->tx_bytes += len;
+		stats->tx_packets++;
+		u64_stats_update_end(&stats->syncp);
 	} else {
 		dev->stats.tx_dropped++;
 		dev_kfree_skb_any(skb);
-- 
2.24.1
