Message-Id: <1513416663-9321-1-git-send-email-yanjun.zhu@oracle.com>
Date:   Sat, 16 Dec 2017 04:31:03 -0500
From:   Zhu Yanjun <yanjun.zhu@...cle.com>
To:     netdev@...r.kernel.org, stephen@...workplumber.org
Subject: [PATCH net-next 1/1] forcedeth: remove duplicate structure member in xmit

Since both first_tx_ctx and tx_skb point to the head of the tx ctx
array, it is not necessary to keep two structure members that
statically indicate the same head. So first_tx_ctx is removed and its
uses are replaced with tx_skb.
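
For context, the pattern being simplified is the usual array-backed
ring walk: a cursor advances through a fixed array and wraps back to
the array base after passing the last element. A minimal standalone
sketch of that pattern (hypothetical names, not the driver's actual
types) might look like this:

	#include <stdio.h>

	#define RING_SIZE 4

	struct ctx { int id; };

	int main(void)
	{
		struct ctx ring[RING_SIZE];               /* analogous to np->tx_skb */
		struct ctx *last = &ring[RING_SIZE - 1];  /* analogous to np->last_tx_ctx */
		struct ctx *cur = ring;                   /* cursor, like np->put_tx_ctx */

		for (int i = 0; i < RING_SIZE; i++)
			ring[i].id = i;

		/* Walk ten steps, wrapping to the array base after the last
		 * slot. Because "ring" already names the head of the array,
		 * a separate first pointer (the removed first_tx_ctx) adds
		 * nothing.
		 */
		for (int i = 0; i < 10; i++) {
			printf("slot %d\n", cur->id);
			if (cur++ == last)
				cur = ring;
		}
		return 0;
	}

Every wrap site in the patch below follows this shape, substituting
np->tx_skb for the removed np->first_tx_ctx.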

CC: Srinivas Eeda <srinivas.eeda@...cle.com>
CC: Joe Jin <joe.jin@...cle.com>
CC: Junxiao Bi <junxiao.bi@...cle.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu@...cle.com>
---
 drivers/net/ethernet/nvidia/forcedeth.c | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)

diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index cadea67..49d6d78 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -824,7 +824,7 @@ struct fe_priv {
 	 */
 	union ring_type get_tx, put_tx, last_tx;
 	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
-	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
+	struct nv_skb_map *last_tx_ctx;
 	struct nv_skb_map *tx_skb;
 
 	union ring_type tx_ring;
@@ -1939,7 +1939,8 @@ static void nv_init_tx(struct net_device *dev)
 		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
 	else
 		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
-	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
+	np->get_tx_ctx = np->tx_skb;
+	np->put_tx_ctx = np->tx_skb;
 	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
 	netdev_reset_queue(np->dev);
 	np->tx_pkts_in_progress = 0;
@@ -2251,7 +2252,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (unlikely(put_tx++ == np->last_tx.orig))
 			put_tx = np->tx_ring.orig;
 		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-			np->put_tx_ctx = np->first_tx_ctx;
+			np->put_tx_ctx = np->tx_skb;
 	} while (size);
 
 	/* setup the fragments */
@@ -2277,7 +2278,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				do {
 					nv_unmap_txskb(np, start_tx_ctx);
 					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
-						tmp_tx_ctx = np->first_tx_ctx;
+						tmp_tx_ctx = np->tx_skb;
 				} while (tmp_tx_ctx != np->put_tx_ctx);
 				dev_kfree_skb_any(skb);
 				np->put_tx_ctx = start_tx_ctx;
@@ -2297,7 +2298,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			if (unlikely(put_tx++ == np->last_tx.orig))
 				put_tx = np->tx_ring.orig;
 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-				np->put_tx_ctx = np->first_tx_ctx;
+				np->put_tx_ctx = np->tx_skb;
 		} while (frag_size);
 	}
 
@@ -2306,7 +2307,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	else
 		prev_tx = put_tx - 1;
 
-	if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+	if (unlikely(np->put_tx_ctx == np->tx_skb))
 		prev_tx_ctx = np->last_tx_ctx;
 	else
 		prev_tx_ctx = np->put_tx_ctx - 1;
@@ -2409,7 +2410,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 		if (unlikely(put_tx++ == np->last_tx.ex))
 			put_tx = np->tx_ring.ex;
 		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-			np->put_tx_ctx = np->first_tx_ctx;
+			np->put_tx_ctx = np->tx_skb;
 	} while (size);
 
 	/* setup the fragments */
@@ -2435,7 +2436,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 				do {
 					nv_unmap_txskb(np, start_tx_ctx);
 					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
-						tmp_tx_ctx = np->first_tx_ctx;
+						tmp_tx_ctx = np->tx_skb;
 				} while (tmp_tx_ctx != np->put_tx_ctx);
 				dev_kfree_skb_any(skb);
 				np->put_tx_ctx = start_tx_ctx;
@@ -2455,7 +2456,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 			if (unlikely(put_tx++ == np->last_tx.ex))
 				put_tx = np->tx_ring.ex;
 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-				np->put_tx_ctx = np->first_tx_ctx;
+				np->put_tx_ctx = np->tx_skb;
 		} while (frag_size);
 	}
 
@@ -2464,7 +2465,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 	else
 		prev_tx = put_tx - 1;
 
-	if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+	if (unlikely(np->put_tx_ctx == np->tx_skb))
 		prev_tx_ctx = np->last_tx_ctx;
 	else
 		prev_tx_ctx = np->put_tx_ctx - 1;
@@ -2600,7 +2601,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
 		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
 			np->get_tx.orig = np->tx_ring.orig;
 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
-			np->get_tx_ctx = np->first_tx_ctx;
+			np->get_tx_ctx = np->tx_skb;
 	}
 
 	netdev_completed_queue(np->dev, tx_work, bytes_compl);
@@ -2654,7 +2655,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
 		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
 			np->get_tx.ex = np->tx_ring.ex;
 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
-			np->get_tx_ctx = np->first_tx_ctx;
+			np->get_tx_ctx = np->tx_skb;
 	}
 
 	netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
-- 
2.7.4
