Message-Id: <151927043633.1793.16876433797401023263.stgit@ltcalpine2-lp14.aus.stglabs.ibm.com>
Date:   Wed, 21 Feb 2018 21:33:56 -0600
From:   Nathan Fontenot <nfont@...ux.vnet.ibm.com>
To:     netdev@...r.kernel.org
Cc:     jallen@...ux.vnet.ibm.com, tlfalcon@...ux.vnet.ibm.com
Subject: [PATCH net-next] ibmvnic: Split counters for scrq/pools/napi

Using one counter to rule them all when tracking the number of
active sub-crqs, pools, and napi breaks down in some failover
scenarios, because the sub-crqs, pools, and napi are initialized
in different places while the active counts are updated in only
one place.

This patch simplifies the tracking by keeping a separate active
counter for the tx and rx sub-crqs, pools, and napi, each updated
where the corresponding resource is allocated and released.
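
For illustration only, the following is a minimal userspace sketch
of the pattern the patch adopts: each resource type carries its own
active counter, set as soon as the allocation succeeds and cleared
when the resources are freed, so the release path only ever walks
what was actually created. The names (demo_adapter,
demo_init_rx_pools, demo_release_rx_pools) are made up for the
sketch and are not part of the driver.

/*
 * Minimal userspace sketch of the split-counter pattern; not the
 * driver code. Each resource type records how many instances were
 * actually allocated, and its release path walks and then clears
 * only that counter.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_rx_pool { int index; };

struct demo_adapter {
	struct demo_rx_pool *rx_pool;
	unsigned int num_active_rx_pools;	/* set where pools are allocated */
};

static int demo_init_rx_pools(struct demo_adapter *adapter, unsigned int count)
{
	unsigned int i;

	adapter->rx_pool = calloc(count, sizeof(*adapter->rx_pool));
	if (!adapter->rx_pool)
		return -1;

	/* Record the active count as soon as the allocation succeeds. */
	adapter->num_active_rx_pools = count;

	for (i = 0; i < count; i++)
		adapter->rx_pool[i].index = i;

	return 0;
}

static void demo_release_rx_pools(struct demo_adapter *adapter)
{
	unsigned int i;

	if (!adapter->rx_pool)
		return;

	/* Walk only the pools that were actually created. */
	for (i = 0; i < adapter->num_active_rx_pools; i++)
		printf("releasing rx_pool[%d]\n", adapter->rx_pool[i].index);

	free(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

int main(void)
{
	struct demo_adapter adapter = { 0 };

	if (demo_init_rx_pools(&adapter, 4))
		return 1;
	demo_release_rx_pools(&adapter);
	return 0;
}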

Signed-off-by: Nathan Fontenot <nfont@...ux.vnet.ibm.com>
---
 drivers/net/ethernet/ibm/ibmvnic.c |   38 ++++++++++++++++--------------------
 drivers/net/ethernet/ibm/ibmvnic.h |    7 +++++--
 2 files changed, 22 insertions(+), 23 deletions(-)

diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 1703b881252f..8ca88f7cc661 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -461,7 +461,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
 	if (!adapter->rx_pool)
 		return;
 
-	for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
+	for (i = 0; i < adapter->num_active_rx_pools; i++) {
 		rx_pool = &adapter->rx_pool[i];
 
 		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
@@ -484,6 +484,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
 
 	kfree(adapter->rx_pool);
 	adapter->rx_pool = NULL;
+	adapter->num_active_rx_pools = 0;
 }
 
 static int init_rx_pools(struct net_device *netdev)
@@ -508,6 +509,8 @@ static int init_rx_pools(struct net_device *netdev)
 		return -1;
 	}
 
+	adapter->num_active_rx_pools = rxadd_subcrqs;
+
 	for (i = 0; i < rxadd_subcrqs; i++) {
 		rx_pool = &adapter->rx_pool[i];
 
@@ -608,7 +611,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
 	if (!adapter->tx_pool)
 		return;
 
-	for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
+	for (i = 0; i < adapter->num_active_tx_pools; i++) {
 		netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
 		tx_pool = &adapter->tx_pool[i];
 		kfree(tx_pool->tx_buff);
@@ -619,6 +622,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
 
 	kfree(adapter->tx_pool);
 	adapter->tx_pool = NULL;
+	adapter->num_active_tx_pools = 0;
 }
 
 static int init_tx_pools(struct net_device *netdev)
@@ -635,6 +639,8 @@ static int init_tx_pools(struct net_device *netdev)
 	if (!adapter->tx_pool)
 		return -1;
 
+	adapter->num_active_tx_pools = tx_subcrqs;
+
 	for (i = 0; i < tx_subcrqs; i++) {
 		tx_pool = &adapter->tx_pool[i];
 
@@ -745,6 +751,7 @@ static int init_napi(struct ibmvnic_adapter *adapter)
 			       ibmvnic_poll, NAPI_POLL_WEIGHT);
 	}
 
+	adapter->num_active_rx_napi = adapter->req_rx_queues;
 	return 0;
 }
 
@@ -755,7 +762,7 @@ static void release_napi(struct ibmvnic_adapter *adapter)
 	if (!adapter->napi)
 		return;
 
-	for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
+	for (i = 0; i < adapter->num_active_rx_napi; i++) {
 		if (&adapter->napi[i]) {
 			netdev_dbg(adapter->netdev,
 				   "Releasing napi[%d]\n", i);
@@ -765,6 +772,7 @@ static void release_napi(struct ibmvnic_adapter *adapter)
 
 	kfree(adapter->napi);
 	adapter->napi = NULL;
+	adapter->num_active_rx_napi = 0;
 }
 
 static int ibmvnic_login(struct net_device *netdev)
@@ -998,10 +1006,6 @@ static int init_resources(struct ibmvnic_adapter *adapter)
 		return rc;
 
 	rc = init_tx_pools(netdev);
-
-	adapter->num_active_tx_scrqs = adapter->req_tx_queues;
-	adapter->num_active_rx_scrqs = adapter->req_rx_queues;
-
 	return rc;
 }
 
@@ -1706,9 +1710,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 
 			release_napi(adapter);
 			init_napi(adapter);
-
-			adapter->num_active_tx_scrqs = adapter->req_tx_queues;
-			adapter->num_active_rx_scrqs = adapter->req_rx_queues;
 		} else {
 			rc = reset_tx_pools(adapter);
 			if (rc)
@@ -2398,19 +2399,10 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
 
 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
 {
-	u64 num_tx_scrqs, num_rx_scrqs;
 	int i;
 
-	if (adapter->state == VNIC_PROBED) {
-		num_tx_scrqs = adapter->req_tx_queues;
-		num_rx_scrqs = adapter->req_rx_queues;
-	} else {
-		num_tx_scrqs = adapter->num_active_tx_scrqs;
-		num_rx_scrqs = adapter->num_active_rx_scrqs;
-	}
-
 	if (adapter->tx_scrq) {
-		for (i = 0; i < num_tx_scrqs; i++) {
+		for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
 			if (!adapter->tx_scrq[i])
 				continue;
 
@@ -2429,10 +2421,11 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
 
 		kfree(adapter->tx_scrq);
 		adapter->tx_scrq = NULL;
+		adapter->num_active_tx_scrqs = 0;
 	}
 
 	if (adapter->rx_scrq) {
-		for (i = 0; i < num_rx_scrqs; i++) {
+		for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
 			if (!adapter->rx_scrq[i])
 				continue;
 
@@ -2451,6 +2444,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
 
 		kfree(adapter->rx_scrq);
 		adapter->rx_scrq = NULL;
+		adapter->num_active_rx_scrqs = 0;
 	}
 }
 
@@ -2718,6 +2712,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *adapter)
 	for (i = 0; i < adapter->req_tx_queues; i++) {
 		adapter->tx_scrq[i] = allqueues[i];
 		adapter->tx_scrq[i]->pool_index = i;
+		adapter->num_active_tx_scrqs++;
 	}
 
 	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
@@ -2728,6 +2723,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *adapter)
 	for (i = 0; i < adapter->req_rx_queues; i++) {
 		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
 		adapter->rx_scrq[i]->scrq_num = i;
+		adapter->num_active_rx_scrqs++;
 	}
 
 	kfree(allqueues);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 68e712c69211..099c89d49945 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1092,8 +1092,11 @@ struct ibmvnic_adapter {
 	u64 opt_rxba_entries_per_subcrq;
 	__be64 tx_rx_desc_req;
 	u8 map_id;
-	u64 num_active_rx_scrqs;
-	u64 num_active_tx_scrqs;
+	u32 num_active_rx_scrqs;
+	u32 num_active_rx_pools;
+	u32 num_active_rx_napi;
+	u32 num_active_tx_scrqs;
+	u32 num_active_tx_pools;
 
 	struct tasklet_struct tasklet;
 	enum vnic_state state;
