Date:   Thu, 18 Jan 2018 16:26:31 -0600
From:   John Allen <jallen@...ux.vnet.ibm.com>
To:     netdev@...r.kernel.org
Cc:     Thomas Falcon <tlfalcon@...ux.vnet.ibm.com>,
        Nathan Fontenot <nfont@...ux.vnet.ibm.com>
Subject: [PATCH net v2 1/3] ibmvnic: Modify buffer size and number of queues
 on failover

Using newer backing devices can cause the required padding at the end
of the buffer, as well as the number of queues, to change after a
failover. Since we currently assume that these values never change, a
failover to a backing device with different capabilities can result in
errors from the vnic server, attempts to free long term buffers that
are no longer there, or failures to free long term buffers that should
be freed.

This patch resolves the issue by checking whether any of these values
change and, if so, performing the necessary re-allocations.
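
In condensed form, the check added to reset_rx_pools in the diff below
works like this (all helpers and fields shown are ones the driver
already has; this is only a sketch of the hunk, not additional code):

	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
		/* buffer size changed: release and re-allocate the LTB */
		free_long_term_buff(adapter, &rx_pool->long_term_buff);
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					  rx_pool->size * rx_pool->buff_size);
	} else {
		/* same buffer size: just reset the existing LTB */
		rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff);
	}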

Signed-off-by: John Allen <jallen@...ux.vnet.ibm.com>
---
v2: Added the line to free the long term buffer in reset_rx_pools,
which exposed a couple of other problems. On a failover, the number of
queues can also change, and the current reset code does not account
for that. Added logic in do_reset to check for a changed number of
queues, and changed the release pools routines so that they only
release pools that were actually allocated in the init pools routines.
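
The queue-count handling described above boils down to the following
check in do_reset (condensed from the hunk below; as in that hunk, the
return codes of the re-init calls are not checked here):

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;

	/* ... capabilities are renegotiated with the new backing device ... */

	if (adapter->req_rx_queues != old_num_rx_queues ||
	    adapter->req_tx_queues != old_num_tx_queues) {
		/* queue counts changed: tear down and rebuild all pools */
		release_rx_pools(adapter);
		release_tx_pools(adapter);
		init_rx_pools(netdev);
		init_tx_pools(netdev);
	}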
---
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 461014b7ccdd..12559c63c226 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -411,6 +411,10 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
 	struct ibmvnic_rx_pool *rx_pool;
 	int rx_scrqs;
 	int i, j, rc;
+	u64 *size_array;
+
+	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

 	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
 	for (i = 0; i < rx_scrqs; i++) {
@@ -418,7 +422,17 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)

 		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

-		rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff);
+		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
+			free_long_term_buff(adapter, &rx_pool->long_term_buff);
+			rx_pool->buff_size = be64_to_cpu(size_array[i]);
+			rc = alloc_long_term_buff(adapter,
+						  &rx_pool->long_term_buff,
+						  rx_pool->size * rx_pool->buff_size);
+		} else {
+			rc = reset_long_term_buff(adapter,
+						  &rx_pool->long_term_buff);
+		}
+
 		if (rc)
 			return rc;

@@ -440,14 +454,12 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
 static void release_rx_pools(struct ibmvnic_adapter *adapter)
 {
 	struct ibmvnic_rx_pool *rx_pool;
-	int rx_scrqs;
 	int i, j;

 	if (!adapter->rx_pool)
 		return;

-	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
-	for (i = 0; i < rx_scrqs; i++) {
+	for (i = 0; i < adapter->num_active_rx_pools; i++) {
 		rx_pool = &adapter->rx_pool[i];

 		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
@@ -470,6 +482,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)

 	kfree(adapter->rx_pool);
 	adapter->rx_pool = NULL;
+	adapter->num_active_rx_pools = 0;
 }

 static int init_rx_pools(struct net_device *netdev)
@@ -494,6 +507,8 @@ static int init_rx_pools(struct net_device *netdev)
 		return -1;
 	}

+	adapter->num_active_rx_pools = 0;
+
 	for (i = 0; i < rxadd_subcrqs; i++) {
 		rx_pool = &adapter->rx_pool[i];

@@ -537,6 +552,8 @@ static int init_rx_pools(struct net_device *netdev)
 		rx_pool->next_free = 0;
 	}

+	adapter->num_active_rx_pools = rxadd_subcrqs;
+
 	return 0;
 }

@@ -587,13 +604,12 @@ static void release_vpd_data(struct ibmvnic_adapter *adapter)
 static void release_tx_pools(struct ibmvnic_adapter *adapter)
 {
 	struct ibmvnic_tx_pool *tx_pool;
-	int i, tx_scrqs;
+	int i;

 	if (!adapter->tx_pool)
 		return;

-	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
-	for (i = 0; i < tx_scrqs; i++) {
+	for (i = 0; i < adapter->num_active_tx_pools; i++) {
 		netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
 		tx_pool = &adapter->tx_pool[i];
 		kfree(tx_pool->tx_buff);
@@ -604,6 +620,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)

 	kfree(adapter->tx_pool);
 	adapter->tx_pool = NULL;
+	adapter->num_active_tx_pools = 0;
 }

 static int init_tx_pools(struct net_device *netdev)
@@ -620,6 +637,8 @@ static int init_tx_pools(struct net_device *netdev)
 	if (!adapter->tx_pool)
 		return -1;

+	adapter->num_active_tx_pools = 0;
+
 	for (i = 0; i < tx_subcrqs; i++) {
 		tx_pool = &adapter->tx_pool[i];

@@ -667,6 +686,8 @@ static int init_tx_pools(struct net_device *netdev)
 		tx_pool->producer_index = 0;
 	}

+	adapter->num_active_tx_pools = tx_subcrqs;
+
 	return 0;
 }

@@ -1550,6 +1571,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
 static int do_reset(struct ibmvnic_adapter *adapter,
 		    struct ibmvnic_rwi *rwi, u32 reset_state)
 {
+	u64 old_num_rx_queues, old_num_tx_queues;
 	struct net_device *netdev = adapter->netdev;
 	int i, rc;

@@ -1559,6 +1581,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 	netif_carrier_off(netdev);
 	adapter->reset_reason = rwi->reset_reason;

+	old_num_rx_queues = adapter->req_rx_queues;
+	old_num_tx_queues = adapter->req_tx_queues;
+
 	if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
 		rc = ibmvnic_reenable_crq_queue(adapter);
 		if (rc)
@@ -1603,6 +1628,12 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 			rc = init_resources(adapter);
 			if (rc)
 				return rc;
+		} else if (adapter->req_rx_queues != old_num_rx_queues ||
+			   adapter->req_tx_queues != old_num_tx_queues) {
+			release_rx_pools(adapter);
+			release_tx_pools(adapter);
+			init_rx_pools(netdev);
+			init_tx_pools(netdev);
 		} else {
 			rc = reset_tx_pools(adapter);
 			if (rc)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 2df79fdd800b..fe21a6e2ddae 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1091,6 +1091,8 @@ struct ibmvnic_adapter {
 	u64 opt_rxba_entries_per_subcrq;
 	__be64 tx_rx_desc_req;
 	u8 map_id;
+	u64 num_active_rx_pools;
+	u64 num_active_tx_pools;

 	struct tasklet_struct tasklet;
 	enum vnic_state state;
