Date:   Fri, 23 Mar 2018 17:37:28 -0700
From:   Felix Manlunas <felix.manlunas@...ium.com>
To:     davem@...emloft.net
Cc:     netdev@...r.kernel.org, raghu.vatsavayi@...ium.com,
        derek.chickles@...ium.com, satananda.burla@...ium.com,
        intiyaz.basha@...ium.com
Subject: [PATCH net-next 07/13] liquidio: Removed netif_is_multiqueue check

From: Intiyaz Basha <intiyaz.basha@...ium.com>

Removing the netif_is_multiqueue checks.
A single-queue configuration is now simply a multiqueue netdev with one queue.
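
For reference, a minimal sketch of why the single-queue path is redundant,
assuming the driver registers its netdev via alloc_etherdev_mq() so the
subqueue state exists even with one queue.  Paraphrased from
include/linux/netdevice.h; the subqueue helpers with index 0 reduce to the
old single-queue helpers:

	/* the removed check: true only when more than one TX queue exists */
	static inline bool netif_is_multiqueue(const struct net_device *dev)
	{
		return dev->num_tx_queues > 1;
	}

	/* the single-queue helper is just "wake TX queue 0" ... */
	static inline void netif_wake_queue(struct net_device *dev)
	{
		netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
	}

	/* ... so netif_wake_subqueue(netdev, 0) does the same thing */
	static inline void netif_wake_subqueue(struct net_device *dev,
					       u16 queue_index)
	{
		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue_index));
	}

The same equivalence holds for the stop/start variants, so the driver can
always use the subqueue APIs.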

Signed-off-by: Intiyaz Basha <intiyaz.basha@...ium.com>
Acked-by: Derek Chickles <derek.chickles@...ium.com>
Signed-off-by: Felix Manlunas <felix.manlunas@...ium.com>
---
 drivers/net/ethernet/cavium/liquidio/lio_core.c    | 18 ++---
 drivers/net/ethernet/cavium/liquidio/lio_main.c    | 93 +++++++---------------
 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 55 ++++---------
 .../net/ethernet/cavium/liquidio/octeon_network.h  | 50 ++++--------
 4 files changed, 64 insertions(+), 152 deletions(-)

diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 666cf7e..73e70e0 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -377,20 +377,12 @@ static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
 		return;
 
 	lio = GET_LIO(netdev);
-	if (netif_is_multiqueue(netdev)) {
-		if (__netif_subqueue_stopped(netdev, iq->q_index) &&
-		    lio->linfo.link.s.link_up &&
-		    (!octnet_iq_is_full(oct, iq_num))) {
-			netif_wake_subqueue(netdev, iq->q_index);
-			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
-						  tx_restart, 1);
-		}
-	} else if (netif_queue_stopped(netdev) &&
-		   lio->linfo.link.s.link_up &&
-		   (!octnet_iq_is_full(oct, lio->txq))) {
-		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
+	if (__netif_subqueue_stopped(netdev, iq->q_index) &&
+	    lio->linfo.link.s.link_up &&
+	    (!octnet_iq_is_full(oct, iq_num))) {
+		netif_wake_subqueue(netdev, iq->q_index);
+		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
 					  tx_restart, 1);
-		netif_wake_queue(netdev);
 	}
 }
 
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 41e992c..5ef9aa0 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -515,10 +515,7 @@ static void liquidio_deinit_pci(void)
  */
 static inline void wake_q(struct net_device *netdev, int q)
 {
-	if (netif_is_multiqueue(netdev))
-		netif_wake_subqueue(netdev, q);
-	else
-		netif_wake_queue(netdev);
+	netif_wake_subqueue(netdev, q);
 }
 
 /**
@@ -528,10 +525,7 @@ static inline void wake_q(struct net_device *netdev, int q)
  */
 static inline void stop_q(struct net_device *netdev, int q)
 {
-	if (netif_is_multiqueue(netdev))
-		netif_stop_subqueue(netdev, q);
-	else
-		netif_stop_queue(netdev);
+	netif_stop_subqueue(netdev, q);
 }
 
 /**
@@ -541,33 +535,24 @@ static inline void stop_q(struct net_device *netdev, int q)
  */
 static inline int check_txq_status(struct lio *lio)
 {
+	int numqs = lio->netdev->num_tx_queues;
 	int ret_val = 0;
+	int q, iq;
 
-	if (netif_is_multiqueue(lio->netdev)) {
-		int numqs = lio->netdev->num_tx_queues;
-		int q, iq = 0;
-
-		/* check each sub-queue state */
-		for (q = 0; q < numqs; q++) {
-			iq = lio->linfo.txpciq[q %
-				lio->oct_dev->num_iqs].s.q_no;
-			if (octnet_iq_is_full(lio->oct_dev, iq))
-				continue;
-			if (__netif_subqueue_stopped(lio->netdev, q)) {
-				wake_q(lio->netdev, q);
-				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
-							  tx_restart, 1);
-				ret_val++;
-			}
+	/* check each sub-queue state */
+	for (q = 0; q < numqs; q++) {
+		iq = lio->linfo.txpciq[q %
+			lio->oct_dev->num_iqs].s.q_no;
+		if (octnet_iq_is_full(lio->oct_dev, iq))
+			continue;
+		if (__netif_subqueue_stopped(lio->netdev, q)) {
+			wake_q(lio->netdev, q);
+			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
+						  tx_restart, 1);
+			ret_val++;
 		}
-	} else {
-		if (octnet_iq_is_full(lio->oct_dev, lio->txq))
-			return 0;
-		wake_q(lio->netdev, lio->txq);
-		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
-					  tx_restart, 1);
-		ret_val = 1;
 	}
+
 	return ret_val;
 }
 
@@ -1671,15 +1656,10 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
  */
 static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
 {
-	int q = 0, iq = 0;
+	int q, iq;
 
-	if (netif_is_multiqueue(lio->netdev)) {
-		q = skb->queue_mapping;
-		iq = lio->linfo.txpciq[(q % lio->oct_dev->num_iqs)].s.q_no;
-	} else {
-		iq = lio->txq;
-		q = iq;
-	}
+	q = skb->queue_mapping;
+	iq = lio->linfo.txpciq[(q % lio->oct_dev->num_iqs)].s.q_no;
 
 	if (octnet_iq_is_full(lio->oct_dev, iq))
 		return 0;
@@ -2568,14 +2548,10 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 	lio = GET_LIO(netdev);
 	oct = lio->oct_dev;
 
-	if (netif_is_multiqueue(netdev)) {
-		q_idx = skb->queue_mapping;
-		q_idx = (q_idx % (lio->linfo.num_txpciq));
-		tag = q_idx;
-		iq_no = lio->linfo.txpciq[q_idx].s.q_no;
-	} else {
-		iq_no = lio->txq;
-	}
+	q_idx = skb->queue_mapping;
+	q_idx = (q_idx % (lio->linfo.num_txpciq));
+	tag = q_idx;
+	iq_no = lio->linfo.txpciq[q_idx].s.q_no;
 
 	stats = &oct->instr_queue[iq_no]->stats;
 
@@ -2606,23 +2582,14 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	ndata.q_no = iq_no;
 
-	if (netif_is_multiqueue(netdev)) {
-		if (octnet_iq_is_full(oct, ndata.q_no)) {
-			/* defer sending if queue is full */
-			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
-				   ndata.q_no);
-			stats->tx_iq_busy++;
-			return NETDEV_TX_BUSY;
-		}
-	} else {
-		if (octnet_iq_is_full(oct, lio->txq)) {
-			/* defer sending if queue is full */
-			stats->tx_iq_busy++;
-			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
-				   lio->txq);
-			return NETDEV_TX_BUSY;
-		}
+	if (octnet_iq_is_full(oct, ndata.q_no)) {
+		/* defer sending if queue is full */
+		netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
+			   ndata.q_no);
+		stats->tx_iq_busy++;
+		return NETDEV_TX_BUSY;
 	}
+
 	/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu:  %d, q_no:%d\n",
 	 *	lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
 	 */
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 57b6ee5..f46289d 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -291,10 +291,7 @@ static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
  */
 static void wake_q(struct net_device *netdev, int q)
 {
-	if (netif_is_multiqueue(netdev))
-		netif_wake_subqueue(netdev, q);
-	else
-		netif_wake_queue(netdev);
+	netif_wake_subqueue(netdev, q);
 }
 
 /**
@@ -304,10 +301,7 @@ static void wake_q(struct net_device *netdev, int q)
  */
 static void stop_q(struct net_device *netdev, int q)
 {
-	if (netif_is_multiqueue(netdev))
-		netif_stop_subqueue(netdev, q);
-	else
-		netif_stop_queue(netdev);
+	netif_stop_subqueue(netdev, q);
 }
 
 /**
@@ -986,15 +980,10 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
  */
 static int check_txq_state(struct lio *lio, struct sk_buff *skb)
 {
-	int q = 0, iq = 0;
+	int q, iq;
 
-	if (netif_is_multiqueue(lio->netdev)) {
-		q = skb->queue_mapping;
-		iq = lio->linfo.txpciq[q % lio->oct_dev->num_iqs].s.q_no;
-	} else {
-		iq = lio->txq;
-		q = iq;
-	}
+	q = skb->queue_mapping;
+	iq = lio->linfo.txpciq[q % lio->oct_dev->num_iqs].s.q_no;
 
 	if (octnet_iq_is_full(lio->oct_dev, iq))
 		return 0;
@@ -1635,14 +1624,10 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 	lio = GET_LIO(netdev);
 	oct = lio->oct_dev;
 
-	if (netif_is_multiqueue(netdev)) {
-		q_idx = skb->queue_mapping;
-		q_idx = (q_idx % (lio->linfo.num_txpciq));
-		tag = q_idx;
-		iq_no = lio->linfo.txpciq[q_idx].s.q_no;
-	} else {
-		iq_no = lio->txq;
-	}
+	q_idx = skb->queue_mapping;
+	q_idx = (q_idx % (lio->linfo.num_txpciq));
+	tag = q_idx;
+	iq_no = lio->linfo.txpciq[q_idx].s.q_no;
 
 	stats = &oct->instr_queue[iq_no]->stats;
 
@@ -1671,22 +1656,12 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	ndata.q_no = iq_no;
 
-	if (netif_is_multiqueue(netdev)) {
-		if (octnet_iq_is_full(oct, ndata.q_no)) {
-			/* defer sending if queue is full */
-			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
-				   ndata.q_no);
-			stats->tx_iq_busy++;
-			return NETDEV_TX_BUSY;
-		}
-	} else {
-		if (octnet_iq_is_full(oct, lio->txq)) {
-			/* defer sending if queue is full */
-			stats->tx_iq_busy++;
-			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
-				   ndata.q_no);
-			return NETDEV_TX_BUSY;
-		}
+	if (octnet_iq_is_full(oct, ndata.q_no)) {
+		/* defer sending if queue is full */
+		netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
+			   ndata.q_no);
+		stats->tx_iq_busy++;
+		return NETDEV_TX_BUSY;
 	}
 
 	ndata.datasize = skb->len;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index 72a581a..7922a69 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -512,14 +512,10 @@ static inline int wait_for_pending_requests(struct octeon_device *oct)
  */
 static inline void txqs_stop(struct net_device *netdev)
 {
-	if (netif_is_multiqueue(netdev)) {
-		int i;
+	int i;
 
-		for (i = 0; i < netdev->num_tx_queues; i++)
-			netif_stop_subqueue(netdev, i);
-	} else {
-		netif_stop_queue(netdev);
-	}
+	for (i = 0; i < netdev->num_tx_queues; i++)
+		netif_stop_subqueue(netdev, i);
 }
 
 /**
@@ -529,24 +525,16 @@ static inline void txqs_stop(struct net_device *netdev)
 static inline void txqs_wake(struct net_device *netdev)
 {
 	struct lio *lio = GET_LIO(netdev);
+	int i, qno;
 
-	if (netif_is_multiqueue(netdev)) {
-		int i;
+	for (i = 0; i < netdev->num_tx_queues; i++) {
+		qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no;
 
-		for (i = 0; i < netdev->num_tx_queues; i++) {
-			int qno = lio->linfo.txpciq[i %
-				lio->oct_dev->num_iqs].s.q_no;
-
-			if (__netif_subqueue_stopped(netdev, i)) {
-				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
-							  tx_restart, 1);
-				netif_wake_subqueue(netdev, i);
-			}
+		if (__netif_subqueue_stopped(netdev, i)) {
+			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
+						  tx_restart, 1);
+			netif_wake_subqueue(netdev, i);
 		}
-	} else {
-		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
-					  tx_restart, 1);
-		netif_wake_queue(netdev);
 	}
 }
 
@@ -557,27 +545,17 @@ static inline void txqs_wake(struct net_device *netdev)
 static inline void txqs_start(struct net_device *netdev)
 {
 	struct lio *lio = GET_LIO(netdev);
+	int i;
 
 	if (lio->linfo.link.s.link_up) {
-		if (netif_is_multiqueue(netdev)) {
-			int i;
-
-			for (i = 0; i < netdev->num_tx_queues; i++)
-				netif_start_subqueue(netdev, i);
-		} else {
-			netif_start_queue(netdev);
-		}
+		for (i = 0; i < netdev->num_tx_queues; i++)
+			netif_start_subqueue(netdev, i);
 	}
 }
 
 static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
 {
-	int q = 0;
-
-	if (netif_is_multiqueue(lio->netdev))
-		q = skb->queue_mapping % lio->linfo.num_txpciq;
-
-	return q;
+	return skb->queue_mapping % lio->linfo.num_txpciq;
 }
 
 #endif
-- 
1.8.3.1
