Date:	Wed, 13 Jan 2010 19:41:48 -0800
From:	Stephen Hemminger <shemminger@...tta.com>
To:	Michael Breuer <mbreuer@...jas.com>,
	David Miller <davem@...emloft.net>
Cc:	jarkao2@...il.com, mikem@...g3k.org, flyboy@...il.com, rjw@...k.pl,
	netdev@...r.kernel.org
Subject: Re: [PATCH] sky2: safer transmit ring cleaning (v4)

Subject: sky2: safer transmit cleanup

This code makes the transmit path and transmit reset safer by:
  * adding a memory barrier before checking available ring slots
    (see the sketch after this list)
  * resetting the state of tx ring elements after free
  * separating the cleanup function from the ring done function
  * removing the mostly unused tx_next element
  * ignoring transmit completion if the device is offline
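
A minimal userspace sketch of the barrier pairing this relies on (not
driver code: smp_mb() is approximated with a C11 seq_cst fence, and the
ring size, types, and field names are illustrative only):

	#include <stdatomic.h>
	#include <stdio.h>

	#define RING_SIZE 16

	struct fake_ring {
		_Atomic unsigned tx_prod;	/* next slot to use (producer side)     */
		_Atomic unsigned tx_cons;	/* next slot to reclaim (consumer side) */
	};

	static unsigned tx_inuse(struct fake_ring *r)
	{
		return (atomic_load(&r->tx_prod) - atomic_load(&r->tx_cons)) % RING_SIZE;
	}

	static unsigned tx_avail(struct fake_ring *r)
	{
		/* Analogue of the smp_mb() added to tx_avail() in the patch
		 * below: make sure the latest tx_prod/tx_cons stores from the
		 * other CPU are observed before computing free space. */
		atomic_thread_fence(memory_order_seq_cst);
		return RING_SIZE - 1 - tx_inuse(r);
	}

	int main(void)
	{
		struct fake_ring r = { 0, 0 };

		atomic_store(&r.tx_prod, 5);	/* producer queued 5 frames       */
		atomic_store(&r.tx_cons, 2);	/* consumer reclaimed 2 of them   */
		printf("slots available: %u\n", tx_avail(&r));
		return 0;
	}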

Signed-off-by: Stephen Hemminger <shemminger@...tta.com>

---
This patch is against the current net-next-2.6 tree.
This version handles the case of dual-port shared transmit status
and other cases where tx_done can be called while the device is
being brought down or detached.
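
A small sketch of the new guard at the top of sky2_tx_done() (assumed
names, plain userspace C rather than the kernel API): if the port is no
longer running or has been detached, the shared status entry is ignored
instead of touching a ring that is being torn down.

	#include <stdbool.h>
	#include <stdio.h>

	struct fake_port {
		bool running;		/* stands in for netif_running()        */
		bool present;		/* stands in for netif_device_present() */
		unsigned tx_cons;
	};

	static void fake_tx_done(struct fake_port *p, unsigned done)
	{
		/* Bail out unless the port is both running and present. */
		if (!(p->running && p->present))
			return;

		p->tx_cons = done;	/* normal completion processing would go here */
	}

	int main(void)
	{
		struct fake_port p = { .running = true, .present = false, .tx_cons = 0 };

		fake_tx_done(&p, 7);
		printf("tx_cons = %u (untouched: port was detached)\n", p.tx_cons);
		return 0;
	}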

--- a/drivers/net/sky2.c	2010-01-13 08:32:51.360161158 -0800
+++ b/drivers/net/sky2.c	2010-01-13 08:35:37.685531490 -0800
@@ -1596,6 +1596,9 @@ static inline int tx_inuse(const struct 
 /* Number of list elements available for next tx */
 static inline int tx_avail(const struct sky2_port *sky2)
 {
+	/* Make sure updates of tx_prod from start_xmit and
+	   tx_cons from tx_done are seen. */
+	smp_mb();
 	return sky2->tx_pending - tx_inuse(sky2);
 }
 
@@ -1618,8 +1621,7 @@ static unsigned tx_le_req(const struct s
 	return count;
 }
 
-static void sky2_tx_unmap(struct pci_dev *pdev,
-			  const struct tx_ring_info *re)
+static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
 {
 	if (re->flags & TX_MAP_SINGLE)
 		pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
@@ -1629,6 +1631,7 @@ static void sky2_tx_unmap(struct pci_dev
 		pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
 			       pci_unmap_len(re, maplen),
 			       PCI_DMA_TODEVICE);
+	re->flags = 0;
 }
 
 /*
@@ -1804,7 +1807,8 @@ mapping_error:
 }
 
 /*
- * Free ring elements from starting at tx_cons until "done"
+ * Transmit complete processing
+ * Free ring elements starting at tx_cons until the done index
  *
  * NB:
  *  1. The hardware will tell us about partial completion of multi-part
@@ -1813,11 +1817,14 @@ mapping_error:
  *     looks at the tail of the queue of FIFO (tx_cons), not
  *     the head (tx_prod)
  */
-static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
+static void sky2_tx_done(struct net_device *dev, u16 done)
 {
-	struct net_device *dev = sky2->netdev;
+	struct sky2_port *sky2 = netdev_priv(dev);
 	unsigned idx;
 
+	if (!(netif_running(dev) && netif_device_present(dev)))
+		return;
+
 	BUG_ON(done >= sky2->tx_ring_size);
 
 	for (idx = sky2->tx_cons; idx != done;
@@ -1828,6 +1835,8 @@ static void sky2_tx_complete(struct sky2
 		sky2_tx_unmap(sky2->hw->pdev, re);
 
 		if (skb) {
+			re->skb = NULL;
+
 			if (unlikely(netif_msg_tx_done(sky2)))
 				printk(KERN_DEBUG "%s: tx done %u\n",
 				       dev->name, idx);
@@ -1836,16 +1845,12 @@ static void sky2_tx_complete(struct sky2
 			dev->stats.tx_bytes += skb->len;
 
 			dev_kfree_skb_any(skb);
-
-			sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
 		}
 	}
 
 	sky2->tx_cons = idx;
-	smp_mb();
 
-	/* Wake unless it's detached, and called e.g. from sky2_down() */
-	if (tx_avail(sky2) > MAX_SKB_TX_LE + 4 && netif_device_present(dev))
+	if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
 		netif_wake_queue(dev);
 }
 
@@ -1871,6 +1876,21 @@ static void sky2_tx_reset(struct sky2_hw
 	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
 }
 
+static void sky2_tx_clean(struct sky2_port *sky2)
+{
+	u16 idx;
+
+	for (idx = 0; idx < sky2->tx_ring_size; idx++) {
+		struct tx_ring_info *re = sky2->tx_ring + idx;
+
+		sky2_tx_unmap(sky2->hw->pdev, re);
+		if (re->skb) {
+			dev_kfree_skb_any(re->skb);
+			re->skb = NULL;
+		}
+	}
+}
+
 /* Network shutdown */
 static int sky2_down(struct net_device *dev)
 {
@@ -1934,8 +1954,7 @@ static int sky2_down(struct net_device *
 	sky2_tx_reset(hw, port);
 
 	/* Free any pending frames stuck in HW queue */
-	sky2_tx_complete(sky2, sky2->tx_prod);
-
+	sky2_tx_clean(sky2);
 	sky2_rx_clean(sky2);
 
 	sky2_free_buffers(sky2);
@@ -2412,15 +2431,6 @@ error:
 	goto resubmit;
 }
 
-/* Transmit complete */
-static inline void sky2_tx_done(struct net_device *dev, u16 last)
-{
-	struct sky2_port *sky2 = netdev_priv(dev);
-
-	if (netif_running(dev))
-		sky2_tx_complete(sky2, last);
-}
-
 static inline void sky2_skb_rx(const struct sky2_port *sky2,
 			       u32 status, struct sk_buff *skb)
 {
@@ -3177,9 +3187,9 @@ static void sky2_reset(struct sky2_hw *h
 static void sky2_detach(struct net_device *dev)
 {
 	if (netif_running(dev)) {
-		netif_tx_lock(dev);
+		netif_tx_lock_bh(dev);
 		netif_device_detach(dev);	/* stop txq */
-		netif_tx_unlock(dev);
+		netif_tx_unlock_bh(dev);
 		sky2_down(dev);
 	}
 }
@@ -4202,7 +4212,7 @@ static int sky2_debug_show(struct seq_fi
 
 	/* Dump contents of tx ring */
 	sop = 1;
-	for (idx = sky2->tx_next; idx != sky2->tx_prod && idx < sky2->tx_ring_size;
+	for (idx = sky2->tx_cons; idx != sky2->tx_prod && idx < sky2->tx_ring_size;
 	     idx = RING_NEXT(idx, sky2->tx_ring_size)) {
 		const struct sky2_tx_le *le = sky2->tx_le + idx;
 		u32 a = le32_to_cpu(le->addr);
--- a/drivers/net/sky2.h	2010-01-13 08:32:27.919849429 -0800
+++ b/drivers/net/sky2.h	2010-01-13 08:33:03.410162026 -0800
@@ -2187,7 +2187,6 @@ struct sky2_port {
 	u16		     tx_ring_size;
 	u16		     tx_cons;		/* next le to check */
 	u16		     tx_prod;		/* next le to use */
-	u16		     tx_next;		/* debug only */
 
 	u16		     tx_pending;
 	u16		     tx_last_mss;
--