Date:	Mon, 11 Jul 2011 02:52:49 +0200 (CEST)
From:	Michał Mirosław <mirq-linux@...e.qmqm.pl>
To:	netdev@...r.kernel.org
Subject: [PATCH v2 11/46] net: sungem: cleanup RX skb allocation

Replace the driver-private gem_alloc_skb()/ALIGNED_RX_SKB_ADDR() helpers
with the generic __netdev_alloc_skb_aligned(), and switch the small-packet
copy path to netdev_alloc_skb_ip_align() instead of open-coding the
two-byte reserve. RX skbs are now left empty at allocation and filled
with skb_put(skb, len) in gem_rx(), instead of being pre-sized with
skb_put() to the full buffer at ring-fill time and trimmed down with
skb_trim(). The buffer size is now computed with ALIGN() and a named
SUNGEM_RX_ALIGNMENT constant.

Signed-off-by: Michał Mirosław <mirq-linux@...e.qmqm.pl>
---
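Note for reviewers: __netdev_alloc_skb_aligned() is introduced earlier in
this series. For reading this patch in isolation, here is a minimal sketch
of such a helper, with the signature inferred from the call sites below and
the body mirroring the alignment logic of the removed gem_alloc_skb(); it
is not the actual implementation from the series.

#include <linux/kernel.h>	/* PTR_ALIGN() */
#include <linux/skbuff.h>

static inline struct sk_buff *__netdev_alloc_skb_aligned(struct net_device *dev,
							 unsigned int length,
							 unsigned int align,
							 gfp_t gfp_mask)
{
	/* Over-allocate so skb->data can be rounded up to 'align' bytes. */
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + align - 1,
						 gfp_mask);

	if (likely(skb))
		/* Advance skb->data to the next 'align'-byte boundary. */
		skb_reserve(skb, PTR_ALIGN(skb->data, align) - skb->data);

	return skb;
}

For the sungem.h change, the new RX_BUF_ALLOC_SIZE() rounds the buffer
size itself up to the alignment: e.g. assuming rx_buf_sz = 1536, it yields
ALIGN(1536 + 2, 64) = 1600, versus 1536 + 28 + 2 + 64 = 1630 with the old
macro.
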
 drivers/net/sungem.c |   34 +++++++++++-----------------------
 drivers/net/sungem.h |    4 +++-
 2 files changed, 14 insertions(+), 24 deletions(-)

diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index ade35dd..e82617f 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -743,21 +743,6 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit)
 	}
 }
 
-#define ALIGNED_RX_SKB_ADDR(addr) \
-        ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
-static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size,
-						gfp_t gfp_flags)
-{
-	struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);
-
-	if (likely(skb)) {
-		unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data);
-		skb_reserve(skb, offset);
-		skb->dev = dev;
-	}
-	return skb;
-}
-
 static int gem_rx(struct gem *gp, int work_to_do)
 {
 	struct net_device *dev = gp->dev;
@@ -821,7 +806,10 @@ static int gem_rx(struct gem *gp, int work_to_do)
 		if (len > RX_COPY_THRESHOLD) {
 			struct sk_buff *new_skb;
 
-			new_skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
+			new_skb = __netdev_alloc_skb_aligned(dev,
+							     RX_BUF_ALLOC_SIZE(gp),
+							     SUNGEM_RX_ALIGNMENT,
+							     GFP_ATOMIC);
 			if (new_skb == NULL) {
 				drops++;
 				goto drop_it;
@@ -830,7 +818,6 @@ static int gem_rx(struct gem *gp, int work_to_do)
 				       RX_BUF_ALLOC_SIZE(gp),
 				       PCI_DMA_FROMDEVICE);
 			gp->rx_skbs[entry] = new_skb;
-			skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
 			rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
 							       virt_to_page(new_skb->data),
 							       offset_in_page(new_skb->data),
@@ -838,17 +825,16 @@ static int gem_rx(struct gem *gp, int work_to_do)
 							       PCI_DMA_FROMDEVICE));
 			skb_reserve(new_skb, RX_OFFSET);
 
-			/* Trim the original skb for the netif. */
-			skb_trim(skb, len);
+			skb_put(skb, len);
 		} else {
-			struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);
+			struct sk_buff *copy_skb =
+				netdev_alloc_skb_ip_align(dev, len);
 
 			if (copy_skb == NULL) {
 				drops++;
 				goto drop_it;
 			}
 
-			skb_reserve(copy_skb, 2);
 			skb_put(copy_skb, len);
 			pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
 			skb_copy_from_linear_data(skb, copy_skb->data, len);
@@ -1637,7 +1623,10 @@ static void gem_init_rings(struct gem *gp)
 		struct sk_buff *skb;
 		struct gem_rxd *rxd = &gb->rxd[i];
 
-		skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL);
+		skb = __netdev_alloc_skb_aligned(dev,
+						 RX_BUF_ALLOC_SIZE(gp),
+						 SUNGEM_RX_ALIGNMENT,
+						 GFP_KERNEL);
 		if (!skb) {
 			rxd->buffer = 0;
 			rxd->status_word = 0;
@@ -1645,7 +1634,6 @@ static void gem_init_rings(struct gem *gp)
 		}
 
 		gp->rx_skbs[i] = skb;
-		skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
 		dma_addr = pci_map_page(gp->pdev,
 					virt_to_page(skb->data),
 					offset_in_page(skb->data),
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index 835ce1b..0d486ce 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -935,7 +935,9 @@ struct gem_rxd {
 	  (GP)->tx_old - (GP)->tx_new - 1)
 
 #define RX_OFFSET          2
-#define RX_BUF_ALLOC_SIZE(gp)	((gp)->rx_buf_sz + 28 + RX_OFFSET + 64)
+#define SUNGEM_RX_ALIGNMENT 64		/* min: cache line size, see comment above */
+#define RX_BUF_ALLOC_SIZE(gp)	\
+	ALIGN((gp)->rx_buf_sz + RX_OFFSET, SUNGEM_RX_ALIGNMENT)
 
 #define RX_COPY_THRESHOLD  256
 
-- 
1.7.5.4
