Message-ID: <4ACB1B95.4090701@gmail.com>
Date:	Tue, 06 Oct 2009 12:27:33 +0200
From:	Eric Dumazet <eric.dumazet@...il.com>
To:	Giuseppe CAVALLARO <peppe.cavallaro@...com>
CC:	netdev@...r.kernel.org
Subject: Re: [PATCH] net: add support for STMicroelectronics Ethernet controllers.

Giuseppe CAVALLARO wrote:

> +static int stmmac_sw_tso(struct stmmac_priv *priv, struct sk_buff *skb)
> +{
> +	struct sk_buff *segs, *curr_skb;
> +	int gso_segs = skb_shinfo(skb)->gso_segs;
> +
> +	/* Estimate the number of fragments in the worst case */
> +	if (unlikely(stmmac_tx_avail(priv) < gso_segs)) {
> +		netif_stop_queue(priv->dev);
> +		TX_DBG(KERN_ERR "%s: TSO BUG! Tx Ring full when queue awake\n",
> +		       __func__);
> +		if (stmmac_tx_avail(priv) < gso_segs)
> +			return NETDEV_TX_BUSY;
> +
> +		netif_wake_queue(priv->dev);
> +	}
> +	TX_DBG("\tstmmac_sw_tso: segmenting: skb %p (len %d)\n",
> +	       skb, skb->len);
> +
> +	segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO);
> +	if (unlikely(IS_ERR(segs)))
> +		goto sw_tso_end;
> +
> +	do {
> +		curr_skb = segs;
> +		segs = segs->next;
> +		TX_DBG("\t\tcurrent skb->len: %d, *curr %p,"
> +		       "*next %p\n", curr_skb->len, curr_skb, segs);
> +		curr_skb->next = NULL;
> +		stmmac_xmit(curr_skb, priv->dev);
> +	} while (segs);
> +
> +sw_tso_end:
> +	dev_kfree_skb(skb);
> +	return NETDEV_TX_OK;
> +}

So stmmac_sw_tso() calls stmmac_xmit() for each segment skb.

But stmmac_sw_tso() is itself called from stmmac_xmit(), with priv->tx_lock
already held, so each recursive call will fail the spin_trylock_irqsave()
and return NETDEV_TX_LOCKED, leaking the segments. I suspect something is
wrong; one possible restructuring is sketched after the quoted code below.

> +/**
> + *  stmmac_xmit:
> + *  @skb : the socket buffer
> + *  @dev : device pointer
> + *  Description : Tx entry point of the driver.
> + */
> +static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
> +{

...

> +	if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags)))
> +		return NETDEV_TX_LOCKED;
...

> +	if (unlikely(skb_is_gso(skb))) {
> +		ret = stmmac_sw_tso(priv, skb);
> +		goto end_xmit;
> +	}
> +
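
One way out (a rough sketch only; the helper name is hypothetical) is to
factor the body of stmmac_xmit() into a helper that assumes tx_lock is
already held, and let the TSO loop call that helper instead of re-entering
stmmac_xmit():

	/* hypothetical: caller must already hold priv->tx_lock */
	static netdev_tx_t __stmmac_xmit_locked(struct sk_buff *skb,
						struct net_device *dev)
	{
		/* ... current body of stmmac_xmit(), minus the locking
		 * and minus the skb_is_gso() branch ... */
	}

	static netdev_tx_t stmmac_xmit(struct sk_buff *skb,
				       struct net_device *dev)
	{
		struct stmmac_priv *priv = netdev_priv(dev);
		unsigned long flags;
		netdev_tx_t ret;

		if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags)))
			return NETDEV_TX_LOCKED;

		if (unlikely(skb_is_gso(skb)))
			ret = stmmac_sw_tso(priv, skb); /* per-segment helper calls */
		else
			ret = __stmmac_xmit_locked(skb, dev);

		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return ret;
	}

Then stmmac_sw_tso() calls __stmmac_xmit_locked() for each segment and
never touches the lock itself.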


Also, please change the various occurrences like

+	mac = kmalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+	memset(mac, 0, sizeof(struct mac_device_info));
+

+	mac = kmalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+	memset(mac, 0, sizeof(struct mac_device_info));
+

to use kzalloc(), and of course you should check that kmalloc()/kzalloc() don't return NULL!
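
I.e. something like (the error return depends on the caller):

	mac = kzalloc(sizeof(*mac), GFP_KERNEL);
	if (!mac)
		return NULL;	/* or bail out with -ENOMEM, as appropriate */

sizeof(*mac) also avoids repeating the struct name (and the spurious
const) in the allocation.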



Also:

+static int stmmac_clean_tx(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	unsigned int txsize = priv->dma_tx_size;
+	unsigned long ioaddr = dev->base_addr;
+	unsigned int entry = priv->dirty_tx % txsize;
+	unsigned int count = 0;
+
+	spin_lock(&priv->tx_lock);
+	while (priv->dirty_tx != priv->cur_tx) {
+		int last;
+		struct sk_buff *skb = priv->tx_skbuff[entry];
+		struct dma_desc *p = priv->dma_tx + entry;
+
+		if (priv->mac_type->ops->get_tx_owner(p))
+			break;
+
+		count++;
+
+		/* verify tx error by looking at the last segment */
+		last = priv->mac_type->ops->get_tx_ls(p);
+		if (likely(last)) {
+			int tx_error =
+			    priv->mac_type->ops->tx_status(&dev->stats,
+							   &priv->xstats,
+							   p, ioaddr);
+			if (likely(tx_error == 0)) {
+				dev->stats.tx_packets++;
+				priv->xstats.tx_pkt_n++;
+			} else
+				dev->stats.tx_errors++;
+		}
+		DBG(intr, DEBUG, "stmmac_tx: curr %d, dirty %d\n",
+		    priv->cur_tx, priv->dirty_tx);
+
+		if (likely(p->des2))
+			dma_unmap_single(priv->device, p->des2,
+					 priv->mac_type->ops->get_tx_len(p),
+					 DMA_TO_DEVICE);
+		if (unlikely(p->des3))
+			p->des3 = 0;
+
+		if (skb != NULL) {
+			/*
+			 * If there's room in the queue (limit it to size)
+			 * we add this skb back into the pool,
+			 * if it's the right size.
+			 */
+			if ((skb_queue_len(&priv->rx_recycle) <
+				priv->dma_rx_size) &&
+				skb_recycle_check(skb, priv->dma_buf_sz))
+				__skb_queue_head(&priv->rx_recycle, skb);
+			else
+				dev_kfree_skb_any(skb);

Why call dev_kfree_skb_any() here? From NAPI context it is overkill.
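
If stmmac_clean_tx() is only ever run from stmmac_poll(), i.e. softirq
context, a plain

	dev_kfree_skb(skb);

is enough.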

+
+			priv->tx_skbuff[entry] = NULL;
+		}
+
+		priv->mac_type->ops->release_tx_desc(p);
+
+		entry = (++priv->dirty_tx) % txsize;
+	}
+	if (unlikely(netif_queue_stopped(dev) &&
+		     stmmac_tx_avail(priv) > (MAX_SKB_FRAGS + 1)))
+		netif_wake_queue(dev);
+
+	spin_unlock(&priv->tx_lock);
+	return count;
+}



+static int stmmac_poll(struct napi_struct *napi, int budget)
+{
+	struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
+	struct net_device *dev = priv->dev;
+	int tx_cleaned = 0, work_done = 0;
+
+	priv->xstats.poll_n++;
+
+	tx_cleaned = stmmac_clean_tx(dev);
+
+	work_done = stmmac_rx(dev, budget);
+


+	if (tx_cleaned)
+		return budget;

Why is tx_cleaned used here to exit early? Returning the full budget
whenever some TX was reclaimed keeps the device in polled mode even when
there is no RX work at all (see the sketch after the quoted function).


+
+	/* If budget not fully consumed, exit the polling mode */
+	if (work_done < budget) {
+		napi_complete(napi);
+		stmmac_dma_enable_irq_rx(dev->base_addr);
+	}
+	return work_done;
+}
+
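
For reference, the usual pattern lets RX alone consume the budget and
treats TX completion as extra work (a minimal sketch, not a drop-in
replacement):

	static int stmmac_poll(struct napi_struct *napi, int budget)
	{
		struct stmmac_priv *priv = container_of(napi,
						struct stmmac_priv, napi);
		int work_done;

		priv->xstats.poll_n++;
		stmmac_clean_tx(priv->dev);

		work_done = stmmac_rx(priv->dev, budget);
		if (work_done < budget) {
			napi_complete(napi);
			stmmac_dma_enable_irq_rx(priv->dev->base_addr);
		}
		return work_done;
	}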