Add optional TX bundling ("tx_gather") support to usbnet: drivers that
provide info->tx_gather get their outgoing skbs queued on dev->tx_waitq
and aggregated into a single bulk URB once a byte/packet threshold is hit.

Fixes over the previous revision of this patch:
 - forward declaration / usbnet_bh call used "usbnet_aggreate_skb_ximt"
   while the definition said "..._xmit" (link failure); all occurrences
   renamed consistently (and spelling corrected) to
   usbnet_aggregate_skb_xmit.
 - tx_complete paired spin_lock_irqsave with a plain spin_unlock, leaking
   the saved IRQ state when completion runs in process context (e.g. via
   usb_kill_urb); now uses spin_unlock_irqrestore.

--- ../../linux-2.6.29.4/drivers/net/usb/usbnet.c	2009-05-19 07:52:34.000000000 +0800
+++ usbnet.c	2009-06-10 10:18:18.000000000 +0800
@@ -45,6 +45,8 @@
 
 #define DRIVER_VERSION		"22-Aug-2005"
 
+static int usbnet_aggregate_skb_xmit (struct usbnet *dev,
+				      struct sk_buff_head *q);
 
 /*-------------------------------------------------------------------------*/
 
@@ -551,6 +553,7 @@
 static int usbnet_stop (struct net_device *net)
 {
 	struct usbnet		*dev = netdev_priv(net);
+	struct sk_buff		*skb;
 	int			temp;
 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK (unlink_wakeup);
 	DECLARE_WAITQUEUE (wait, current);
@@ -579,6 +582,11 @@
 	dev->wait = NULL;
 	remove_wait_queue (&unlink_wakeup, &wait);
 
+	while (!skb_queue_empty (&dev->tx_waitq)) {
+		skb = skb_dequeue(&dev->tx_waitq);
+		dev_kfree_skb (skb);
+	}
+
 	usb_kill_urb(dev->interrupt);
 
 	/* deferred work (task, timer, softirq) must also stop.
@@ -863,13 +871,20 @@
 	struct sk_buff		*skb = (struct sk_buff *) urb->context;
 	struct skb_data		*entry = (struct skb_data *) skb->cb;
 	struct usbnet		*dev = entry->dev;
+	struct driver_info	*info = dev->driver_info;
+	unsigned long		flags;
+
+	if (info->tx_gather) {
+		spin_lock_irqsave (&dev->tx_waitq.lock, flags);
+		dev->tx_busy_pkt -= entry->pkt_cnt;
+		spin_unlock_irqrestore (&dev->tx_waitq.lock, flags);
+	}
 
 	if (urb->status == 0) {
-		dev->stats.tx_packets++;
 		dev->stats.tx_bytes += entry->length;
+		dev->stats.tx_packets += entry->pkt_cnt;
 	} else {
-		dev->stats.tx_errors++;
-
+		dev->stats.tx_errors += entry->pkt_cnt;
 		switch (urb->status) {
 		case -EPIPE:
 			usbnet_defer_kevent (dev, EVENT_TX_HALT);
@@ -920,6 +935,113 @@
 
 /*-------------------------------------------------------------------------*/
 
+static int usbnet_aggregate_skb_xmit (struct usbnet *dev, struct sk_buff_head *q)
+{
+	struct sk_buff		*skb = NULL;
+	int			pkt_cnt = 0;
+	struct skb_data		*entry;
+	int			retval = NET_XMIT_SUCCESS;
+	struct urb		*urb = NULL;
+	struct driver_info	*info = dev->driver_info;
+	size_t			actual_len = 0;
+
+	if (skb_queue_empty (q)) {
+		return retval;
+	}
+
+	spin_lock (&q->lock);	/* caller holds dev->txq.lock with irqs off */
+
+	pkt_cnt = skb_queue_len (q);
+	skb = info->tx_gather (dev, q, &actual_len);
+
+	dev->tx_wait_bytes -= actual_len;
+	dev->tx_busy_pkt += pkt_cnt;
+
+	spin_unlock (&q->lock);
+
+	if (!skb) {
+		goto drop;
+	}
+
+	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
+		if (netif_msg_tx_err (dev))
+			devdbg (dev, "no urb");
+		goto drop;
+	} else {
+		entry = (struct skb_data *) skb->cb;
+		entry->length = actual_len;
+		entry->pkt_cnt = pkt_cnt;
+		entry->urb = urb;
+		entry->dev = dev;
+		entry->state = tx_start;
+
+		usb_fill_bulk_urb (urb, dev->udev, dev->out,
+				skb->data, skb->len, tx_complete, skb);
+	}
+
+	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
+	case -EPIPE:
+		netif_stop_queue (dev->net);
+		usbnet_defer_kevent (dev, EVENT_TX_HALT);
+		break;
+	default:
+		if (netif_msg_tx_err (dev))
+			devdbg (dev, "tx: submit urb err %d", retval);
+		break;
+	case 0:
+		dev->net->trans_start = jiffies;
+		__skb_queue_tail (&dev->txq, skb);
+	}
+
+	if (retval) {
+		if (netif_msg_tx_err (dev))
+			devdbg (dev, "drop, code %d", retval);
+drop:
+		spin_lock (&q->lock);
+		dev->tx_busy_pkt -= pkt_cnt;
+		spin_unlock (&q->lock);
+
+		retval = NET_XMIT_SUCCESS;
+		dev->stats.tx_dropped += pkt_cnt;
+		if (skb)
+			dev_kfree_skb_any (skb);
+		usb_free_urb (urb);
+	} else if (netif_msg_tx_queued (dev)) {
+		devdbg (dev, "> tx, len %d, type 0x%x",
+			skb->len, skb->protocol);
+	}
+
+	return retval;
+}
+
+static int usbnet_bundle_xmit (struct sk_buff *skb, struct net_device *net)
+{
+	struct usbnet		*dev = netdev_priv(net);
+	int			retval = NET_XMIT_SUCCESS;
+	unsigned long		flags;
+
+	spin_lock_irqsave (&dev->tx_waitq.lock, flags);
+	__skb_queue_tail (&dev->tx_waitq, skb);
+	dev->tx_wait_bytes += skb->len;
+	spin_unlock (&dev->tx_waitq.lock);	/* irqs stay off until irqrestore below */
+
+	spin_lock (&dev->txq.lock);	/* must be taken with irqs off (also used in completion) */
+
+	if (skb_queue_empty (&dev->txq) ||
+	    (dev->tx_wait_bytes >= dev->tx_threshold) ||
+	    (dev->tx_waitq.qlen > 16)) {
+		retval = usbnet_aggregate_skb_xmit (dev, &dev->tx_waitq);
+	}
+
+	if ((dev->tx_busy_pkt +
+			dev->tx_waitq.qlen) >= TX_QLEN (dev))
+		netif_stop_queue (net);
+
+	spin_unlock_irqrestore (&dev->txq.lock, flags);
+
+	return retval;
+}
+
 static int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net)
 {
 	struct usbnet		*dev = netdev_priv(net);
@@ -953,6 +1075,7 @@
 	entry->dev = dev;
 	entry->state = tx_start;
 	entry->length = length;
+	entry->pkt_cnt = 1;
 
 	usb_fill_bulk_urb (urb, dev->udev, dev->out,
 			skb->data, skb->len, tx_complete, skb);
@@ -1014,6 +1137,8 @@
 	struct usbnet		*dev = (struct usbnet *) param;
 	struct sk_buff		*skb;
 	struct skb_data		*entry;
+	struct driver_info	*info = dev->driver_info;
+	unsigned long		flags;
 
 	while ((skb = skb_dequeue (&dev->done))) {
 		entry = (struct skb_data *) skb->cb;
@@ -1062,8 +1187,22 @@
 
 			if (dev->rxq.qlen < qlen)
 				tasklet_schedule (&dev->bh);
 		}
-		if (dev->txq.qlen < TX_QLEN (dev))
-			netif_wake_queue (dev->net);
+
+		if (info->tx_gather) {
+			spin_lock_irqsave (&dev->txq.lock, flags);
+			if (dev->txq.qlen == 0) {
+				usbnet_aggregate_skb_xmit (dev, &dev->tx_waitq);
+			}
+
+			if ((dev->tx_busy_pkt + dev->tx_waitq.qlen) <
+					TX_QLEN (dev))
+				netif_wake_queue (dev->net);
+
+			spin_unlock_irqrestore(&dev->txq.lock, flags);
+		} else {
+			if (dev->txq.qlen < TX_QLEN (dev))
+				netif_wake_queue (dev->net);
+		}
 	}
 }
@@ -1156,6 +1295,7 @@
 	skb_queue_head_init (&dev->rxq);
 	skb_queue_head_init (&dev->txq);
 	skb_queue_head_init (&dev->done);
+	skb_queue_head_init (&dev->tx_waitq);
 	dev->bh.func = usbnet_bh;
 	dev->bh.data = (unsigned long) dev;
 	INIT_WORK (&dev->kevent, kevent);
@@ -1181,7 +1321,10 @@
 
 	net->change_mtu = usbnet_change_mtu;
 	net->get_stats = usbnet_get_stats;
-	net->hard_start_xmit = usbnet_start_xmit;
+	if (info->tx_gather)
+		net->hard_start_xmit = usbnet_bundle_xmit;
+	else
+		net->hard_start_xmit = usbnet_start_xmit;
 	net->open = usbnet_open;
 	net->stop = usbnet_stop;
 	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
@@ -1228,6 +1371,8 @@
 	if (!dev->rx_urb_size)
 		dev->rx_urb_size = dev->hard_mtu;
+	if (!dev->tx_threshold)
+		dev->tx_threshold = dev->rx_urb_size;
 	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
 
 	SET_NETDEV_DEV(net, &udev->dev);
 	status = register_netdev (net);