diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 16a6edf..4483d0f 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -185,7 +185,6 @@ struct e1000_tx_ring {
 	/* array of buffer information structs */
 	struct e1000_buffer *buffer_info;
 
-	spinlock_t tx_lock;
 	uint16_t tdh;
 	uint16_t tdt;
 	boolean_t last_tx_tso;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index cf8af92..2dd6bc0 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1597,7 +1597,6 @@ setup_tx_desc_die:
 
 	txdr->next_to_use = 0;
 	txdr->next_to_clean = 0;
-	spin_lock_init(&txdr->tx_lock);
 
 	return 0;
 }
@@ -3368,14 +3367,9 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	    (adapter->hw.mac_type == e1000_82573))
 		e1000_transfer_dhcp_info(adapter, skb);
 
-	if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags))
-		/* Collision - tell upper layer to requeue */
-		return NETDEV_TX_LOCKED;
-
 	/* need: count + 2 desc gap to keep tail from touching
 	 * head, otherwise try next time */
 	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) {
-		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -3383,7 +3377,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
 			netif_stop_queue(netdev);
 			mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
-			spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 			return NETDEV_TX_BUSY;
 		}
 	}
@@ -3398,7 +3391,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	tso = e1000_tso(adapter, tx_ring, skb);
 	if (tso < 0) {
 		dev_kfree_skb_any(skb);
-		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 		return NETDEV_TX_OK;
 	}
 
@@ -3423,7 +3415,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	/* Make sure there is space in the ring for the next send. */
 	e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
-	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 
 	return NETDEV_TX_OK;
 }
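
Note (commentary, not part of the patch): a minimal sketch of why dropping the per-ring tx_lock and the NETDEV_TX_LOCKED requeue path is safe. Assuming the driver does not advertise NETIF_F_LLTX, the core transmit path of this kernel era already serializes entry into the driver's hard_start_xmit() under the device tx lock, roughly as paraphrased below; e1000_xmit_frame() therefore cannot be entered concurrently for the same ring, and the driver-level trylock/collision handling is dead code.

	/*
	 * Paraphrased sketch of the core-stack locking this patch relies on.
	 * HARD_TX_LOCK, HARD_TX_UNLOCK, netif_tx_lock and NETIF_F_LLTX are the
	 * real kernel symbols of this era; the bodies here are simplified,
	 * not verbatim kernel source.
	 */
	#define HARD_TX_LOCK(dev, cpu) {			\
		if ((dev->features & NETIF_F_LLTX) == 0) {	\
			netif_tx_lock(dev);			\
		}						\
	}

	/* Simplified shape of the hot path in dev_queue_xmit(): */
	HARD_TX_LOCK(dev, cpu);
	if (!netif_queue_stopped(dev))
		rc = dev->hard_start_xmit(skb, dev);	/* -> e1000_xmit_frame() */
	HARD_TX_UNLOCK(dev);

With the stack holding the device tx lock around every call, returning NETDEV_TX_LOCKED (which only makes sense for lock-less, NETIF_F_LLTX drivers that manage their own locking) no longer has a caller to service it, so both the spinlock and the collision return path can be removed together, as the hunks above do.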