Message-Id: <20120329124824.41fc7503d402b11a58a0dfde@canb.auug.org.au>
Date:	Thu, 29 Mar 2012 12:48:24 +1100
From:	Stephen Rothwell <sfr@...b.auug.org.au>
To:	Jiri Kosina <jkosina@...e.cz>
Cc:	linux-next@...r.kernel.org, linux-kernel@...r.kernel.org,
	"Justin P. Mattock" <justinmattock@...il.com>,
	Francois Romieu <romieu@...zoreil.com>
Subject: linux-next: manual merge of the trivial tree with Linus' tree

Hi Jiri,

Today's linux-next merge of the trivial tree got a conflict in
drivers/net/ethernet/realtek/r8169.c between commit df43ac7831a0 ("r8169:
move rtl8169_open after rtl_task it depends on") (and others) from Linus'
tree and commit a9d7e794ea66 ("r8169.c: fix comment typo") from the
trivial tree.

I fixed it up (see below) and can carry the fix as necessary.
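
[Editor's note: a rough sketch of how a fixup like this can be reproduced and
carried locally. The remote/branch names below are placeholders, not the real
linux-next refs, and the resolution is recorded via git's rerere machinery so
it can be replayed when the merge is redone:

    git config rerere.enabled true     # record the resolution for reuse
    git checkout -b test-merge linus/master
    git merge trivial/for-next
    # git stops with "CONFLICT (content): Merge conflict in
    #   drivers/net/ethernet/realtek/r8169.c"
    # Keep the reworked code from Linus' side, re-apply the comment wording
    # fix from the trivial tree, then conclude the merge:
    $EDITOR drivers/net/ethernet/realtek/r8169.c
    git add drivers/net/ethernet/realtek/r8169.c
    git commit --no-edit
]
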
-- 
Cheers,
Stephen Rothwell                    sfr@...b.auug.org.au

diff --cc drivers/net/ethernet/realtek/r8169.c
index 27c358c,7f06508..0000000
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@@ -5530,768 -5390,823 +5530,768 @@@ static void rtl_task(struct work_struc
  	struct net_device *dev = tp->dev;
  	int i;
  
 -	rtnl_lock();
 +	rtl_lock_work(tp);
  
 -	if (!netif_running(dev))
 +	if (!netif_running(dev) ||
 +	    !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
  		goto out_unlock;
  
 -	rtl8169_hw_reset(tp);
 -
 -	rtl8169_wait_for_quiescence(dev);
 -
 -	for (i = 0; i < NUM_RX_DESC; i++)
 -		rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
 -
 -	rtl8169_tx_clear(tp);
 -	rtl8169_init_ring_indexes(tp);
 +	for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
 +		bool pending;
  
 -	rtl_hw_start(dev);
 -	netif_wake_queue(dev);
 -	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
 +		pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
 +		if (pending)
 +			rtl_work[i].action(tp);
 +	}
  
  out_unlock:
 -	rtnl_unlock();
 -}
 -
 -static void rtl8169_tx_timeout(struct net_device *dev)
 -{
 -	rtl8169_schedule_work(dev, rtl8169_reset_task);
 +	rtl_unlock_work(tp);
  }
  
 -static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
 -			      u32 *opts)
 +static int rtl8169_poll(struct napi_struct *napi, int budget)
  {
 -	struct skb_shared_info *info = skb_shinfo(skb);
 -	unsigned int cur_frag, entry;
 -	struct TxDesc * uninitialized_var(txd);
 -	struct device *d = &tp->pci_dev->dev;
 -
 -	entry = tp->cur_tx;
 -	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
 -		const skb_frag_t *frag = info->frags + cur_frag;
 -		dma_addr_t mapping;
 -		u32 status, len;
 -		void *addr;
 +	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
 +	struct net_device *dev = tp->dev;
 +	u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
 +	int work_done= 0;
 +	u16 status;
  
 -		entry = (entry + 1) % NUM_TX_DESC;
 +	status = rtl_get_events(tp);
 +	rtl_ack_events(tp, status & ~tp->event_slow);
  
 -		txd = tp->TxDescArray + entry;
 -		len = skb_frag_size(frag);
 -		addr = skb_frag_address(frag);
 -		mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
 -		if (unlikely(dma_mapping_error(d, mapping))) {
 -			if (net_ratelimit())
 -				netif_err(tp, drv, tp->dev,
 -					  "Failed to map TX fragments DMA!\n");
 -			goto err_out;
 -		}
 +	if (status & RTL_EVENT_NAPI_RX)
 +		work_done = rtl_rx(dev, tp, (u32) budget);
  
 -		/* Anti gcc 2.95.3 bugware (sic) */
 -		status = opts[0] | len |
 -			(RingEnd * !((entry + 1) % NUM_TX_DESC));
 +	if (status & RTL_EVENT_NAPI_TX)
 +		rtl_tx(dev, tp);
  
 -		txd->opts1 = cpu_to_le32(status);
 -		txd->opts2 = cpu_to_le32(opts[1]);
 -		txd->addr = cpu_to_le64(mapping);
 +	if (status & tp->event_slow) {
 +		enable_mask &= ~tp->event_slow;
  
 -		tp->tx_skb[entry].len = len;
 +		rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
  	}
  
 -	if (cur_frag) {
 -		tp->tx_skb[entry].skb = skb;
 -		txd->opts1 |= cpu_to_le32(LastFrag);
 -	}
 +	if (work_done < budget) {
 +		napi_complete(napi);
  
 -	return cur_frag;
 +		rtl_irq_enable(tp, enable_mask);
 +		mmiowb();
 +	}
  
 -err_out:
 -	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
 -	return -EIO;
 +	return work_done;
  }
  
 -static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
 -				    struct sk_buff *skb, u32 *opts)
 +static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
  {
 -	const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
 -	u32 mss = skb_shinfo(skb)->gso_size;
 -	int offset = info->opts_offset;
 +	struct rtl8169_private *tp = netdev_priv(dev);
  
 -	if (mss) {
 -		opts[0] |= TD_LSO;
 -		opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
 -	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 -		const struct iphdr *ip = ip_hdr(skb);
 +	if (tp->mac_version > RTL_GIGA_MAC_VER_06)
 +		return;
  
 -		if (ip->protocol == IPPROTO_TCP)
 -			opts[offset] |= info->checksum.tcp;
 -		else if (ip->protocol == IPPROTO_UDP)
 -			opts[offset] |= info->checksum.udp;
 -		else
 -			WARN_ON_ONCE(1);
 -	}
 +	dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
 +	RTL_W32(RxMissed, 0);
  }
  
 -static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 -				      struct net_device *dev)
 +static void rtl8169_down(struct net_device *dev)
  {
  	struct rtl8169_private *tp = netdev_priv(dev);
 -	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
 -	struct TxDesc *txd = tp->TxDescArray + entry;
  	void __iomem *ioaddr = tp->mmio_addr;
 -	struct device *d = &tp->pci_dev->dev;
 -	dma_addr_t mapping;
 -	u32 status, len;
 -	u32 opts[2];
 -	int frags;
  
 -	if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
 -		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
 -		goto err_stop_0;
 -	}
 +	del_timer_sync(&tp->timer);
  
 -	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
 -		goto err_stop_0;
 +	napi_disable(&tp->napi);
 +	netif_stop_queue(dev);
  
 -	len = skb_headlen(skb);
 -	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
 -	if (unlikely(dma_mapping_error(d, mapping))) {
 -		if (net_ratelimit())
 -			netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
 -		goto err_dma_0;
 -	}
 +	rtl8169_hw_reset(tp);
 +	/*
 +	 * At this point device interrupts can not be enabled in any function,
 +	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
 +	 * and napi is disabled (rtl8169_poll).
 +	 */
 +	rtl8169_rx_missed(dev, ioaddr);
  
 -	tp->tx_skb[entry].len = len;
 -	txd->addr = cpu_to_le64(mapping);
 +	/* Give a racing hard_start_xmit a few cycles to complete. */
 +	synchronize_sched();
  
 -	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
 -	opts[0] = DescOwn;
 +	rtl8169_tx_clear(tp);
  
 -	rtl8169_tso_csum(tp, skb, opts);
 +	rtl8169_rx_clear(tp);
  
 -	frags = rtl8169_xmit_frags(tp, skb, opts);
 -	if (frags < 0)
 -		goto err_dma_1;
 -	else if (frags)
 -		opts[0] |= FirstFrag;
 -	else {
 -		opts[0] |= FirstFrag | LastFrag;
 -		tp->tx_skb[entry].skb = skb;
 -	}
 +	rtl_pll_power_down(tp);
 +}
  
 -	txd->opts2 = cpu_to_le32(opts[1]);
 +static int rtl8169_close(struct net_device *dev)
 +{
 +	struct rtl8169_private *tp = netdev_priv(dev);
 +	struct pci_dev *pdev = tp->pci_dev;
  
 -	wmb();
 +	pm_runtime_get_sync(&pdev->dev);
  
 -	/* Anti gcc 2.95.3 bugware (sic) */
 -	status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
 -	txd->opts1 = cpu_to_le32(status);
 +	/* Update counters before going down */
 +	rtl8169_update_counters(dev);
  
 -	tp->cur_tx += frags + 1;
 +	rtl_lock_work(tp);
 +	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
  
 -	wmb();
 +	rtl8169_down(dev);
 +	rtl_unlock_work(tp);
  
 -	RTL_W8(TxPoll, NPQ);
 +	free_irq(pdev->irq, dev);
  
 -	if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
 -		netif_stop_queue(dev);
 -		smp_rmb();
 -		if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
 -			netif_wake_queue(dev);
 -	}
 +	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
 +			  tp->RxPhyAddr);
 +	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
 +			  tp->TxPhyAddr);
 +	tp->TxDescArray = NULL;
 +	tp->RxDescArray = NULL;
  
 -	return NETDEV_TX_OK;
 +	pm_runtime_put_sync(&pdev->dev);
  
 -err_dma_1:
 -	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
 -err_dma_0:
 -	dev_kfree_skb(skb);
 -	dev->stats.tx_dropped++;
 -	return NETDEV_TX_OK;
 +	return 0;
 +}
  
 -err_stop_0:
 -	netif_stop_queue(dev);
 -	dev->stats.tx_dropped++;
 -	return NETDEV_TX_BUSY;
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +static void rtl8169_netpoll(struct net_device *dev)
 +{
 +	struct rtl8169_private *tp = netdev_priv(dev);
 +
 +	rtl8169_interrupt(tp->pci_dev->irq, dev);
  }
 +#endif
  
 -static void rtl8169_pcierr_interrupt(struct net_device *dev)
 +static int rtl_open(struct net_device *dev)
  {
  	struct rtl8169_private *tp = netdev_priv(dev);
 +	void __iomem *ioaddr = tp->mmio_addr;
  	struct pci_dev *pdev = tp->pci_dev;
 -	u16 pci_status, pci_cmd;
 -
 -	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
 -	pci_read_config_word(pdev, PCI_STATUS, &pci_status);
 +	int retval = -ENOMEM;
  
 -	netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
 -		  pci_cmd, pci_status);
 +	pm_runtime_get_sync(&pdev->dev);
  
  	/*
- 	 * Rx and Tx desscriptors needs 256 bytes alignment.
 -	 * The recovery sequence below admits a very elaborated explanation:
 -	 * - it seems to work;
 -	 * - I did not see what else could be done;
 -	 * - it makes iop3xx happy.
 -	 *
 -	 * Feel free to adjust to your needs.
++	 * Rx and Tx desscriptors need 256 bytes alignment.
 +	 * dma_alloc_coherent provides more.
  	 */
 -	if (pdev->broken_parity_status)
 -		pci_cmd &= ~PCI_COMMAND_PARITY;
 -	else
 -		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
 +	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
 +					     &tp->TxPhyAddr, GFP_KERNEL);
 +	if (!tp->TxDescArray)
 +		goto err_pm_runtime_put;
  
 -	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
 +	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
 +					     &tp->RxPhyAddr, GFP_KERNEL);
 +	if (!tp->RxDescArray)
 +		goto err_free_tx_0;
  
 -	pci_write_config_word(pdev, PCI_STATUS,
 -		pci_status & (PCI_STATUS_DETECTED_PARITY |
 -		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
 -		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
 +	retval = rtl8169_init_ring(dev);
 +	if (retval < 0)
 +		goto err_free_rx_1;
  
 -	/* The infamous DAC f*ckup only happens at boot time */
 -	if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
 -		void __iomem *ioaddr = tp->mmio_addr;
 +	INIT_WORK(&tp->wk.work, rtl_task);
  
 -		netif_info(tp, intr, dev, "disabling PCI DAC\n");
 -		tp->cp_cmd &= ~PCIDAC;
 -		RTL_W16(CPlusCmd, tp->cp_cmd);
 -		dev->features &= ~NETIF_F_HIGHDMA;
 -	}
 +	smp_mb();
  
 -	rtl8169_hw_reset(tp);
 +	rtl_request_firmware(tp);
  
 -	rtl8169_schedule_work(dev, rtl8169_reinit_task);
 -}
 +	retval = request_irq(pdev->irq, rtl8169_interrupt,
 +			     (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
 +			     dev->name, dev);
 +	if (retval < 0)
 +		goto err_release_fw_2;
  
 -static void rtl8169_tx_interrupt(struct net_device *dev,
 -				 struct rtl8169_private *tp,
 -				 void __iomem *ioaddr)
 -{
 -	unsigned int dirty_tx, tx_left;
 +	rtl_lock_work(tp);
  
 -	dirty_tx = tp->dirty_tx;
 -	smp_rmb();
 -	tx_left = tp->cur_tx - dirty_tx;
 +	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
  
 -	while (tx_left > 0) {
 -		unsigned int entry = dirty_tx % NUM_TX_DESC;
 -		struct ring_info *tx_skb = tp->tx_skb + entry;
 -		u32 status;
 +	napi_enable(&tp->napi);
  
 -		rmb();
 -		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
 -		if (status & DescOwn)
 -			break;
 +	rtl8169_init_phy(dev, tp);
  
 -		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
 -				     tp->TxDescArray + entry);
 -		if (status & LastFrag) {
 -			dev->stats.tx_packets++;
 -			dev->stats.tx_bytes += tx_skb->skb->len;
 -			dev_kfree_skb(tx_skb->skb);
 -			tx_skb->skb = NULL;
 -		}
 -		dirty_tx++;
 -		tx_left--;
 -	}
 +	__rtl8169_set_features(dev, dev->features);
  
 -	if (tp->dirty_tx != dirty_tx) {
 -		tp->dirty_tx = dirty_tx;
 -		smp_wmb();
 -		if (netif_queue_stopped(dev) &&
 -		    (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
 -			netif_wake_queue(dev);
 -		}
 -		/*
 -		 * 8168 hack: TxPoll requests are lost when the Tx packets are
 -		 * too close. Let's kick an extra TxPoll request when a burst
 -		 * of start_xmit activity is detected (if it is not detected,
 -		 * it is slow enough). -- FR
 -		 */
 -		smp_rmb();
 -		if (tp->cur_tx != dirty_tx)
 -			RTL_W8(TxPoll, NPQ);
 -	}
 -}
 +	rtl_pll_power_up(tp);
  
 -static inline int rtl8169_fragmented_frame(u32 status)
 -{
 -	return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
 -}
 +	rtl_hw_start(dev);
  
 -static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
 -{
 -	u32 status = opts1 & RxProtoMask;
 +	netif_start_queue(dev);
  
 -	if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
 -	    ((status == RxProtoUDP) && !(opts1 & UDPFail)))
 -		skb->ip_summed = CHECKSUM_UNNECESSARY;
 -	else
 -		skb_checksum_none_assert(skb);
 -}
 +	rtl_unlock_work(tp);
  
 -static struct sk_buff *rtl8169_try_rx_copy(void *data,
 -					   struct rtl8169_private *tp,
 -					   int pkt_size,
 -					   dma_addr_t addr)
 -{
 -	struct sk_buff *skb;
 -	struct device *d = &tp->pci_dev->dev;
 +	tp->saved_wolopts = 0;
 +	pm_runtime_put_noidle(&pdev->dev);
  
 -	data = rtl8169_align(data);
 -	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
 -	prefetch(data);
 -	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
 -	if (skb)
 -		memcpy(skb->data, data, pkt_size);
 -	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
 +	rtl8169_check_link_status(dev, tp, ioaddr);
 +out:
 +	return retval;
  
 -	return skb;
 +err_release_fw_2:
 +	rtl_release_firmware(tp);
 +	rtl8169_rx_clear(tp);
 +err_free_rx_1:
 +	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
 +			  tp->RxPhyAddr);
 +	tp->RxDescArray = NULL;
 +err_free_tx_0:
 +	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
 +			  tp->TxPhyAddr);
 +	tp->TxDescArray = NULL;
 +err_pm_runtime_put:
 +	pm_runtime_put_noidle(&pdev->dev);
 +	goto out;
  }
  
 -static int rtl8169_rx_interrupt(struct net_device *dev,
 -				struct rtl8169_private *tp,
 -				void __iomem *ioaddr, u32 budget)
 +static struct rtnl_link_stats64 *
 +rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
  {
 -	unsigned int cur_rx, rx_left;
 -	unsigned int count;
 +	struct rtl8169_private *tp = netdev_priv(dev);
 +	void __iomem *ioaddr = tp->mmio_addr;
 +	unsigned int start;
  
 -	cur_rx = tp->cur_rx;
 -	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
 -	rx_left = min(rx_left, budget);
 +	if (netif_running(dev))
 +		rtl8169_rx_missed(dev, ioaddr);
  
 -	for (; rx_left > 0; rx_left--, cur_rx++) {
 -		unsigned int entry = cur_rx % NUM_RX_DESC;
 -		struct RxDesc *desc = tp->RxDescArray + entry;
 -		u32 status;
 +	do {
 +		start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
 +		stats->rx_packets = tp->rx_stats.packets;
 +		stats->rx_bytes	= tp->rx_stats.bytes;
 +	} while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
  
 -		rmb();
 -		status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
  
 -		if (status & DescOwn)
 -			break;
 -		if (unlikely(status & RxRES)) {
 -			netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
 -				   status);
 -			dev->stats.rx_errors++;
 -			if (status & (RxRWT | RxRUNT))
 -				dev->stats.rx_length_errors++;
 -			if (status & RxCRC)
 -				dev->stats.rx_crc_errors++;
 -			if (status & RxFOVF) {
 -				rtl8169_schedule_work(dev, rtl8169_reset_task);
 -				dev->stats.rx_fifo_errors++;
 -			}
 -			rtl8169_mark_to_asic(desc, rx_buf_sz);
 -		} else {
 -			struct sk_buff *skb;
 -			dma_addr_t addr = le64_to_cpu(desc->addr);
 -			int pkt_size = (status & 0x00003fff) - 4;
 +	do {
 +		start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
 +		stats->tx_packets = tp->tx_stats.packets;
 +		stats->tx_bytes	= tp->tx_stats.bytes;
 +	} while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
  
 -			/*
 -			 * The driver does not support incoming fragmented
 -			 * frames. They are seen as a symptom of over-mtu
 -			 * sized frames.
 -			 */
 -			if (unlikely(rtl8169_fragmented_frame(status))) {
 -				dev->stats.rx_dropped++;
 -				dev->stats.rx_length_errors++;
 -				rtl8169_mark_to_asic(desc, rx_buf_sz);
 -				continue;
 -			}
 +	stats->rx_dropped	= dev->stats.rx_dropped;
 +	stats->tx_dropped	= dev->stats.tx_dropped;
 +	stats->rx_length_errors = dev->stats.rx_length_errors;
 +	stats->rx_errors	= dev->stats.rx_errors;
 +	stats->rx_crc_errors	= dev->stats.rx_crc_errors;
 +	stats->rx_fifo_errors	= dev->stats.rx_fifo_errors;
 +	stats->rx_missed_errors = dev->stats.rx_missed_errors;
  
 -			skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
 -						  tp, pkt_size, addr);
 -			rtl8169_mark_to_asic(desc, rx_buf_sz);
 -			if (!skb) {
 -				dev->stats.rx_dropped++;
 -				continue;
 -			}
 +	return stats;
 +}
  
 -			rtl8169_rx_csum(skb, status);
 -			skb_put(skb, pkt_size);
 -			skb->protocol = eth_type_trans(skb, dev);
 +static void rtl8169_net_suspend(struct net_device *dev)
 +{
 +	struct rtl8169_private *tp = netdev_priv(dev);
  
 -			rtl8169_rx_vlan_tag(desc, skb);
 +	if (!netif_running(dev))
 +		return;
  
 -			napi_gro_receive(&tp->napi, skb);
 +	netif_device_detach(dev);
 +	netif_stop_queue(dev);
  
 -			dev->stats.rx_bytes += pkt_size;
 -			dev->stats.rx_packets++;
 -		}
 +	rtl_lock_work(tp);
 +	napi_disable(&tp->napi);
 +	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
 +	rtl_unlock_work(tp);
  
 -		/* Work around for AMD plateform. */
 -		if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
 -		    (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
 -			desc->opts2 = 0;
 -			cur_rx++;
 -		}
 -	}
 +	rtl_pll_power_down(tp);
 +}
  
 -	count = cur_rx - tp->cur_rx;
 -	tp->cur_rx = cur_rx;
 +#ifdef CONFIG_PM
  
 -	tp->dirty_rx += count;
 +static int rtl8169_suspend(struct device *device)
 +{
 +	struct pci_dev *pdev = to_pci_dev(device);
 +	struct net_device *dev = pci_get_drvdata(pdev);
  
 -	return count;
 +	rtl8169_net_suspend(dev);
 +
 +	return 0;
  }
  
 -static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
 +static void __rtl8169_resume(struct net_device *dev)
  {
 -	struct net_device *dev = dev_instance;
  	struct rtl8169_private *tp = netdev_priv(dev);
 -	void __iomem *ioaddr = tp->mmio_addr;
 -	int handled = 0;
 -	int status;
  
 -	/* loop handling interrupts until we have no new ones or
 -	 * we hit a invalid/hotplug case.
 -	 */
 -	status = RTL_R16(IntrStatus);
 -	while (status && status != 0xffff) {
 -		status &= tp->intr_event;
 -		if (!status)
 -			break;
 -
 -		handled = 1;
 +	netif_device_attach(dev);
  
 -		/* Handle all of the error cases first. These will reset
 -		 * the chip, so just exit the loop.
 -		 */
 -		if (unlikely(!netif_running(dev))) {
 -			rtl8169_hw_reset(tp);
 -			break;
 -		}
 +	rtl_pll_power_up(tp);
  
 -		if (unlikely(status & RxFIFOOver)) {
 -			switch (tp->mac_version) {
 -			/* Work around for rx fifo overflow */
 -			case RTL_GIGA_MAC_VER_11:
 -				netif_stop_queue(dev);
 -				rtl8169_tx_timeout(dev);
 -				goto done;
 -			default:
 -				break;
 -			}
 -		}
 +	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
  
 -		if (unlikely(status & SYSErr)) {
 -			rtl8169_pcierr_interrupt(dev);
 -			break;
 -		}
 +	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
 +}
  
 -		if (status & LinkChg)
 -			__rtl8169_check_link_status(dev, tp, ioaddr, true);
 +static int rtl8169_resume(struct device *device)
 +{
 +	struct pci_dev *pdev = to_pci_dev(device);
 +	struct net_device *dev = pci_get_drvdata(pdev);
 +	struct rtl8169_private *tp = netdev_priv(dev);
  
 -		/* We need to see the lastest version of tp->intr_mask to
 -		 * avoid ignoring an MSI interrupt and having to wait for
 -		 * another event which may never come.
 -		 */
 -		smp_rmb();
 -		if (status & tp->intr_mask & tp->napi_event) {
 -			RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
 -			tp->intr_mask = ~tp->napi_event;
 +	rtl8169_init_phy(dev, tp);
  
 -			if (likely(napi_schedule_prep(&tp->napi)))
 -				__napi_schedule(&tp->napi);
 -			else
 -				netif_info(tp, intr, dev,
 -					   "interrupt %04x in poll\n", status);
 -		}
 +	if (netif_running(dev))
 +		__rtl8169_resume(dev);
  
 -		/* We only get a new MSI interrupt when all active irq
 -		 * sources on the chip have been acknowledged. So, ack
 -		 * everything we've seen and check if new sources have become
 -		 * active to avoid blocking all interrupts from the chip.
 -		 */
 -		RTL_W16(IntrStatus,
 -			(status & RxFIFOOver) ? (status | RxOverflow) : status);
 -		status = RTL_R16(IntrStatus);
 -	}
 -done:
 -	return IRQ_RETVAL(handled);
 +	return 0;
  }
  
 -static int rtl8169_poll(struct napi_struct *napi, int budget)
 +static int rtl8169_runtime_suspend(struct device *device)
  {
 -	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
 -	struct net_device *dev = tp->dev;
 -	void __iomem *ioaddr = tp->mmio_addr;
 -	int work_done;
 +	struct pci_dev *pdev = to_pci_dev(device);
 +	struct net_device *dev = pci_get_drvdata(pdev);
 +	struct rtl8169_private *tp = netdev_priv(dev);
  
 -	work_done = rtl8169_rx_interrupt(dev, tp, ioaddr, (u32) budget);
 -	rtl8169_tx_interrupt(dev, tp, ioaddr);
 +	if (!tp->TxDescArray)
 +		return 0;
  
 -	if (work_done < budget) {
 -		napi_complete(napi);
 +	rtl_lock_work(tp);
 +	tp->saved_wolopts = __rtl8169_get_wol(tp);
 +	__rtl8169_set_wol(tp, WAKE_ANY);
 +	rtl_unlock_work(tp);
  
 -		/* We need for force the visibility of tp->intr_mask
 -		 * for other CPUs, as we can loose an MSI interrupt
 -		 * and potentially wait for a retransmit timeout if we don't.
 -		 * The posted write to IntrMask is safe, as it will
 -		 * eventually make it to the chip and we won't loose anything
 -		 * until it does.
 -		 */
 -		tp->intr_mask = 0xffff;
 -		wmb();
 -		RTL_W16(IntrMask, tp->intr_event);
 -	}
 +	rtl8169_net_suspend(dev);
  
 -	return work_done;
 +	return 0;
  }
  
 -static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
 +static int rtl8169_runtime_resume(struct device *device)
  {
 +	struct pci_dev *pdev = to_pci_dev(device);
 +	struct net_device *dev = pci_get_drvdata(pdev);
  	struct rtl8169_private *tp = netdev_priv(dev);
  
 -	if (tp->mac_version > RTL_GIGA_MAC_VER_06)
 -		return;
 +	if (!tp->TxDescArray)
 +		return 0;
  
 -	dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
 -	RTL_W32(RxMissed, 0);
 +	rtl_lock_work(tp);
 +	__rtl8169_set_wol(tp, tp->saved_wolopts);
 +	tp->saved_wolopts = 0;
 +	rtl_unlock_work(tp);
 +
 +	rtl8169_init_phy(dev, tp);
 +
 +	__rtl8169_resume(dev);
 +
 +	return 0;
  }
  
 -static void rtl8169_down(struct net_device *dev)
 +static int rtl8169_runtime_idle(struct device *device)
  {
 +	struct pci_dev *pdev = to_pci_dev(device);
 +	struct net_device *dev = pci_get_drvdata(pdev);
  	struct rtl8169_private *tp = netdev_priv(dev);
 -	void __iomem *ioaddr = tp->mmio_addr;
 -
 -	del_timer_sync(&tp->timer);
  
 -	netif_stop_queue(dev);
 -
 -	napi_disable(&tp->napi);
 +	return tp->TxDescArray ? -EBUSY : 0;
 +}
  
 -	spin_lock_irq(&tp->lock);
 +static const struct dev_pm_ops rtl8169_pm_ops = {
 +	.suspend		= rtl8169_suspend,
 +	.resume			= rtl8169_resume,
 +	.freeze			= rtl8169_suspend,
 +	.thaw			= rtl8169_resume,
 +	.poweroff		= rtl8169_suspend,
 +	.restore		= rtl8169_resume,
 +	.runtime_suspend	= rtl8169_runtime_suspend,
 +	.runtime_resume		= rtl8169_runtime_resume,
 +	.runtime_idle		= rtl8169_runtime_idle,
 +};
  
 -	rtl8169_hw_reset(tp);
 -	/*
 -	 * At this point device interrupts can not be enabled in any function,
 -	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task,
 -	 * rtl8169_reinit_task) and napi is disabled (rtl8169_poll).
 -	 */
 -	rtl8169_rx_missed(dev, ioaddr);
 +#define RTL8169_PM_OPS	(&rtl8169_pm_ops)
  
 -	spin_unlock_irq(&tp->lock);
 +#else /* !CONFIG_PM */
  
 -	synchronize_irq(dev->irq);
 +#define RTL8169_PM_OPS	NULL
  
 -	/* Give a racing hard_start_xmit a few cycles to complete. */
 -	synchronize_sched();  /* FIXME: should this be synchronize_irq()? */
 +#endif /* !CONFIG_PM */
  
 -	rtl8169_tx_clear(tp);
 +static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
 +{
 +	void __iomem *ioaddr = tp->mmio_addr;
  
 -	rtl8169_rx_clear(tp);
 +	/* WoL fails with 8168b when the receiver is disabled. */
 +	switch (tp->mac_version) {
 +	case RTL_GIGA_MAC_VER_11:
 +	case RTL_GIGA_MAC_VER_12:
 +	case RTL_GIGA_MAC_VER_17:
 +		pci_clear_master(tp->pci_dev);
  
 -	rtl_pll_power_down(tp);
 +		RTL_W8(ChipCmd, CmdRxEnb);
 +		/* PCI commit */
 +		RTL_R8(ChipCmd);
 +		break;
 +	default:
 +		break;
 +	}
  }
  
 -static int rtl8169_close(struct net_device *dev)
 +static void rtl_shutdown(struct pci_dev *pdev)
  {
 +	struct net_device *dev = pci_get_drvdata(pdev);
  	struct rtl8169_private *tp = netdev_priv(dev);
 -	struct pci_dev *pdev = tp->pci_dev;
 +	struct device *d = &pdev->dev;
  
 -	pm_runtime_get_sync(&pdev->dev);
 +	pm_runtime_get_sync(d);
  
 -	/* Update counters before going down */
 -	rtl8169_update_counters(dev);
 +	rtl8169_net_suspend(dev);
  
 -	rtl8169_down(dev);
 +	/* Restore original MAC address */
 +	rtl_rar_set(tp, dev->perm_addr);
  
 -	free_irq(dev->irq, dev);
 +	rtl8169_hw_reset(tp);
  
 -	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
 -			  tp->RxPhyAddr);
 -	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
 -			  tp->TxPhyAddr);
 -	tp->TxDescArray = NULL;
 -	tp->RxDescArray = NULL;
 +	if (system_state == SYSTEM_POWER_OFF) {
 +		if (__rtl8169_get_wol(tp) & WAKE_ANY) {
 +			rtl_wol_suspend_quirk(tp);
 +			rtl_wol_shutdown_quirk(tp);
 +		}
  
 -	pm_runtime_put_sync(&pdev->dev);
 +		pci_wake_from_d3(pdev, true);
 +		pci_set_power_state(pdev, PCI_D3hot);
 +	}
  
 -	return 0;
 +	pm_runtime_put_noidle(d);
  }
  
 -static void rtl_set_rx_mode(struct net_device *dev)
 +static void __devexit rtl_remove_one(struct pci_dev *pdev)
  {
 +	struct net_device *dev = pci_get_drvdata(pdev);
  	struct rtl8169_private *tp = netdev_priv(dev);
 -	void __iomem *ioaddr = tp->mmio_addr;
 -	unsigned long flags;
 -	u32 mc_filter[2];	/* Multicast hash filter */
 -	int rx_mode;
 -	u32 tmp = 0;
 -
 -	if (dev->flags & IFF_PROMISC) {
 -		/* Unconditionally log net taps. */
 -		netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
 -		rx_mode =
 -		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
 -		    AcceptAllPhys;
 -		mc_filter[1] = mc_filter[0] = 0xffffffff;
 -	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
 -		   (dev->flags & IFF_ALLMULTI)) {
 -		/* Too many to filter perfectly -- accept all multicasts. */
 -		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
 -		mc_filter[1] = mc_filter[0] = 0xffffffff;
 -	} else {
 -		struct netdev_hw_addr *ha;
  
 -		rx_mode = AcceptBroadcast | AcceptMyPhys;
 -		mc_filter[1] = mc_filter[0] = 0;
 -		netdev_for_each_mc_addr(ha, dev) {
 -			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
 -			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 -			rx_mode |= AcceptMulticast;
 -		}
 +	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
 +	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
 +	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
 +		rtl8168_driver_stop(tp);
  	}
  
 -	spin_lock_irqsave(&tp->lock, flags);
 -
 -	tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
 +	cancel_work_sync(&tp->wk.work);
  
 -	if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
 -		u32 data = mc_filter[0];
 +	unregister_netdev(dev);
  
 -		mc_filter[0] = swab32(mc_filter[1]);
 -		mc_filter[1] = swab32(data);
 -	}
 +	rtl_release_firmware(tp);
  
 -	RTL_W32(MAR0 + 4, mc_filter[1]);
 -	RTL_W32(MAR0 + 0, mc_filter[0]);
 +	if (pci_dev_run_wake(pdev))
 +		pm_runtime_get_noresume(&pdev->dev);
  
 -	RTL_W32(RxConfig, tmp);
 +	/* restore original MAC address */
 +	rtl_rar_set(tp, dev->perm_addr);
  
 -	spin_unlock_irqrestore(&tp->lock, flags);
 +	rtl_disable_msi(pdev, tp);
 +	rtl8169_release_board(pdev, dev, tp->mmio_addr);
 +	pci_set_drvdata(pdev, NULL);
  }
  
 -/**
 - *  rtl8169_get_stats - Get rtl8169 read/write statistics
 - *  @dev: The Ethernet Device to get statistics for
 - *
 - *  Get TX/RX statistics for rtl8169
 - */
 -static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
 +static const struct net_device_ops rtl_netdev_ops = {
 +	.ndo_open		= rtl_open,
 +	.ndo_stop		= rtl8169_close,
 +	.ndo_get_stats64	= rtl8169_get_stats64,
 +	.ndo_start_xmit		= rtl8169_start_xmit,
 +	.ndo_tx_timeout		= rtl8169_tx_timeout,
 +	.ndo_validate_addr	= eth_validate_addr,
 +	.ndo_change_mtu		= rtl8169_change_mtu,
 +	.ndo_fix_features	= rtl8169_fix_features,
 +	.ndo_set_features	= rtl8169_set_features,
 +	.ndo_set_mac_address	= rtl_set_mac_address,
 +	.ndo_do_ioctl		= rtl8169_ioctl,
 +	.ndo_set_rx_mode	= rtl_set_rx_mode,
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +	.ndo_poll_controller	= rtl8169_netpoll,
 +#endif
 +
 +};
 +
 +static const struct rtl_cfg_info {
 +	void (*hw_start)(struct net_device *);
 +	unsigned int region;
 +	unsigned int align;
 +	u16 event_slow;
 +	unsigned features;
 +	u8 default_ver;
 +} rtl_cfg_infos [] = {
 +	[RTL_CFG_0] = {
 +		.hw_start	= rtl_hw_start_8169,
 +		.region		= 1,
 +		.align		= 0,
 +		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver,
 +		.features	= RTL_FEATURE_GMII,
 +		.default_ver	= RTL_GIGA_MAC_VER_01,
 +	},
 +	[RTL_CFG_1] = {
 +		.hw_start	= rtl_hw_start_8168,
 +		.region		= 2,
 +		.align		= 8,
 +		.event_slow	= SYSErr | LinkChg | RxOverflow,
 +		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
 +		.default_ver	= RTL_GIGA_MAC_VER_11,
 +	},
 +	[RTL_CFG_2] = {
 +		.hw_start	= rtl_hw_start_8101,
 +		.region		= 2,
 +		.align		= 8,
 +		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver |
 +				  PCSTimeout,
 +		.features	= RTL_FEATURE_MSI,
 +		.default_ver	= RTL_GIGA_MAC_VER_13,
 +	}
 +};
 +
 +/* Cfg9346_Unlock assumed. */
 +static unsigned rtl_try_msi(struct rtl8169_private *tp,
 +			    const struct rtl_cfg_info *cfg)
  {
 -	struct rtl8169_private *tp = netdev_priv(dev);
  	void __iomem *ioaddr = tp->mmio_addr;
 -	unsigned long flags;
 +	unsigned msi = 0;
 +	u8 cfg2;
  
 -	if (netif_running(dev)) {
 -		spin_lock_irqsave(&tp->lock, flags);
 -		rtl8169_rx_missed(dev, ioaddr);
 -		spin_unlock_irqrestore(&tp->lock, flags);
 +	cfg2 = RTL_R8(Config2) & ~MSIEnable;
 +	if (cfg->features & RTL_FEATURE_MSI) {
 +		if (pci_enable_msi(tp->pci_dev)) {
 +			netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
 +		} else {
 +			cfg2 |= MSIEnable;
 +			msi = RTL_FEATURE_MSI;
 +		}
  	}
 -
 -	return &dev->stats;
 +	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
 +		RTL_W8(Config2, cfg2);
 +	return msi;
  }
  
 -static void rtl8169_net_suspend(struct net_device *dev)
 +static int __devinit
 +rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
  {
 -	struct rtl8169_private *tp = netdev_priv(dev);
 +	const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
 +	const unsigned int region = cfg->region;
 +	struct rtl8169_private *tp;
 +	struct mii_if_info *mii;
 +	struct net_device *dev;
 +	void __iomem *ioaddr;
 +	int chipset, i;
 +	int rc;
  
 -	if (!netif_running(dev))
 -		return;
 +	if (netif_msg_drv(&debug)) {
 +		printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
 +		       MODULENAME, RTL8169_VERSION);
 +	}
  
 -	rtl_pll_power_down(tp);
 +	dev = alloc_etherdev(sizeof (*tp));
 +	if (!dev) {
 +		rc = -ENOMEM;
 +		goto out;
 +	}
 +
 +	SET_NETDEV_DEV(dev, &pdev->dev);
 +	dev->netdev_ops = &rtl_netdev_ops;
 +	tp = netdev_priv(dev);
 +	tp->dev = dev;
 +	tp->pci_dev = pdev;
 +	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
 +
 +	mii = &tp->mii;
 +	mii->dev = dev;
 +	mii->mdio_read = rtl_mdio_read;
 +	mii->mdio_write = rtl_mdio_write;
 +	mii->phy_id_mask = 0x1f;
 +	mii->reg_num_mask = 0x1f;
 +	mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
 +
 +	/* disable ASPM completely as that cause random device stop working
 +	 * problems as well as full system hangs for some PCIe devices users */
 +	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
 +				     PCIE_LINK_STATE_CLKPM);
  
 -	netif_device_detach(dev);
 -	netif_stop_queue(dev);
 -}
 +	/* enable device (incl. PCI PM wakeup and hotplug setup) */
 +	rc = pci_enable_device(pdev);
 +	if (rc < 0) {
 +		netif_err(tp, probe, dev, "enable failure\n");
 +		goto err_out_free_dev_1;
 +	}
  
 -#ifdef CONFIG_PM
 +	if (pci_set_mwi(pdev) < 0)
 +		netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
  
 -static int rtl8169_suspend(struct device *device)
 -{
 -	struct pci_dev *pdev = to_pci_dev(device);
 -	struct net_device *dev = pci_get_drvdata(pdev);
 +	/* make sure PCI base addr 1 is MMIO */
 +	if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
 +		netif_err(tp, probe, dev,
 +			  "region #%d not an MMIO resource, aborting\n",
 +			  region);
 +		rc = -ENODEV;
 +		goto err_out_mwi_2;
 +	}
  
 -	rtl8169_net_suspend(dev);
 +	/* check for weird/broken PCI region reporting */
 +	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
 +		netif_err(tp, probe, dev,
 +			  "Invalid PCI region size(s), aborting\n");
 +		rc = -ENODEV;
 +		goto err_out_mwi_2;
 +	}
  
 -	return 0;
 -}
 +	rc = pci_request_regions(pdev, MODULENAME);
 +	if (rc < 0) {
 +		netif_err(tp, probe, dev, "could not request regions\n");
 +		goto err_out_mwi_2;
 +	}
  
 -static void __rtl8169_resume(struct net_device *dev)
 -{
 -	struct rtl8169_private *tp = netdev_priv(dev);
 +	tp->cp_cmd = RxChkSum;
  
 -	netif_device_attach(dev);
 +	if ((sizeof(dma_addr_t) > 4) &&
 +	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
 +		tp->cp_cmd |= PCIDAC;
 +		dev->features |= NETIF_F_HIGHDMA;
 +	} else {
 +		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 +		if (rc < 0) {
 +			netif_err(tp, probe, dev, "DMA configuration failed\n");
 +			goto err_out_free_res_3;
 +		}
 +	}
  
 -	rtl_pll_power_up(tp);
 +	/* ioremap MMIO region */
 +	ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
 +	if (!ioaddr) {
 +		netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
 +		rc = -EIO;
 +		goto err_out_free_res_3;
 +	}
 +	tp->mmio_addr = ioaddr;
  
 -	rtl8169_schedule_work(dev, rtl8169_reset_task);
 -}
 +	if (!pci_is_pcie(pdev))
 +		netif_info(tp, probe, dev, "not PCI Express\n");
  
 -static int rtl8169_resume(struct device *device)
 -{
 -	struct pci_dev *pdev = to_pci_dev(device);
 -	struct net_device *dev = pci_get_drvdata(pdev);
 -	struct rtl8169_private *tp = netdev_priv(dev);
 +	/* Identify chip attached to board */
 +	rtl8169_get_mac_version(tp, dev, cfg->default_ver);
  
 -	rtl8169_init_phy(dev, tp);
 +	rtl_init_rxcfg(tp);
  
 -	if (netif_running(dev))
 -		__rtl8169_resume(dev);
 +	rtl_irq_disable(tp);
  
 -	return 0;
 -}
 +	rtl_hw_reset(tp);
  
 -static int rtl8169_runtime_suspend(struct device *device)
 -{
 -	struct pci_dev *pdev = to_pci_dev(device);
 -	struct net_device *dev = pci_get_drvdata(pdev);
 -	struct rtl8169_private *tp = netdev_priv(dev);
 +	rtl_ack_events(tp, 0xffff);
  
 -	if (!tp->TxDescArray)
 -		return 0;
 +	pci_set_master(pdev);
  
 -	spin_lock_irq(&tp->lock);
 -	tp->saved_wolopts = __rtl8169_get_wol(tp);
 -	__rtl8169_set_wol(tp, WAKE_ANY);
 -	spin_unlock_irq(&tp->lock);
 +	/*
 +	 * Pretend we are using VLANs; This bypasses a nasty bug where
 +	 * Interrupts stop flowing on high load on 8110SCd controllers.
 +	 */
 +	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
 +		tp->cp_cmd |= RxVlan;
  
 -	rtl8169_net_suspend(dev);
 +	rtl_init_mdio_ops(tp);
 +	rtl_init_pll_power_ops(tp);
 +	rtl_init_jumbo_ops(tp);
  
 -	return 0;
 -}
 +	rtl8169_print_mac_version(tp);
  
 -static int rtl8169_runtime_resume(struct device *device)
 -{
 -	struct pci_dev *pdev = to_pci_dev(device);
 -	struct net_device *dev = pci_get_drvdata(pdev);
 -	struct rtl8169_private *tp = netdev_priv(dev);
 +	chipset = tp->mac_version;
 +	tp->txd_version = rtl_chip_infos[chipset].txd_version;
  
 -	if (!tp->TxDescArray)
 -		return 0;
 +	RTL_W8(Cfg9346, Cfg9346_Unlock);
 +	RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
 +	RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
 +	if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
 +		tp->features |= RTL_FEATURE_WOL;
 +	if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
 +		tp->features |= RTL_FEATURE_WOL;
 +	tp->features |= rtl_try_msi(tp, cfg);
 +	RTL_W8(Cfg9346, Cfg9346_Lock);
  
 -	spin_lock_irq(&tp->lock);
 -	__rtl8169_set_wol(tp, tp->saved_wolopts);
 -	tp->saved_wolopts = 0;
 -	spin_unlock_irq(&tp->lock);
 +	if (rtl_tbi_enabled(tp)) {
 +		tp->set_speed = rtl8169_set_speed_tbi;
 +		tp->get_settings = rtl8169_gset_tbi;
 +		tp->phy_reset_enable = rtl8169_tbi_reset_enable;
 +		tp->phy_reset_pending = rtl8169_tbi_reset_pending;
 +		tp->link_ok = rtl8169_tbi_link_ok;
 +		tp->do_ioctl = rtl_tbi_ioctl;
 +	} else {
 +		tp->set_speed = rtl8169_set_speed_xmii;
 +		tp->get_settings = rtl8169_gset_xmii;
 +		tp->phy_reset_enable = rtl8169_xmii_reset_enable;
 +		tp->phy_reset_pending = rtl8169_xmii_reset_pending;
 +		tp->link_ok = rtl8169_xmii_link_ok;
 +		tp->do_ioctl = rtl_xmii_ioctl;
 +	}
  
 -	rtl8169_init_phy(dev, tp);
 +	mutex_init(&tp->wk.mutex);
  
 -	__rtl8169_resume(dev);
 +	/* Get MAC address */
 +	for (i = 0; i < ETH_ALEN; i++)
 +		dev->dev_addr[i] = RTL_R8(MAC0 + i);
 +	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
  
 -	return 0;
 -}
 +	SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
 +	dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
  
 -static int rtl8169_runtime_idle(struct device *device)
 -{
 -	struct pci_dev *pdev = to_pci_dev(device);
 -	struct net_device *dev = pci_get_drvdata(pdev);
 -	struct rtl8169_private *tp = netdev_priv(dev);
 +	netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
  
 -	return tp->TxDescArray ? -EBUSY : 0;
 -}
 +	/* don't enable SG, IP_CSUM and TSO by default - it might not work
 +	 * properly for all devices */
 +	dev->features |= NETIF_F_RXCSUM |
 +		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  
 -static const struct dev_pm_ops rtl8169_pm_ops = {
 -	.suspend		= rtl8169_suspend,
 -	.resume			= rtl8169_resume,
 -	.freeze			= rtl8169_suspend,
 -	.thaw			= rtl8169_resume,
 -	.poweroff		= rtl8169_suspend,
 -	.restore		= rtl8169_resume,
 -	.runtime_suspend	= rtl8169_runtime_suspend,
 -	.runtime_resume		= rtl8169_runtime_resume,
 -	.runtime_idle		= rtl8169_runtime_idle,
 -};
 +	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
 +		NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 +	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
 +		NETIF_F_HIGHDMA;
  
 -#define RTL8169_PM_OPS	(&rtl8169_pm_ops)
 +	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
 +		/* 8110SCd requires hardware Rx VLAN - disallow toggling */
 +		dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
  
 -#else /* !CONFIG_PM */
 +	dev->hw_features |= NETIF_F_RXALL;
 +	dev->hw_features |= NETIF_F_RXFCS;
  
 -#define RTL8169_PM_OPS	NULL
 +	tp->hw_start = cfg->hw_start;
 +	tp->event_slow = cfg->event_slow;
  
 -#endif /* !CONFIG_PM */
 +	tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
 +		~(RxBOVF | RxFOVF) : ~0;
  
 -static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
 -{
 -	void __iomem *ioaddr = tp->mmio_addr;
 +	init_timer(&tp->timer);
 +	tp->timer.data = (unsigned long) dev;
 +	tp->timer.function = rtl8169_phy_timer;
  
 -	/* WoL fails with 8168b when the receiver is disabled. */
 -	switch (tp->mac_version) {
 -	case RTL_GIGA_MAC_VER_11:
 -	case RTL_GIGA_MAC_VER_12:
 -	case RTL_GIGA_MAC_VER_17:
 -		pci_clear_master(tp->pci_dev);
 +	tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
  
 -		RTL_W8(ChipCmd, CmdRxEnb);
 -		/* PCI commit */
 -		RTL_R8(ChipCmd);
 -		break;
 -	default:
 -		break;
 -	}
 -}
 +	rc = register_netdev(dev);
 +	if (rc < 0)
 +		goto err_out_msi_4;
  
 -static void rtl_shutdown(struct pci_dev *pdev)
 -{
 -	struct net_device *dev = pci_get_drvdata(pdev);
 -	struct rtl8169_private *tp = netdev_priv(dev);
 +	pci_set_drvdata(pdev, dev);
  
 -	rtl8169_net_suspend(dev);
 +	netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
 +		   rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
 +		   (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
 +	if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
 +		netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
 +			   "tx checksumming: %s]\n",
 +			   rtl_chip_infos[chipset].jumbo_max,
 +			   rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
 +	}
  
 -	/* Restore original MAC address */
 -	rtl_rar_set(tp, dev->perm_addr);
 +	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
 +	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
 +	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
 +		rtl8168_driver_start(tp);
 +	}
  
 -	spin_lock_irq(&tp->lock);
 +	device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
  
 -	rtl8169_hw_reset(tp);
 +	if (pci_dev_run_wake(pdev))
 +		pm_runtime_put_noidle(&pdev->dev);
  
 -	spin_unlock_irq(&tp->lock);
 +	netif_carrier_off(dev);
  
 -	if (system_state == SYSTEM_POWER_OFF) {
 -		if (__rtl8169_get_wol(tp) & WAKE_ANY) {
 -			rtl_wol_suspend_quirk(tp);
 -			rtl_wol_shutdown_quirk(tp);
 -		}
 +out:
 +	return rc;
  
 -		pci_wake_from_d3(pdev, true);
 -		pci_set_power_state(pdev, PCI_D3hot);
 -	}
 +err_out_msi_4:
 +	rtl_disable_msi(pdev, tp);
 +	iounmap(ioaddr);
 +err_out_free_res_3:
 +	pci_release_regions(pdev);
 +err_out_mwi_2:
 +	pci_clear_mwi(pdev);
 +	pci_disable_device(pdev);
 +err_out_free_dev_1:
 +	free_netdev(dev);
 +	goto out;
  }
  
  static struct pci_driver rtl8169_pci_driver = {
