Message-ID: <20091208221612.28373.74512.stgit@localhost.localdomain>
Date:	Tue, 08 Dec 2009 14:16:14 -0800
From:	Jeff Kirsher <jeffrey.t.kirsher@...el.com>
To:	netdev@...r.kernel.org
Cc:	gospo@...hat.com, Greg Rose <gregory.v.rose@...el.com>,
	Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@...el.com>,
	Jeff Kirsher <jeffrey.t.kirsher@...el.com>
Subject: [RFC PATCH 10/12] ixgbevf: Driver main and ethtool interface module
	and main header

From: Greg Rose <gregory.v.rose@...el.com>

These modules and the main header contain the Linux network interface code
and the core interrupt and network send/receive handlers.

Signed-off-by: Greg Rose <gregory.v.rose@...el.com>
Acked-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@...el.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@...el.com>
---

 drivers/net/ixgbevf/ethtool.c      |  815 ++++++++
 drivers/net/ixgbevf/ixgbevf.h      |  318 +++
 drivers/net/ixgbevf/ixgbevf_main.c | 3588 ++++++++++++++++++++++++++++++++++++
 3 files changed, 4721 insertions(+), 0 deletions(-)
 create mode 100644 drivers/net/ixgbevf/ethtool.c
 create mode 100644 drivers/net/ixgbevf/ixgbevf.h
 create mode 100644 drivers/net/ixgbevf/ixgbevf_main.c

diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
new file mode 100644
index 0000000..0e3f50a
--- /dev/null
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -0,0 +1,815 @@
+/*******************************************************************************
+
+  Intel 82599 Virtual Function driver
+  Copyright(c) 1999 - 2009 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@...ts.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for ixgbevf */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <linux/if_vlan.h>
+#ifdef SIOCETHTOOL
+#include <linux/uaccess.h>
+
+#include "ixgbevf.h"
+
+#ifndef ETH_GSTRING_LEN
+#define ETH_GSTRING_LEN 32
+#endif
+
+#define IXGBE_ALL_RAR_ENTRIES 16
+
+#ifdef ETHTOOL_GSTATS
+struct ixgbe_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+	int base_stat_offset;
+};
+
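+/*
+ * IXGBEVF_STAT(m, b) expands to three initializers in order: the width of
+ * stat m, its byte offset inside struct ixgbevf_adapter, and the offset of
+ * its base value b, so ethtool reports each counter as current minus base.
+ */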
+#define IXGBEVF_STAT(m, b)  sizeof(((struct ixgbevf_adapter *)0)->m), \
+			    offsetof(struct ixgbevf_adapter, m),      \
+			    offsetof(struct ixgbevf_adapter, b)
+static struct ixgbe_stats ixgbe_gstrings_stats[] = {
+	{"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc)},
+	{"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc)},
+	{"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc)},
+	{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc)},
+	{"tx_busy", IXGBEVF_STAT(tx_busy, zero_base)},
+	{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc)},
+	{"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base)},
+	{"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base)},
+	{"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base)},
+	{"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base)},
+};
+
+#define IXGBE_QUEUE_STATS_LEN 0
+#define IXGBE_GLOBAL_STATS_LEN	ARRAY_SIZE(ixgbe_gstrings_stats)
+
+#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+#endif /* ETHTOOL_GSTATS */
+#ifdef ETHTOOL_TEST
+static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register test  (offline)",
+	"Link test   (on/offline)"
+};
+#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+#endif /* ETHTOOL_TEST */
+
+static int ixgbevf_get_settings(struct net_device *netdev,
+				struct ethtool_cmd *ecmd)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 link_speed = 0;
+	bool link_up;
+
+	ecmd->supported = SUPPORTED_10000baseT_Full;
+	ecmd->autoneg = AUTONEG_DISABLE;
+	ecmd->transceiver = XCVR_DUMMY1;
+	ecmd->port = -1;
+
+	if (!in_interrupt()) {
+		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+	} else {
+		/*
+		 * this is a special workaround for cases where bonding
+		 * calls this routine from interrupt context
+		 */
+		link_speed = adapter->link_speed;
+		link_up = adapter->link_up;
+	}
+
+	if (link_up) {
+		ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+			       SPEED_10000 : SPEED_1000;
+		ecmd->duplex = DUPLEX_FULL;
+	} else {
+		ecmd->speed = -1;
+		ecmd->duplex = -1;
+	}
+
+	return 0;
+}
+
+static int ixgbevf_set_settings(struct net_device *netdev,
+				struct ethtool_cmd *ecmd)
+{
+	return -EINVAL;
+}
+
+static u32 ixgbevf_get_rx_csum(struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
+}
+
+static int ixgbevf_set_rx_csum(struct net_device *netdev, u32 data)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	if (data)
+		adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+	else
+		adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
+
+	if (netif_running(netdev)) {
+		if (!adapter->dev_closed)
+			ixgbevf_reinit_locked(adapter);
+	} else {
+		ixgbevf_reset(adapter);
+	}
+
+	return 0;
+}
+
+static u32 ixgbevf_get_tx_csum(struct net_device *netdev)
+{
+	return (netdev->features & NETIF_F_IP_CSUM) != 0;
+}
+
+static int ixgbevf_set_tx_csum(struct net_device *netdev, u32 data)
+{
+	if (data)
+#ifdef NETIF_F_IPV6_CSUM
+		netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+	else
+		netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+#else
+		netdev->features |= NETIF_F_IP_CSUM;
+	else
+		netdev->features &= ~NETIF_F_IP_CSUM;
+#endif
+
+	return 0;
+}
+
+#undef ethtool_op_set_tso
+#ifdef NETIF_F_TSO
+static int ixgbevf_set_tso(struct net_device *netdev, u32 data)
+{
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+	if (data) {
+		netdev->features |= NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+		netdev->features |= NETIF_F_TSO6;
+#endif
+	} else {
+		netif_tx_stop_all_queues(netdev);
+		netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+		netdev->features &= ~NETIF_F_TSO6;
+#endif
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+#ifdef NETIF_F_HW_VLAN_TX
+		/* disable TSO on all VLANs if they're present */
+		if (adapter->vlgrp) {
+			int i;
+			struct net_device *v_netdev;
+			for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+				v_netdev =
+				       vlan_group_get_device(adapter->vlgrp, i);
+				if (v_netdev) {
+					v_netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+					v_netdev->features &= ~NETIF_F_TSO6;
+#endif
+					vlan_group_set_device(adapter->vlgrp, i,
+							      v_netdev);
+				}
+			}
+		}
+#endif
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+		netif_tx_start_all_queues(netdev);
+	}
+	return 0;
+}
+#endif /* NETIF_F_TSO */
+
+static u32 ixgbevf_get_msglevel(struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	return adapter->msg_enable;
+}
+
+static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	adapter->msg_enable = data;
+}
+
+static int ixgbevf_get_regs_len(struct net_device *netdev)
+{
+#define IXGBE_REGS_LEN  45
+	return IXGBE_REGS_LEN * sizeof(u32);
+}
+
+#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
+
+static char *ixgbevf_reg_names[] = {
+	"IXGBE_VFCTRL",
+	"IXGBE_VFSTATUS",
+	"IXGBE_VFLINKS",
+	"IXGBE_VFRXMEMWRAP",
+	"IXGBE_VFRTIMER",
+	"IXGBE_VTEICR",
+	"IXGBE_VTEICS",
+	"IXGBE_VTEIMS",
+	"IXGBE_VTEIMC",
+	"IXGBE_VTEIAC",
+	"IXGBE_VTEIAM",
+	"IXGBE_VTEITR",
+	"IXGBE_VTIVAR",
+	"IXGBE_VTIVAR_MISC",
+	"IXGBE_VFRDBAL0",
+	"IXGBE_VFRDBAL1",
+	"IXGBE_VFRDBAH0",
+	"IXGBE_VFRDBAH1",
+	"IXGBE_VFRDLEN0",
+	"IXGBE_VFRDLEN1",
+	"IXGBE_VFRDH0",
+	"IXGBE_VFRDH1",
+	"IXGBE_VFRDT0",
+	"IXGBE_VFRDT1",
+	"IXGBE_VFRXDCTL0",
+	"IXGBE_VFRXDCTL1",
+	"IXGBE_VFSRRCTL0",
+	"IXGBE_VFSRRCTL1",
+	"IXGBE_VFPSRTYPE",
+	"IXGBE_VFTDBAL0",
+	"IXGBE_VFTDBAL1",
+	"IXGBE_VFTDBAH0",
+	"IXGBE_VFTDBAH1",
+	"IXGBE_VFTDLEN0",
+	"IXGBE_VFTDLEN1",
+	"IXGBE_VFTDH0",
+	"IXGBE_VFTDH1",
+	"IXGBE_VFTDT0",
+	"IXGBE_VFTDT1",
+	"IXGBE_VFTXDCTL0",
+	"IXGBE_VFTXDCTL1",
+	"IXGBE_VFTDWBAL0",
+	"IXGBE_VFTDWBAL1",
+	"IXGBE_VFTDWBAH0",
+	"IXGBE_VFTDWBAH1"
+};
+
+static void ixgbevf_get_regs(struct net_device *netdev,
+			     struct ethtool_regs *regs,
+			     void *p)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 *regs_buff = p;
+	u8 i;
+
+	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
+
+	regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+
+	/* General Registers */
+	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
+	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
+	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
+	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFRTIMER);
+
+	/* Interrupt */
+	/* don't read EICR because it can clear interrupt causes, instead
+	 * read EICS which is a shadow but doesn't clear EICR */
+	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
+	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
+	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
+	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
+	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
+	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+
+	/* Receive DMA */
+	for (i = 0; i < 2; i++)
+		regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
+
+	/* Receive */
+	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);
+
+	/* Transmit */
+	for (i = 0; i < 2; i++)
+		regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
+
+	for (i = 0; i < IXGBE_REGS_LEN; i++)
+		printk(KERN_INFO "%s\t%8.8x\n",
+		       ixgbevf_reg_names[i], regs_buff[i]);
+}
+
+static int ixgbevf_get_eeprom(struct net_device *netdev,
+			      struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	return -EOPNOTSUPP;
+}
+
+static int ixgbevf_set_eeprom(struct net_device *netdev,
+			      struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	return -EOPNOTSUPP;
+}
+
+static void ixgbevf_get_drvinfo(struct net_device *netdev,
+				struct ethtool_drvinfo *drvinfo)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+	strncpy(drvinfo->driver, ixgbevf_driver_name, 32);
+	strncpy(drvinfo->version, ixgbevf_driver_version, 32);
+
+	strncpy(drvinfo->fw_version, "N/A", 4);
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	drvinfo->n_stats = IXGBEVF_STATS_LEN;
+	drvinfo->testinfo_len = IXGBE_TEST_LEN;
+	drvinfo->regdump_len = ixgbevf_get_regs_len(netdev);
+}
+
+static void ixgbevf_get_ringparam(struct net_device *netdev,
+				  struct ethtool_ringparam *ring)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbevf_ring *tx_ring = adapter->tx_ring;
+	struct ixgbevf_ring *rx_ring = adapter->rx_ring;
+
+	ring->rx_max_pending = IXGBEVF_MAX_RXD;
+	ring->tx_max_pending = IXGBEVF_MAX_TXD;
+	ring->rx_mini_max_pending = 0;
+	ring->rx_jumbo_max_pending = 0;
+	ring->rx_pending = rx_ring->count;
+	ring->tx_pending = tx_ring->count;
+	ring->rx_mini_pending = 0;
+	ring->rx_jumbo_pending = 0;
+}
+
+static int ixgbevf_set_ringparam(struct net_device *netdev,
+				 struct ethtool_ringparam *ring)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbevf_ring *tx_ring, *rx_ring;
+	int i, err;
+	u32 new_rx_count, new_tx_count;
+	bool need_update = false;
+
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+		return -EINVAL;
+
+	new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
+	new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
+	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+	new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
+	new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
+	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
+
+	if ((new_tx_count == adapter->tx_ring->count) &&
+	    (new_rx_count == adapter->rx_ring->count)) {
+		/* nothing to do */
+		return 0;
+	}
+
+	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
+		msleep(1);
+
+	tx_ring = kcalloc(adapter->num_tx_queues,
+			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
+	if (!tx_ring) {
+		err = -ENOMEM;
+		goto err_setup;
+	}
+
+	if (new_tx_count != adapter->tx_ring_count) {
+		memcpy(tx_ring, adapter->tx_ring,
+		       adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			tx_ring[i].count = new_tx_count;
+			err = ixgbevf_setup_tx_resources(adapter,
+							 &tx_ring[i]);
+			if (err) {
+				while (i) {
+					i--;
+					ixgbevf_free_tx_resources(adapter,
+								  &tx_ring[i]);
+				}
+				goto err_setup;
+			}
+			tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
+		}
+		need_update = true;
+	}
+
+	rx_ring = kcalloc(adapter->num_rx_queues,
+			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
+	if (!rx_ring) {
+		/* back out any Tx rings already set up above */
+		if (need_update) {
+			for (i = 0; i < adapter->num_tx_queues; i++)
+				ixgbevf_free_tx_resources(adapter,
+							  &tx_ring[i]);
+		}
+		kfree(tx_ring);
+		err = -ENOMEM;
+		goto err_setup;
+	}
+
+	if (new_rx_count != adapter->rx_ring_count) {
+		memcpy(rx_ring, adapter->rx_ring,
+		       adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			rx_ring[i].count = new_rx_count;
+			err = ixgbevf_setup_rx_resources(adapter,
+							 &rx_ring[i]);
+			if (err) {
+				while (i) {
+					i--;
+					ixgbevf_free_rx_resources(adapter,
+								  &rx_ring[i]);
+				}
+				goto err_setup;
+			}
+			rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
+		}
+		need_update = true;
+	}
+
+	/* if rings need to be updated, here's the place to do it in one shot */
+	if (need_update) {
+		if (netif_running(netdev))
+			ixgbevf_down(adapter);
+
+		/* tx */
+		if (new_tx_count != adapter->tx_ring_count) {
+			kfree(adapter->tx_ring);
+			adapter->tx_ring = tx_ring;
+			tx_ring = NULL;
+			adapter->tx_ring_count = new_tx_count;
+		}
+
+		/* rx */
+		if (new_rx_count != adapter->rx_ring_count) {
+			kfree(adapter->rx_ring);
+			adapter->rx_ring = rx_ring;
+			rx_ring = NULL;
+			adapter->rx_ring_count = new_rx_count;
+		}
+	}
+
+	/* success! */
+	err = 0;
+	if (netif_running(netdev))
+		ixgbevf_up(adapter);
+
+err_setup:
+	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
+	return err;
+}
+
+static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
+{
+	switch (stringset) {
+	case ETH_SS_TEST:
+		return IXGBE_TEST_LEN;
+	case ETH_SS_STATS:
+		return IXGBE_GLOBAL_STATS_LEN;
+	default:
+		return -EINVAL;
+	}
+}
+
+static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
+				      struct ethtool_stats *stats, u64 *data)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	int i;
+
+	ixgbevf_update_stats(adapter);
+	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+		char *p = (char *)adapter +
+			ixgbe_gstrings_stats[i].stat_offset;
+		char *b = (char *)adapter +
+			ixgbe_gstrings_stats[i].base_stat_offset;
+		data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
+			    sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
+			  ((ixgbe_gstrings_stats[i].sizeof_stat ==
+			    sizeof(u64)) ? *(u64 *)b : *(u32 *)b);
+	}
+}
+
+static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
+				u8 *data)
+{
+	char *p = (char *)data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_TEST:
+		memcpy(data, *ixgbe_gstrings_test,
+		       IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+		break;
+	case ETH_SS_STATS:
+		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+}
+
+static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	bool link_up;
+	u32 link_speed = 0;
+	*data = 0;
+
+	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
+	if (!link_up)
+		*data = 1;
+
+	return *data;
+}
+
+/* ethtool register test data */
+struct ixgbevf_reg_test {
+	u16 reg;
+	u8  array_len;
+	u8  test_type;
+	u32 mask;
+	u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x40 bytes apart, or in contiguous tables.  We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST	1
+#define SET_READ_TEST	2
+#define WRITE_NO_TEST	3
+#define TABLE32_TEST	4
+#define TABLE64_TEST_LO	5
+#define TABLE64_TEST_HI	6
+
+/* default VF register test */
+static struct ixgbevf_reg_test reg_test_vf[] = {
+	{ IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+	{ IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
+	{ IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
+	{ IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
+	{ 0, 0, 0, 0 }
+};
+
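+/*
+ * Write each test pattern (masked by the writable bits W), read the value
+ * back, and compare it against the pattern masked by both W and the
+ * read-back mask M; the register's original value is restored either way.
+ */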
+#define REG_PATTERN_TEST(R, M, W)                                             \
+{                                                                             \
+	u32 pat, val, before;                                                 \
+	const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
+	for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {                       \
+		before = readl(adapter->hw.hw_addr + R);                      \
+		writel((_test[pat] & W), (adapter->hw.hw_addr + R));          \
+		val = readl(adapter->hw.hw_addr + R);                         \
+		if (val != (_test[pat] & W & M)) {                            \
+			hw_dbg(&adapter->hw,                                  \
+			"pattern test reg %04X failed: got "                  \
+			"0x%08X expected 0x%08X\n",                           \
+			R, val, (_test[pat] & W & M));                        \
+			*data = R;                                            \
+			writel(before, adapter->hw.hw_addr + R);              \
+			return 1;                                             \
+		}                                                             \
+		writel(before, adapter->hw.hw_addr + R);                      \
+	}                                                                     \
+}
+
+#define REG_SET_AND_CHECK(R, M, W)                                            \
+{                                                                             \
+	u32 val, before;                                                      \
+	before = readl(adapter->hw.hw_addr + R);                              \
+	writel((W & M), (adapter->hw.hw_addr + R));                           \
+	val = readl(adapter->hw.hw_addr + R);                                 \
+	if ((W & M) != (val & M)) {                                           \
+		printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \
+				 "expected 0x%08X\n", R, (val & M), (W & M)); \
+		*data = R;                                                    \
+		writel(before, (adapter->hw.hw_addr + R));                    \
+		return 1;                                                     \
+	}                                                                     \
+	writel(before, (adapter->hw.hw_addr + R));                            \
+}
+
+static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
+{
+	struct ixgbevf_reg_test *test;
+	u32 i;
+
+	test = reg_test_vf;
+
+	/*
+	 * Perform the register test, looping through the test table
+	 * until we either fail or reach the null entry.
+	 */
+	while (test->reg) {
+		for (i = 0; i < test->array_len; i++) {
+			switch (test->test_type) {
+			case PATTERN_TEST:
+				REG_PATTERN_TEST(test->reg + (i * 0x40),
+						test->mask,
+						test->write);
+				break;
+			case SET_READ_TEST:
+				REG_SET_AND_CHECK(test->reg + (i * 0x40),
+						test->mask,
+						test->write);
+				break;
+			case WRITE_NO_TEST:
+				writel(test->write,
+				       (adapter->hw.hw_addr + test->reg)
+				       + (i * 0x40));
+				break;
+			case TABLE32_TEST:
+				REG_PATTERN_TEST(test->reg + (i * 4),
+						test->mask,
+						test->write);
+				break;
+			case TABLE64_TEST_LO:
+				REG_PATTERN_TEST(test->reg + (i * 8),
+						test->mask,
+						test->write);
+				break;
+			case TABLE64_TEST_HI:
+				REG_PATTERN_TEST((test->reg + 4) + (i * 8),
+						test->mask,
+						test->write);
+				break;
+			}
+		}
+		test++;
+	}
+
+	*data = 0;
+	return *data;
+}
+
+static void ixgbevf_diag_test(struct net_device *netdev,
+			      struct ethtool_test *eth_test, u64 *data)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	bool if_running = netif_running(netdev);
+
+	set_bit(__IXGBEVF_TESTING, &adapter->state);
+	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+		/* Offline tests */
+
+		hw_dbg(&adapter->hw, "offline testing starting\n");
+
+		/* Link test performed before hardware reset so autoneg doesn't
+		 * interfere with test result */
+		if (ixgbevf_link_test(adapter, &data[1]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		if (if_running)
+			/* indicate we're in test mode */
+			dev_close(netdev);
+		else
+			ixgbevf_reset(adapter);
+
+		hw_dbg(&adapter->hw, "register testing starting\n");
+		if (ixgbevf_reg_test(adapter, &data[0]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		ixgbevf_reset(adapter);
+
+		clear_bit(__IXGBEVF_TESTING, &adapter->state);
+		if (if_running)
+			dev_open(netdev);
+	} else {
+		hw_dbg(&adapter->hw, "online testing starting\n");
+		/* Online tests */
+		if (ixgbevf_link_test(adapter, &data[1]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* Online tests aren't run; pass by default */
+		data[0] = 0;
+
+		clear_bit(__IXGBEVF_TESTING, &adapter->state);
+	}
+	msleep_interruptible(4 * 1000);
+}
+
+static int ixgbevf_nway_reset(struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+	if (netif_running(netdev)) {
+		if (!adapter->dev_closed)
+			ixgbevf_reinit_locked(adapter);
+	}
+
+	return 0;
+}
+
+static u32 ixgbevf_get_link(struct net_device *dev)
+{
+	return netif_carrier_ok(dev) ? 1 : 0;
+}
+
+static struct ethtool_ops ixgbevf_ethtool_ops = {
+	.get_settings           = ixgbevf_get_settings,
+	.set_settings           = ixgbevf_set_settings,
+	.get_drvinfo            = ixgbevf_get_drvinfo,
+	.get_regs_len           = ixgbevf_get_regs_len,
+	.get_regs               = ixgbevf_get_regs,
+	.nway_reset             = ixgbevf_nway_reset,
+	.get_link               = ixgbevf_get_link,
+	.get_eeprom             = ixgbevf_get_eeprom,
+	.set_eeprom             = ixgbevf_set_eeprom,
+	.get_ringparam          = ixgbevf_get_ringparam,
+	.set_ringparam          = ixgbevf_set_ringparam,
+	.get_rx_csum            = ixgbevf_get_rx_csum,
+	.set_rx_csum            = ixgbevf_set_rx_csum,
+	.get_tx_csum            = ixgbevf_get_tx_csum,
+	.set_tx_csum            = ixgbevf_set_tx_csum,
+	.get_sg                 = ethtool_op_get_sg,
+	.set_sg                 = ethtool_op_set_sg,
+	.get_msglevel           = ixgbevf_get_msglevel,
+	.set_msglevel           = ixgbevf_set_msglevel,
+	.get_tso                = ethtool_op_get_tso,
+	.set_tso                = ixgbevf_set_tso,
+	.self_test              = ixgbevf_diag_test,
+	.get_sset_count         = ixgbevf_get_sset_count,
+	.get_strings            = ixgbevf_get_strings,
+	.get_ethtool_stats      = ixgbevf_get_ethtool_stats,
+};
+
+void ixgbevf_set_ethtool_ops(struct net_device *netdev)
+{
+	SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
+}
+#endif /* SIOCETHTOOL */
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
new file mode 100644
index 0000000..c483386
--- /dev/null
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -0,0 +1,318 @@
+/*******************************************************************************
+
+  Intel 82599 Virtual Function driver
+  Copyright(c) 1999 - 2009 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@...ts.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_H_
+#define _IXGBEVF_H_
+
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+
+#include "vf.h"
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct ixgbevf_tx_buffer {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	unsigned long time_stamp;
+	u16 length;
+	u16 next_to_watch;
+	u16 mapped_as_page;
+};
+
+struct ixgbevf_rx_buffer {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	struct page *page;
+	dma_addr_t page_dma;
+	unsigned int page_offset;
+};
+
+struct ixgbevf_ring {
+	struct ixgbevf_adapter *adapter;  /* backlink */
+	void *desc;			/* descriptor ring memory */
+	dma_addr_t dma;			/* phys. address of descriptor ring */
+	unsigned int size;		/* length in bytes */
+	unsigned int count;		/* amount of descriptors */
+	unsigned int next_to_use;
+	unsigned int next_to_clean;
+
+	int queue_index; /* needed for multiqueue queue management */
+	union {
+		struct ixgbevf_tx_buffer *tx_buffer_info;
+		struct ixgbevf_rx_buffer *rx_buffer_info;
+	};
+
+	u16 head;
+	u16 tail;
+
+	unsigned int total_bytes;
+	unsigned int total_packets;
+
+	u16 reg_idx; /* holds the special value that gets the hardware register
+		      * offset associated with this ring, which is different
+		      * for DCB and RSS modes */
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+	/* cpu for tx queue */
+	int cpu;
+#endif
+
+	u64 v_idx; /* maps directly to the index for this ring in the hardware
+		    * vector array, can also be used for finding the bit in EICR
+		    * and friends that represents the vector for this ring */
+
+	u16 work_limit;                /* max work per interrupt */
+	u16 rx_buf_len;
+};
+
+enum ixgbevf_ring_f_enum {
+	RING_F_NONE = 0,
+	RING_F_ARRAY_SIZE      /* must be last in enum set */
+};
+
+struct ixgbevf_ring_feature {
+	int indices;
+	int mask;
+};
+
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define IXGBEVF_RX_BUFFER_WRITE	16	/* Must be power of 2 */
+
+#define MAX_RX_QUEUES 1
+#define MAX_TX_QUEUES 1
+
+#define IXGBEVF_DEFAULT_TXD   1024
+#define IXGBEVF_DEFAULT_RXD   512
+#define IXGBEVF_MAX_TXD       4096
+#define IXGBEVF_MIN_TXD       64
+#define IXGBEVF_MAX_RXD       4096
+#define IXGBEVF_MIN_RXD       64
+
+/* Supported Rx Buffer Sizes */
+#define IXGBEVF_RXBUFFER_64    64     /* Used for packet split */
+#define IXGBEVF_RXBUFFER_128   128    /* Used for packet split */
+#define IXGBEVF_RXBUFFER_256   256    /* Used for packet split */
+#define IXGBEVF_RXBUFFER_2048  2048
+#define IXGBEVF_MAX_RXBUFFER   16384  /* largest size for single descriptor */
+
+#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#define IXGBE_TX_FLAGS_CSUM		(u32)(1)
+#define IXGBE_TX_FLAGS_VLAN		(u32)(1 << 1)
+#define IXGBE_TX_FLAGS_TSO		(u32)(1 << 2)
+#define IXGBE_TX_FLAGS_IPV4		(u32)(1 << 3)
+#define IXGBE_TX_FLAGS_FCOE		(u32)(1 << 4)
+#define IXGBE_TX_FLAGS_FSO		(u32)(1 << 5)
+#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
+#define IXGBE_TX_FLAGS_VLAN_SHIFT	16
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct ixgbevf_q_vector {
+	struct ixgbevf_adapter *adapter;
+	struct napi_struct napi;
+	DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
+	DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
+	u8 rxr_count;     /* Rx ring count assigned to this vector */
+	u8 txr_count;     /* Tx ring count assigned to this vector */
+	u8 tx_itr;
+	u8 rx_itr;
+	u32 eitr;
+	int v_idx;	  /* vector index in list */
+};
+
+/* Helper macros to switch between ints/sec and what the register uses.
+ * And yes, it's the same math going both ways.  The lowest value
+ * supported by all of the ixgbe hardware is 8.
+ */
+#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
+	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
+#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
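+/* e.g. a setting of 8000 ints/sec becomes 1000000000 / (8000 * 256) ~= 488
+ * in the register; a setting of 0 falls back to the hardware minimum of 8 */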
+
+#define IXGBE_DESC_UNUSED(R) \
+	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+	(R)->next_to_clean - (R)->next_to_use - 1)
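+/* e.g. count = 512, next_to_clean = 2, next_to_use = 5 leaves
+ * 512 + 2 - 5 - 1 = 508 unused descriptors; one slot is always held
+ * back so a full ring can be told apart from an empty one */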
+
+#define IXGBE_RX_DESC_ADV(R, i)	    \
+	(&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
+#define IXGBE_TX_DESC_ADV(R, i)	    \
+	(&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
+#define IXGBE_TX_CTXTDESC_ADV(R, i)	    \
+	(&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
+
+#define IXGBE_MAX_JUMBO_FRAME_SIZE        16128
+
+#define OTHER_VECTOR 1
+#define NON_Q_VECTORS (OTHER_VECTOR)
+
+#define MAX_MSIX_Q_VECTORS 2
+#define MAX_MSIX_COUNT 2
+
+#define MIN_MSIX_Q_VECTORS 2
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
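+/* i.e. at least three vectors total: the two queue vectors plus the one
+ * non-queue vector, which services other causes such as the PF mailbox */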
+
+/* board specific private data structure */
+struct ixgbevf_adapter {
+	struct timer_list watchdog_timer;
+#ifdef NETIF_F_HW_VLAN_TX
+	struct vlan_group *vlgrp;
+#endif
+	u16 bd_number;
+	struct work_struct reset_task;
+	struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+	char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
+
+	/* Interrupt Throttle Rate */
+	u32 itr_setting;
+	u16 eitr_low;
+	u16 eitr_high;
+
+	/* TX */
+	struct ixgbevf_ring *tx_ring;	/* One per active queue */
+	int num_tx_queues;
+	u64 restart_queue;
+	u64 hw_csum_tx_good;
+	u64 lsc_int;
+	u64 hw_tso_ctxt;
+	u64 hw_tso6_ctxt;
+	u32 tx_timeout_count;
+	bool detect_tx_hung;
+
+	/* RX */
+	struct ixgbevf_ring *rx_ring;	/* One per active queue */
+	int num_rx_queues;
+	int num_rx_pools;               /* == num_rx_queues in 82598 */
+	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
+	u64 hw_csum_rx_error;
+	u64 hw_rx_no_dma_resources;
+	u64 hw_csum_rx_good;
+	u64 non_eop_descs;
+	int num_msix_vectors;
+	int max_msix_q_vectors;         /* true count of q_vectors for device */
+	struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE];
+	struct msix_entry *msix_entries;
+
+	u64 rx_hdr_split;
+	u32 alloc_rx_page_failed;
+	u32 alloc_rx_buff_failed;
+
+	/* Some features need tri-state capability,
+	 * thus the additional *_CAPABLE flags.
+	 */
+	u32 flags;
+#define IXGBE_FLAG_RX_CSUM_ENABLED              (u32)(1)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 1)
+#define IXGBE_FLAG_RX_PS_CAPABLE                (u32)(1 << 2)
+#define IXGBE_FLAG_RX_PS_ENABLED                (u32)(1 << 3)
+#define IXGBE_FLAG_IN_NETPOLL                   (u32)(1 << 4)
+#define IXGBE_FLAG_IMIR_ENABLED                 (u32)(1 << 5)
+#define IXGBE_FLAG_MQ_CAPABLE                   (u32)(1 << 6)
+#define IXGBE_FLAG_NEED_LINK_UPDATE             (u32)(1 << 7)
+#define IXGBE_FLAG_IN_WATCHDOG_TASK             (u32)(1 << 8)
+	/* OS defined structs */
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct net_device_stats net_stats;
+
+	/* structs defined in ixgbe_vf.h */
+	struct ixgbe_hw hw;
+	u16 msg_enable;
+	struct ixgbevf_hw_stats stats;
+	u64 zero_base;
+	/* Interrupt Throttle Rate */
+	u32 eitr_param;
+
+	unsigned long state;
+	u32 *config_space;
+	u64 tx_busy;
+	unsigned int tx_ring_count;
+	unsigned int rx_ring_count;
+
+	u32 link_speed;
+	bool link_up;
+	unsigned long link_check_timeout;
+
+	struct work_struct watchdog_task;
+	bool netdev_registered;
+	bool dev_closed;
+};
+
+enum ixgbevf_state_t {
+	__IXGBEVF_TESTING,
+	__IXGBEVF_RESETTING,
+	__IXGBEVF_DOWN
+};
+
+enum ixgbevf_boards {
+	board_82599_vf,
+};
+
+extern struct ixgbevf_info ixgbevf_vf_info;
+extern struct ixgbe_mac_operations ixgbevf_mbx_ops;
+
+/* needed by ethtool.c */
+extern char ixgbevf_driver_name[];
+extern const char ixgbevf_driver_version[];
+
+extern int ixgbevf_up(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
+extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
+				      struct ixgbevf_ring *);
+extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
+				      struct ixgbevf_ring *);
+extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
+				      struct ixgbevf_ring *);
+extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
+				      struct ixgbevf_ring *);
+extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
+
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *ifr);
+
+#endif
+extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
+extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
+
+#ifdef DEBUG
+extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
+#define hw_dbg(hw, format, arg...) \
+	printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
+#else
+#define hw_dbg(hw, format, arg...) do {} while (0)
+#endif
+
+#endif /* _IXGBEVF_H_ */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
new file mode 100644
index 0000000..8816156
--- /dev/null
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -0,0 +1,3588 @@
+/*******************************************************************************
+
+  Intel 82599 Virtual Function driver
+  Copyright(c) 1999 - 2009 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@...ts.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/******************************************************************************
+ Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
+******************************************************************************/
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+#include "ixgbevf.h"
+
+char ixgbevf_driver_name[] = "ixgbevf";
+static const char ixgbevf_driver_string[] =
+	"Intel(R) 82599 Virtual Function";
+
+#define DRV_VERSION "1.0.0-k0"
+const char ixgbevf_driver_version[] = DRV_VERSION;
+static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
+
+static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
+	[board_82599_vf] = &ixgbevf_vf_info,
+};
+
+/* ixgbevf_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ *   Class, Class Mask, private data (not used) }
+ */
+static struct pci_device_id ixgbevf_pci_tbl[] = {
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
+	board_82599_vf},
+
+	/* required last entry */
+	{0, }
+};
+MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@...el.com>");
+MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+
+/* forward decls */
+static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
+static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
+			       u32 itr_reg);
+
+/**
+ *  ixgbevf_validate_mac_addr - Validate MAC address
+ *  @mac_addr: pointer to MAC address.
+ *
+ *  Tests a MAC address to ensure it is a valid Individual Address
+ **/
+static s32 ixgbevf_validate_mac_addr(u8 *mac_addr)
+{
+	s32 status = 0;
+
+	/* Make sure it is not a multicast address */
+	if (IXGBE_IS_MULTICAST(mac_addr))
+		status = IXGBE_ERR_INVALID_MAC_ADDR;
+	/* Not a broadcast address */
+	else if (IXGBE_IS_BROADCAST(mac_addr))
+		status = IXGBE_ERR_INVALID_MAC_ADDR;
+	/* Reject the zero address */
+	else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+		 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
+		status = IXGBE_ERR_INVALID_MAC_ADDR;
+	return status;
+}
+
+static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
+					   struct ixgbevf_ring *rx_ring,
+					   u32 val)
+{
+	/*
+	 * Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64).
+	 */
+	wmb();
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
+}
+
+/**
+ * ixgbevf_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
+ * @adapter: pointer to adapter struct
+ * @direction: 0 for Rx, 1 for Tx, -1 for other causes
+ * @queue: queue to map the corresponding interrupt to
+ * @msix_vector: the vector to map to the corresponding queue
+ **/
+static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
+			     u8 queue, u8 msix_vector)
+{
+	u32 ivar, index;
+	struct ixgbe_hw *hw = &adapter->hw;
+	if (direction == -1) {
+		/* other causes */
+		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+		ivar &= ~0xFF;
+		ivar |= msix_vector;
+		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
+	} else {
+		/* tx or rx causes */
+		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+		index = ((16 * (queue & 1)) + (8 * direction));
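+		/* each 32-bit VTIVAR register carries four 8-bit entries:
+		 * the Rx and Tx causes for an even/odd queue pair, hence
+		 * the byte index above and the queue >> 1 addressing below */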
+		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
+		ivar &= ~(0xFF << index);
+		ivar |= (msix_vector << index);
+		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
+	}
+}
+
+static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
+					       struct ixgbevf_tx_buffer
+					       *tx_buffer_info)
+{
+	if (tx_buffer_info->dma) {
+		if (tx_buffer_info->mapped_as_page)
+			pci_unmap_page(adapter->pdev,
+				       tx_buffer_info->dma,
+				       tx_buffer_info->length,
+				       PCI_DMA_TODEVICE);
+		else
+			pci_unmap_single(adapter->pdev,
+					 tx_buffer_info->dma,
+					 tx_buffer_info->length,
+					 PCI_DMA_TODEVICE);
+		tx_buffer_info->dma = 0;
+	}
+	if (tx_buffer_info->skb) {
+		dev_kfree_skb_any(tx_buffer_info->skb);
+		tx_buffer_info->skb = NULL;
+	}
+	tx_buffer_info->time_stamp = 0;
+	/* tx_buffer_info must be completely set up in the transmit path */
+}
+
+static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
+					 struct ixgbevf_ring *tx_ring,
+					 unsigned int eop)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 head, tail;
+
+	/* Detect a transmit hang in hardware, this serializes the
+	 * check with the clearing of time_stamp and movement of eop */
+	head = readl(hw->hw_addr + tx_ring->head);
+	tail = readl(hw->hw_addr + tx_ring->tail);
+	adapter->detect_tx_hung = false;
+	if ((head != tail) &&
+	    tx_ring->tx_buffer_info[eop].time_stamp &&
+	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
+		/* detected Tx unit hang */
+		union ixgbe_adv_tx_desc *tx_desc;
+		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+		printk(KERN_ERR "Detected Tx Unit Hang\n"
+		       "  Tx Queue             <%d>\n"
+		       "  TDH, TDT             <%x>, <%x>\n"
+		       "  next_to_use          <%x>\n"
+		       "  next_to_clean        <%x>\n"
+		       "tx_buffer_info[next_to_clean]\n"
+		       "  time_stamp           <%lx>\n"
+		       "  jiffies              <%lx>\n",
+		       tx_ring->queue_index,
+		       head, tail,
+		       tx_ring->next_to_use, eop,
+		       tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+		return true;
+	}
+
+	return false;
+}
+
+#define IXGBE_MAX_TXD_PWR	14
+#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
+			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
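+/* e.g. a 40000-byte mapping needs (40000 >> 14) + 1 = 3 descriptors,
+ * as each descriptor carries at most IXGBE_MAX_DATA_PER_TXD (16K) bytes */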
+#ifdef MAX_SKB_FRAGS
+#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
+	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)      /* for context */
+#else
+#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
+#endif
+
+static void ixgbevf_tx_timeout(struct net_device *netdev);
+
+/**
+ * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ * @tx_ring: tx ring to clean
+ **/
+static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
+				 struct ixgbevf_ring *tx_ring)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct ixgbe_hw *hw = &adapter->hw;
+	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
+	struct ixgbevf_tx_buffer *tx_buffer_info;
+	unsigned int i, eop, count = 0;
+	unsigned int total_bytes = 0, total_packets = 0;
+
+	i = tx_ring->next_to_clean;
+	eop = tx_ring->tx_buffer_info[i].next_to_watch;
+	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+
+	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
+	       (count < tx_ring->work_limit)) {
+		bool cleaned = false;
+		for ( ; !cleaned; count++) {
+			struct sk_buff *skb;
+			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+			tx_buffer_info = &tx_ring->tx_buffer_info[i];
+			cleaned = (i == eop);
+			skb = tx_buffer_info->skb;
+
+			if (cleaned && skb) {
+				unsigned int segs, bytecount;
+
+				/* gso_segs is currently only valid for tcp */
+				segs = skb_shinfo(skb)->gso_segs ?: 1;
+				/* multiply data chunks by size of headers */
+				bytecount = ((segs - 1) * skb_headlen(skb)) +
+					    skb->len;
+				total_packets += segs;
+				total_bytes += bytecount;
+			}
+
+			ixgbevf_unmap_and_free_tx_resource(adapter,
+							   tx_buffer_info);
+
+			tx_desc->wb.status = 0;
+
+			i++;
+			if (i == tx_ring->count)
+				i = 0;
+		}
+
+		eop = tx_ring->tx_buffer_info[i].next_to_watch;
+		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+	}
+
+	tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
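+	/* i.e. only wake the queue once there is room for at least two
+	 * worst-case packets */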
+	if (unlikely(count && netif_carrier_ok(netdev) &&
+		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+#ifdef HAVE_TX_MQ
+		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
+		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+			netif_wake_subqueue(netdev, tx_ring->queue_index);
+			++adapter->restart_queue;
+		}
+#else
+		if (netif_queue_stopped(netdev) &&
+		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+			netif_wake_queue(netdev);
+			++adapter->restart_queue;
+		}
+#endif
+	}
+
+	if (adapter->detect_tx_hung) {
+		if (ixgbevf_check_tx_hang(adapter, tx_ring, i)) {
+			/* schedule immediate reset if we believe we hung */
+			printk(KERN_INFO
+			       "tx hang %d detected, resetting adapter\n",
+			       adapter->tx_timeout_count + 1);
+			ixgbevf_tx_timeout(adapter->netdev);
+		}
+	}
+
+	/* re-arm the interrupt */
+	if ((count >= tx_ring->work_limit) &&
+	    (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
+		IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
+	}
+
+	tx_ring->total_bytes += total_bytes;
+	tx_ring->total_packets += total_packets;
+
+	adapter->net_stats.tx_bytes += total_bytes;
+	adapter->net_stats.tx_packets += total_packets;
+
+	return (count < tx_ring->work_limit);
+}
+
+/**
+ * ixgbevf_receive_skb - Send a completed packet up the stack
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @status: hardware indication of status of receive
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ * @rx_desc: rx descriptor
+ **/
+static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
+				struct sk_buff *skb, u8 status,
+				struct ixgbevf_ring *ring,
+				union ixgbe_adv_rx_desc *rx_desc)
+{
+	struct ixgbevf_adapter *adapter = q_vector->adapter;
+	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
+	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
+
+	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
+		if (adapter->vlgrp && is_vlan)
+			vlan_gro_receive(&q_vector->napi,
+					 adapter->vlgrp,
+					 tag, skb);
+		else
+			napi_gro_receive(&q_vector->napi, skb);
+	} else {
+		if (adapter->vlgrp && is_vlan)
+			vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
+		else
+			netif_rx(skb);
+	}
+}
+
+/**
+ * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @adapter: address of board private structure
+ * @status_err: hardware indication of status of receive
+ * @skb: skb currently being received and modified
+ **/
+static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
+				       u32 status_err, struct sk_buff *skb)
+{
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* Rx csum disabled */
+	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+		return;
+
+	/* if IP and error */
+	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
+	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
+		adapter->hw_csum_rx_error++;
+		return;
+	}
+
+	if (!(status_err & IXGBE_RXD_STAT_L4CS))
+		return;
+
+	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
+		adapter->hw_csum_rx_error++;
+		return;
+	}
+
+	/* It must be a TCP or UDP packet with a valid checksum */
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	adapter->hw_csum_rx_good++;
+}
+
+/**
+ * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @adapter: address of board private structure
+ **/
+static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
+				     struct ixgbevf_ring *rx_ring,
+				     int cleaned_count)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	union ixgbe_adv_rx_desc *rx_desc;
+	struct ixgbevf_rx_buffer *bi;
+	struct sk_buff *skb;
+	unsigned int i;
+	unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
+
+	i = rx_ring->next_to_use;
+	bi = &rx_ring->rx_buffer_info[i];
+
+	while (cleaned_count--) {
+		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+
+		if (!bi->page_dma &&
+		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
+			if (!bi->page) {
+				bi->page = netdev_alloc_page(adapter->netdev);
+				if (!bi->page) {
+					adapter->alloc_rx_page_failed++;
+					goto no_buffers;
+				}
+				bi->page_offset = 0;
+			} else {
+				/* use a half page if we're re-using */
+				bi->page_offset ^= (PAGE_SIZE / 2);
+			}
+
+			bi->page_dma = pci_map_page(pdev, bi->page,
+						    bi->page_offset,
+						    (PAGE_SIZE / 2),
+						    PCI_DMA_FROMDEVICE);
+		}
+
+		skb = bi->skb;
+		if (!skb) {
+			skb = netdev_alloc_skb(adapter->netdev, bufsz);
+
+			if (!skb) {
+				adapter->alloc_rx_buff_failed++;
+				goto no_buffers;
+			}
+
+			/*
+			 * Make buffer alignment 2 beyond a 16 byte boundary
+			 * this will result in a 16 byte aligned IP header after
+			 * the 14 byte MAC header is removed
+			 */
+			skb_reserve(skb, NET_IP_ALIGN);
+
+			bi->skb = skb;
+		}
+		if (!bi->dma) {
+			bi->dma = pci_map_single(pdev, skb->data,
+						 rx_ring->rx_buf_len,
+						 PCI_DMA_FROMDEVICE);
+		}
+		/* Refresh the desc even if buffer_addrs didn't change because
+		 * each write-back erases this info. */
+		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+		} else {
+			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+		}
+
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+		bi = &rx_ring->rx_buffer_info[i];
+	}
+
+no_buffers:
+	if (rx_ring->next_to_use != i) {
+		rx_ring->next_to_use = i;
+		if (i-- == 0)
+			i = (rx_ring->count - 1);
+
+		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
+	}
+}
+
+static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
+					     u64 qmask)
+{
+	u32 mask;
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	mask = (qmask & 0xFFFFFFFF);
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+}
+
+static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
+}
+
+static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+}
+
+static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
+				 struct ixgbevf_ring *rx_ring,
+				 int *work_done, int work_to_do)
+{
+	struct ixgbevf_adapter *adapter = q_vector->adapter;
+	struct pci_dev *pdev = adapter->pdev;
+	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
+	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
+	struct sk_buff *skb;
+	unsigned int i;
+	u32 len, staterr;
+	u16 hdr_info;
+	bool cleaned = false;
+	int cleaned_count = 0;
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+	i = rx_ring->next_to_clean;
+	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+	rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
+	while (staterr & IXGBE_RXD_STAT_DD) {
+		u32 upper_len = 0;
+		if (*work_done >= work_to_do)
+			break;
+		(*work_done)++;
+
+		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+			hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
+			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+			if (hdr_info & IXGBE_RXDADV_SPH)
+				adapter->rx_hdr_split++;
+			if (len > IXGBEVF_RX_HDR_SIZE)
+				len = IXGBEVF_RX_HDR_SIZE;
+			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+		} else {
+			len = le16_to_cpu(rx_desc->wb.upper.length);
+		}
+		cleaned = true;
+		skb = rx_buffer_info->skb;
+		prefetch(skb->data - NET_IP_ALIGN);
+		rx_buffer_info->skb = NULL;
+
+		if (rx_buffer_info->dma) {
+			pci_unmap_single(pdev, rx_buffer_info->dma,
+					 rx_ring->rx_buf_len,
+					 PCI_DMA_FROMDEVICE);
+			rx_buffer_info->dma = 0;
+			skb_put(skb, len);
+		}
+
+		if (upper_len) {
+			pci_unmap_page(pdev, rx_buffer_info->page_dma,
+				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+			rx_buffer_info->page_dma = 0;
+			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+					   rx_buffer_info->page,
+					   rx_buffer_info->page_offset,
+					   upper_len);
+
+			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
+			    (page_count(rx_buffer_info->page) != 1))
+				rx_buffer_info->page = NULL;
+			else
+				get_page(rx_buffer_info->page);
+
+			skb->len += upper_len;
+			skb->data_len += upper_len;
+			skb->truesize += upper_len;
+		}
+
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+
+		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
+		prefetch(next_rxd);
+		cleaned_count++;
+
+		next_buffer = &rx_ring->rx_buffer_info[i];
+
+		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
+			if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+				rx_buffer_info->skb = next_buffer->skb;
+				rx_buffer_info->dma = next_buffer->dma;
+				next_buffer->skb = skb;
+				next_buffer->dma = 0;
+			} else {
+				skb->next = next_buffer->skb;
+				skb->next->prev = skb;
+			}
+			adapter->non_eop_descs++;
+			goto next_desc;
+		}
+
+		/* ERR_MASK will only have valid bits if EOP set */
+		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
+			dev_kfree_skb_irq(skb);
+			goto next_desc;
+		}
+
+		ixgbevf_rx_checksum(adapter, staterr, skb);
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+		total_rx_packets++;
+
+		/*
+		 * Work around issue of some types of VM to VM loop back
+		 * packets not getting split correctly
+		 */
+		if (staterr & IXGBE_RXD_STAT_LB) {
+			u32 header_fixup_len = skb->len - skb->data_len;
+			if (header_fixup_len < 14)
+				skb_push(skb, header_fixup_len);
+		}
+		skb->protocol = eth_type_trans(skb, adapter->netdev);
+
+		ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
+		adapter->netdev->last_rx = jiffies;
+
+next_desc:
+		rx_desc->wb.upper.status_error = 0;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
+			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
+						 cleaned_count);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
+		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+	}
+
+	rx_ring->next_to_clean = i;
+	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+
+	if (cleaned_count)
+		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+
+	rx_ring->total_packets += total_rx_packets;
+	rx_ring->total_bytes += total_rx_bytes;
+	adapter->net_stats.rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_packets += total_rx_packets;
+
+	return cleaned;
+}
+
+/**
+ * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
+ * @napi: napi struct with our device's info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function is optimized for cleaning one queue only on a single
+ * q_vector.
+ **/
+static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
+{
+	struct ixgbevf_q_vector *q_vector =
+		container_of(napi, struct ixgbevf_q_vector, napi);
+	struct ixgbevf_adapter *adapter = q_vector->adapter;
+	struct ixgbevf_ring *rx_ring = NULL;
+	int work_done = 0;
+	long r_idx;
+
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	rx_ring = &(adapter->rx_ring[r_idx]);
+
+	ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
+
+	/* If all Rx work done, exit the polling mode */
+	if (work_done < budget) {
+		napi_complete(napi);
+		if (adapter->itr_setting & 1)
+			ixgbevf_set_itr_msix(q_vector);
+		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+			ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
+	}
+
+	return work_done;
+}
+
+/**
+ * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
+ * @napi: napi struct with our device's info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean more than one rx queue associated with a
+ * q_vector.
+ **/
+static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
+{
+	struct ixgbevf_q_vector *q_vector =
+		container_of(napi, struct ixgbevf_q_vector, napi);
+	struct ixgbevf_adapter *adapter = q_vector->adapter;
+	struct ixgbevf_ring *rx_ring = NULL;
+	int work_done = 0, i;
+	long r_idx;
+	u64 enable_mask = 0;
+
+	/* attempt to distribute budget to each queue fairly, but don't allow
+	 * the budget to go below 1 because we'll exit polling */
+	budget /= (q_vector->rxr_count ?: 1);
+	budget = max(budget, 1);
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	for (i = 0; i < q_vector->rxr_count; i++) {
+		rx_ring = &(adapter->rx_ring[r_idx]);
+		ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
+		enable_mask |= rx_ring->v_idx;
+		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+				      r_idx + 1);
+	}
+
+#ifndef HAVE_NETDEV_NAPI_LIST
+	if (!netif_running(adapter->netdev))
+		work_done = 0;
+
+#endif
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	rx_ring = &(adapter->rx_ring[r_idx]);
+
+	/* If all Rx work done, exit the polling mode */
+	if (work_done < budget) {
+		napi_complete(napi);
+		if (adapter->itr_setting & 1)
+			ixgbevf_set_itr_msix(q_vector);
+		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+			ixgbevf_irq_enable_queues(adapter, enable_mask);
+	}
+
+	return work_done;
+}
+
+/**
+ * ixgbevf_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
+ * interrupts.
+ **/
+static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
+{
+	struct ixgbevf_q_vector *q_vector;
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i, j, q_vectors, v_idx, r_idx;
+	u32 mask;
+
+	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+	/*
+	 * Populate the IVAR table and set the ITR values to the
+	 * corresponding register.
+	 */
+	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+		q_vector = adapter->q_vector[v_idx];
+		/* XXX for_each_bit(...) */
+		r_idx = find_first_bit(q_vector->rxr_idx,
+				       adapter->num_rx_queues);
+
+		for (i = 0; i < q_vector->rxr_count; i++) {
+			j = adapter->rx_ring[r_idx].reg_idx;
+			ixgbevf_set_ivar(adapter, 0, j, v_idx);
+			r_idx = find_next_bit(q_vector->rxr_idx,
+					      adapter->num_rx_queues,
+					      r_idx + 1);
+		}
+		r_idx = find_first_bit(q_vector->txr_idx,
+				       adapter->num_tx_queues);
+
+		for (i = 0; i < q_vector->txr_count; i++) {
+			j = adapter->tx_ring[r_idx].reg_idx;
+			ixgbevf_set_ivar(adapter, 1, j, v_idx);
+			r_idx = find_next_bit(q_vector->txr_idx,
+					      adapter->num_tx_queues,
+					      r_idx + 1);
+		}
+
+		/* if this is a tx only vector halve the interrupt rate */
+		if (q_vector->txr_count && !q_vector->rxr_count)
+			q_vector->eitr = (adapter->eitr_param >> 1);
+		else if (q_vector->rxr_count)
+			/* rx only */
+			q_vector->eitr = adapter->eitr_param;
+
+		ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
+	}
+
+	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
+
+	/* set up to autoclear timer, and the vectors */
+	mask = IXGBE_EIMS_ENABLE_MASK;
+	mask &= ~IXGBE_EIMS_OTHER;
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
+}
+
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+/**
+ * ixgbevf_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @eitr: eitr setting (ints per sec) to give last timeslice
+ * @itr_setting: current throttle rate in ints/second
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ *      Stores a new ITR value based on packets and byte
+ *      counts during the last interrupt.  The advantage of per interrupt
+ *      computation is faster updates and more accurate ITR for the current
+ *      traffic pattern.  Constants in this function were computed
+ *      based on theoretical maximum wire speed and thresholds were set based
+ *      on testing data as well as attempting to minimize response time
+ *      while increasing bulk throughput.
+ **/
+static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
+			     u32 eitr, u8 itr_setting,
+			     int packets, int bytes)
+{
+	unsigned int retval = itr_setting;
+	u32 timepassed_us;
+	u64 bytes_perint;
+
+	if (packets == 0)
+		goto update_itr_done;
+
+	/* simple throttlerate management
+	 *    0-20MB/s lowest (100000 ints/s)
+	 *   20-100MB/s low   (20000 ints/s)
+	 *  100-1249MB/s bulk (8000 ints/s)
+	 */
+	/* what was last interrupt timeslice? */
+	timepassed_us = 1000000/eitr;
+	bytes_perint = bytes / timepassed_us; /* bytes/usec */
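+	/*
+	 * Example: at eitr = 20000 ints/s the last timeslice was 50 usec;
+	 * 3000 bytes in that slice is 60 bytes/usec, above the default
+	 * eitr_high of 20, so the vector is steered towards bulk_latency.
+	 */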
+
+	switch (itr_setting) {
+	case lowest_latency:
+		if (bytes_perint > adapter->eitr_low)
+			retval = low_latency;
+		break;
+	case low_latency:
+		if (bytes_perint > adapter->eitr_high)
+			retval = bulk_latency;
+		else if (bytes_perint <= adapter->eitr_low)
+			retval = lowest_latency;
+		break;
+	case bulk_latency:
+		if (bytes_perint <= adapter->eitr_high)
+			retval = low_latency;
+		break;
+	}
+
+update_itr_done:
+	return retval;
+}
+
+/**
+ * ixgbevf_write_eitr - write VTEITR register in hardware specific way
+ * @adapter: pointer to adapter struct
+ * @v_idx: vector index into q_vector array
+ * @itr_reg: new value to be written, in ints/s (converted to register format here)
+ *
+ * This function is made to be called by ethtool and by the driver
+ * when it needs to update VTEITR registers at runtime.  Hardware
+ * specific quirks/differences are taken care of here.
+ */
+static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
+			       u32 itr_reg)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);
+
+	/*
+	 * set the WDIS bit to not clear the timer bits and cause an
+	 * immediate assertion of the interrupt
+	 */
+	itr_reg |= IXGBE_EITR_CNT_WDIS;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
+}
+
+static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
+{
+	struct ixgbevf_adapter *adapter = q_vector->adapter;
+	u32 new_itr;
+	u8 current_itr, ret_itr;
+	int i, r_idx, v_idx = q_vector->v_idx;
+	struct ixgbevf_ring *rx_ring, *tx_ring;
+
+	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+	for (i = 0; i < q_vector->txr_count; i++) {
+		tx_ring = &(adapter->tx_ring[r_idx]);
+		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
+					     q_vector->tx_itr,
+					     tx_ring->total_packets,
+					     tx_ring->total_bytes);
+		/* if the result for this queue would decrease interrupt
+		 * rate for this vector then use that result */
+		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
+				    q_vector->tx_itr - 1 : ret_itr);
+		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+				      r_idx + 1);
+	}
+
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	for (i = 0; i < q_vector->rxr_count; i++) {
+		rx_ring = &(adapter->rx_ring[r_idx]);
+		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
+					     q_vector->rx_itr,
+					     rx_ring->total_packets,
+					     rx_ring->total_bytes);
+		/* if the result for this queue would decrease interrupt
+		 * rate for this vector then use that result */
+		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
+				    q_vector->rx_itr - 1 : ret_itr);
+		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+				      r_idx + 1);
+	}
+
+	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+
+	switch (current_itr) {
+	/* counts and packets in update_itr are dependent on these numbers */
+	case lowest_latency:
+		new_itr = 100000;
+		break;
+	case low_latency:
+		new_itr = 20000; /* aka hwitr = ~200 */
+		break;
+	case bulk_latency:
+	default:
+		new_itr = 8000;
+		break;
+	}
+
+	if (new_itr != q_vector->eitr) {
+		/* smooth the transition: weight the old value 90% and the
+		 * new one 10% */
+		u32 smoothed = ((q_vector->eitr * 90)/100) +
+			       ((new_itr * 10)/100);
+
+		/* save the algorithm value here, not the smoothed one */
+		q_vector->eitr = new_itr;
+		/* ixgbevf_write_eitr() takes ints/s and does the register
+		 * format conversion itself */
+		ixgbevf_write_eitr(adapter, v_idx, smoothed);
+	}
+}
+
+static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 eicr;
+
+	eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
+{
+	struct ixgbevf_q_vector *q_vector = data;
+	struct ixgbevf_adapter  *adapter = q_vector->adapter;
+	struct ixgbevf_ring     *tx_ring;
+	int i, r_idx;
+
+	if (!q_vector->txr_count)
+		return IRQ_HANDLED;
+
+	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+	for (i = 0; i < q_vector->txr_count; i++) {
+		tx_ring = &(adapter->tx_ring[r_idx]);
+		tx_ring->total_bytes = 0;
+		tx_ring->total_packets = 0;
+		ixgbevf_clean_tx_irq(adapter, tx_ring);
+		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+				      r_idx + 1);
+	}
+
+	if (adapter->itr_setting & 1)
+		ixgbevf_set_itr_msix(q_vector);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
+ * @irq: unused
+ * @data: pointer to our q_vector struct for this interrupt vector
+ **/
+static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
+{
+	struct ixgbevf_q_vector *q_vector = data;
+	struct ixgbevf_adapter  *adapter = q_vector->adapter;
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbevf_ring  *rx_ring;
+	int r_idx;
+	int i;
+
+	if (!q_vector->rxr_count)
+		return IRQ_HANDLED;
+
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	for (i = 0; i < q_vector->rxr_count; i++) {
+		rx_ring = &(adapter->rx_ring[r_idx]);
+		rx_ring->total_bytes = 0;
+		rx_ring->total_packets = 0;
+		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+				      r_idx + 1);
+	}
+
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	rx_ring = &(adapter->rx_ring[r_idx]);
+	/* disable interrupts on this vector only */
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
+	napi_schedule(&q_vector->napi);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
+{
+	ixgbevf_msix_clean_rx(irq, data);
+	ixgbevf_msix_clean_tx(irq, data);
+
+	return IRQ_HANDLED;
+}
+
+static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
+				     int r_idx)
+{
+	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
+
+	set_bit(r_idx, q_vector->rxr_idx);
+	q_vector->rxr_count++;
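+	/* v_idx is stored as a one-hot mask so it can be OR'd directly
+	 * into the VTEIMS/VTEIMC interrupt mask registers */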
+	a->rx_ring[r_idx].v_idx = 1 << v_idx;
+}
+
+static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
+				     int t_idx)
+{
+	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
+
+	set_bit(t_idx, q_vector->txr_idx);
+	q_vector->txr_count++;
+	a->tx_ring[t_idx].v_idx = 1 << v_idx;
+}
+
+/**
+ * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code.  Ideally, we'd have
+ * one vector per ring/queue, but on a constrained vector budget, we
+ * group the rings as "efficiently" as possible.  You would add new
+ * mapping configurations in here.
+ **/
+static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
+{
+	int q_vectors;
+	int v_start = 0;
+	int rxr_idx = 0, txr_idx = 0;
+	int rxr_remaining = adapter->num_rx_queues;
+	int txr_remaining = adapter->num_tx_queues;
+	int i, j;
+	int rqpv, tqpv;
+	int err = 0;
+
+	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+	/*
+	 * The ideal configuration...
+	 * We have enough vectors to map one per queue.
+	 */
+	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
+		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+			map_vector_to_rxq(adapter, v_start, rxr_idx);
+
+		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
+			map_vector_to_txq(adapter, v_start, txr_idx);
+		goto out;
+	}
+
+	/*
+	 * If we don't have enough vectors for a 1-to-1
+	 * mapping, we'll have to group them so there are
+	 * multiple queues per vector.
+	 */
+	/* Re-adjusting *qpv takes care of the remainder. */
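+	/* Example: 4 Rx queues over 3 vectors gives ceil(4/3) = 2 queues
+	 * on the first vector, then ceil(2/2) = 1 and ceil(1/1) = 1. */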
+	for (i = v_start; i < q_vectors; i++) {
+		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
+		for (j = 0; j < rqpv; j++) {
+			map_vector_to_rxq(adapter, i, rxr_idx);
+			rxr_idx++;
+			rxr_remaining--;
+		}
+	}
+	for (i = v_start; i < q_vectors; i++) {
+		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
+		for (j = 0; j < tqpv; j++) {
+			map_vector_to_txq(adapter, i, txr_idx);
+			txr_idx++;
+			txr_remaining--;
+		}
+	}
+
+out:
+	return err;
+}
+
+/**
+ * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	irqreturn_t (*handler)(int, void *);
+	int i, vector, q_vectors, err;
+	int ri = 0, ti = 0;
+
+	/* Decrement for the non-queue (mailbox) vector */
+	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count)          \
+					  ? &ixgbevf_msix_clean_many : \
+			  (_v)->rxr_count ? &ixgbevf_msix_clean_rx   : \
+			  (_v)->txr_count ? &ixgbevf_msix_clean_tx   : \
+			  NULL)
+	for (vector = 0; vector < q_vectors; vector++) {
+		handler = SET_HANDLER(adapter->q_vector[vector]);
+
+		if (handler == &ixgbevf_msix_clean_rx) {
+			sprintf(adapter->name[vector], "%s-%s-%d",
+				netdev->name, "rx", ri++);
+		} else if (handler == &ixgbevf_msix_clean_tx) {
+			sprintf(adapter->name[vector], "%s-%s-%d",
+				netdev->name, "tx", ti++);
+		} else if (handler == &ixgbevf_msix_clean_many) {
+			sprintf(adapter->name[vector], "%s-%s-%d",
+				netdev->name, "TxRx", vector);
+		} else {
+			/* skip this unused q_vector */
+			continue;
+		}
+		err = request_irq(adapter->msix_entries[vector].vector,
+				  handler, 0, adapter->name[vector],
+				  adapter->q_vector[vector]);
+		if (err) {
+			hw_dbg(&adapter->hw,
+			       "request_irq failed for MSIX interrupt "
+			       "Error: %d\n", err);
+			goto free_queue_irqs;
+		}
+	}
+
+	sprintf(adapter->name[vector], "%s:mbx", netdev->name);
+	err = request_irq(adapter->msix_entries[vector].vector,
+			  &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
+	if (err) {
+		hw_dbg(&adapter->hw,
+		       "request_irq for msix_mbx failed: %d\n", err);
+		goto free_queue_irqs;
+	}
+
+	return 0;
+
+free_queue_irqs:
+	for (i = vector - 1; i >= 0; i--)
+		free_irq(adapter->msix_entries[i].vector,
+			 adapter->q_vector[i]);
+	pci_disable_msix(adapter->pdev);
+	kfree(adapter->msix_entries);
+	adapter->msix_entries = NULL;
+	return err;
+}
+
+static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
+{
+	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+	for (i = 0; i < q_vectors; i++) {
+		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
+		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
+		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
+		q_vector->rxr_count = 0;
+		q_vector->txr_count = 0;
+		q_vector->eitr = adapter->eitr_param;
+	}
+}
+
+/**
+ * ixgbevf_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
+{
+	int err = 0;
+
+	err = ixgbevf_request_msix_irqs(adapter);
+
+	if (err)
+		hw_dbg(&adapter->hw,
+		       "request_irq failed, Error %d\n", err);
+
+	return err;
+}
+
+static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i, q_vectors;
+
+	q_vectors = adapter->num_msix_vectors;
+
+	i = q_vectors - 1;
+
+	free_irq(adapter->msix_entries[i].vector, netdev);
+	i--;
+
+	for (; i >= 0; i--) {
+		free_irq(adapter->msix_entries[i].vector,
+			 adapter->q_vector[i]);
+	}
+
+	ixgbevf_reset_q_vectors(adapter);
+}
+
+/**
+ * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
+{
+	int i;
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
+
+	IXGBE_WRITE_FLUSH(hw);
+
+	for (i = 0; i < adapter->num_msix_vectors; i++)
+		synchronize_irq(adapter->msix_entries[i].vector);
+}
+
+/**
+ * ixgbevf_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
+				      bool queues, bool flush)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 mask;
+	u64 qmask;
+
+	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+	qmask = ~0;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+	if (queues)
+		ixgbevf_irq_enable_queues(adapter, qmask);
+
+	if (flush)
+		IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
+{
+	u64 tdba;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 i, j, tdlen, txctrl;
+
+	/* Setup the HW Tx Head and Tail descriptor pointers */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
+		j = ring->reg_idx;
+		tdba = ring->dma;
+		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
+				(tdba & DMA_BIT_MASK(32)));
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
+		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
+		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
+		/* Disable Tx Head Writeback RO bit, since this hoses
+		 * bookkeeping if things aren't delivered in order.
+		 */
+		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
+		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
+	}
+}
+
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2
+
+static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
+{
+	struct ixgbevf_ring *rx_ring;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 srrctl;
+
+	rx_ring = &adapter->rx_ring[index];
+
+	srrctl = IXGBE_SRRCTL_DROP_EN;
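+	/* DROP_EN makes the hardware drop frames when this queue runs out
+	 * of descriptors instead of stalling the whole receive unit */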
+
+	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+		u16 bufsz = IXGBEVF_RXBUFFER_2048;
+		/* grow the amount we can receive on large page machines */
+		if (bufsz < (PAGE_SIZE / 2))
+			bufsz = (PAGE_SIZE / 2);
+		/* cap the bufsz at our largest descriptor size */
+		bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);
+
+		srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+		srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
+			   IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+			   IXGBE_SRRCTL_BSIZEHDR_MASK);
+	} else {
+		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+		if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
+			srrctl |= IXGBEVF_RXBUFFER_2048 >>
+				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+		else
+			srrctl |= rx_ring->rx_buf_len >>
+				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
+}
+
+/**
+ * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
+{
+	u64 rdba;
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+	int i, j;
+	u32 rdlen;
+	int rx_buf_len;
+
+	/* Decide whether to use packet split mode or not */
+	if (netdev->mtu > ETH_DATA_LEN) {
+		if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
+			adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+		else
+			adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+	} else {
+		if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
+			adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+		else
+			adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+	}
+
+	/* Set the RX buffer length according to the mode */
+	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+		/* PSRTYPE must be initialized in 82599 */
+		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
+			IXGBE_PSRTYPE_UDPHDR |
+			IXGBE_PSRTYPE_IPV4HDR |
+			IXGBE_PSRTYPE_IPV6HDR |
+			IXGBE_PSRTYPE_L2HDR;
+		IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+		rx_buf_len = IXGBEVF_RX_HDR_SIZE;
+	} else {
+		IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+		if (netdev->mtu <= ETH_DATA_LEN)
+			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+		else
+			rx_buf_len = ALIGN(max_frame, 1024);
+	}
+
+	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
+	/* Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rdba = adapter->rx_ring[i].dma;
+		j = adapter->rx_ring[i].reg_idx;
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
+				(rdba & DMA_BIT_MASK(32)));
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
+		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
+		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
+		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+
+		ixgbevf_configure_srrctl(adapter, j);
+	}
+}
+
+static void ixgbevf_vlan_rx_register(struct net_device *netdev,
+				     struct vlan_group *grp)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i, j;
+	u32 ctrl;
+
+	adapter->vlgrp = grp;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		j = adapter->rx_ring[i].reg_idx;
+		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+		ctrl |= IXGBE_RXDCTL_VME;
+		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), ctrl);
+	}
+}
+
+static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct net_device *v_netdev;
+
+	/* add VID to filter table */
+	if (hw->mac.ops.set_vfta)
+		hw->mac.ops.set_vfta(hw, vid, 0, true);
+	/*
+	 * Copy feature flags from netdev to the vlan netdev for this vid.
+	 * This allows things like TSO to bubble down to our vlan device.
+	 */
+	v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
+	v_netdev->features |= adapter->netdev->features;
+	vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
+}
+
+static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+		ixgbevf_irq_disable(adapter);
+
+	vlan_group_set_device(adapter->vlgrp, vid, NULL);
+
+	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+		ixgbevf_irq_enable(adapter, true, true);
+
+	/* remove VID from filter table */
+	if (hw->mac.ops.set_vfta)
+		hw->mac.ops.set_vfta(hw, vid, 0, false);
+}
+
+static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
+{
+	ixgbevf_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+
+	if (adapter->vlgrp) {
+		u16 vid;
+		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+			if (!vlan_group_get_device(adapter->vlgrp, vid))
+				continue;
+			ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
+		}
+	}
+}
+
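+/*
+ * Iterator handed to the mailbox multicast update: given a pointer to a
+ * dmi_addr embedded in a struct dev_mc_list, recover the containing list
+ * entry via container_of() and advance to the next address.
+ */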
+static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+				 u32 *vmdq)
+{
+	struct dev_mc_list *mc_ptr;
+	u8 *addr = *mc_addr_ptr;
+	*vmdq = 0;
+
+	mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
+	if (mc_ptr->next)
+		*mc_addr_ptr = mc_ptr->next->dmi_addr;
+	else
+		*mc_addr_ptr = NULL;
+
+	return addr;
+}
+
+/**
+ * ixgbevf_set_rx_mode - Multicast set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_method entry point is called whenever the multicast address
+ * list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper multicast mode.
+ **/
+static void ixgbevf_set_rx_mode(struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	u8 *addr_list = NULL;
+	int addr_count = 0;
+
+	/* reprogram multicast list */
+	addr_count = netdev->mc_count;
+	if (addr_count)
+		addr_list = netdev->mc_list->dmi_addr;
+	if (hw->mac.ops.update_mc_addr_list)
+		hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
+						ixgbevf_addr_list_itr);
+}
+
+static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
+{
+	int q_idx;
+	struct ixgbevf_q_vector *q_vector;
+	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+		struct napi_struct *napi;
+		q_vector = adapter->q_vector[q_idx];
+		if (!q_vector->rxr_count)
+			continue;
+		napi = &q_vector->napi;
+		if (q_vector->rxr_count > 1)
+			napi->poll = &ixgbevf_clean_rxonly_many;
+
+		napi_enable(napi);
+	}
+}
+
+static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
+{
+	int q_idx;
+	struct ixgbevf_q_vector *q_vector;
+	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+		q_vector = adapter->q_vector[q_idx];
+		if (!q_vector->rxr_count)
+			continue;
+		napi_disable(&q_vector->napi);
+	}
+}
+
+static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i;
+
+	ixgbevf_set_rx_mode(netdev);
+
+	ixgbevf_restore_vlan(adapter);
+
+	ixgbevf_configure_tx(adapter);
+	ixgbevf_configure_rx(adapter);
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
+		ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
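+		/* tail is set one entry short of the ring size so that
+		 * head == tail unambiguously means "empty", never "full" */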
+		ring->next_to_use = ring->count - 1;
+		writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
+	}
+}
+
+#define IXGBE_MAX_RX_DESC_POLL 10
+static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+						int rxr)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int j = adapter->rx_ring[rxr].reg_idx;
+	int k;
+
+	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
+		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
+			break;
+		else
+			msleep(1);
+	}
+	if (k >= IXGBE_MAX_RX_DESC_POLL) {
+		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
+		       "not set within the polling period\n", rxr);
+	}
+
+	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
+				(adapter->rx_ring[rxr].count - 1));
+}
+
+static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i, j = 0;
+	int num_rx_rings = adapter->num_rx_queues;
+	u32 txdctl, rxdctl;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		j = adapter->tx_ring[i].reg_idx;
+		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
+		txdctl |= (8 << 16);
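+		/* WTHRESH occupies bits 16-22 of TXDCTL, hence the shift */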
+		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
+	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		j = adapter->tx_ring[i].reg_idx;
+		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+		txdctl |= IXGBE_TXDCTL_ENABLE;
+		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
+	}
+
+	for (i = 0; i < num_rx_rings; i++) {
+		j = adapter->rx_ring[i].reg_idx;
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+		rxdctl |= IXGBE_RXDCTL_ENABLE;
+		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
+		ixgbevf_rx_desc_queue_enable(adapter, i);
+	}
+
+	ixgbevf_configure_msix(adapter);
+
+	if (hw->mac.ops.set_rar) {
+		if (is_valid_ether_addr(hw->mac.addr))
+			hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+		else
+			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
+	}
+
+	clear_bit(__IXGBEVF_DOWN, &adapter->state);
+	ixgbevf_napi_enable_all(adapter);
+
+	/* enable transmits */
+	netif_tx_start_all_queues(netdev);
+
+	/* bring the link up in the watchdog, this could race with our first
+	 * link up interrupt but shouldn't be a problem */
+	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+	adapter->link_check_timeout = jiffies;
+	mod_timer(&adapter->watchdog_timer, jiffies);
+	return 0;
+}
+
+int ixgbevf_up(struct ixgbevf_adapter *adapter)
+{
+	int err;
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	ixgbevf_configure(adapter);
+
+	err = ixgbevf_up_complete(adapter);
+
+	/* clear any pending interrupts, may auto mask */
+	IXGBE_READ_REG(hw, IXGBE_VTEICR);
+
+	ixgbevf_irq_enable(adapter, true, true);
+
+	return err;
+}
+
+/**
+ * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
+ **/
+static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
+				  struct ixgbevf_ring *rx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	unsigned long size;
+	unsigned int i;
+
+	/* Free all the Rx ring sk_buffs */
+
+	for (i = 0; i < rx_ring->count; i++) {
+		struct ixgbevf_rx_buffer *rx_buffer_info;
+
+		rx_buffer_info = &rx_ring->rx_buffer_info[i];
+		if (rx_buffer_info->dma) {
+			pci_unmap_single(pdev, rx_buffer_info->dma,
+					 rx_ring->rx_buf_len,
+					 PCI_DMA_FROMDEVICE);
+			rx_buffer_info->dma = 0;
+		}
+		if (rx_buffer_info->skb) {
+			struct sk_buff *skb = rx_buffer_info->skb;
+			rx_buffer_info->skb = NULL;
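+			/* partial packets may be chained via skb->prev in
+			 * one-buffer mode; walk and free the whole chain */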
+			do {
+				struct sk_buff *this = skb;
+				skb = skb->prev;
+				dev_kfree_skb(this);
+			} while (skb);
+		}
+		if (!rx_buffer_info->page)
+			continue;
+		pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
+			       PCI_DMA_FROMDEVICE);
+		rx_buffer_info->page_dma = 0;
+		put_page(rx_buffer_info->page);
+		rx_buffer_info->page = NULL;
+		rx_buffer_info->page_offset = 0;
+	}
+
+	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
+	memset(rx_ring->rx_buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	if (rx_ring->head)
+		writel(0, adapter->hw.hw_addr + rx_ring->head);
+	if (rx_ring->tail)
+		writel(0, adapter->hw.hw_addr + rx_ring->tail);
+}
+
+/**
+ * ixgbevf_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
+ **/
+static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
+				  struct ixgbevf_ring *tx_ring)
+{
+	struct ixgbevf_tx_buffer *tx_buffer_info;
+	unsigned long size;
+	unsigned int i;
+
+	/* Free all the Tx ring sk_buffs */
+
+	for (i = 0; i < tx_ring->count; i++) {
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+	}
+
+	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
+	memset(tx_ring->tx_buffer_info, 0, size);
+
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+
+	if (tx_ring->head)
+		writel(0, adapter->hw.hw_addr + tx_ring->head);
+	if (tx_ring->tail)
+		writel(0, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+/**
+ * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+void ixgbevf_down(struct ixgbevf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 txdctl;
+	int i, j;
+
+	/* signal that we are down to the interrupt handler */
+	set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+	/* stop all new transmits */
+	netif_tx_disable(netdev);
+
+	msleep(10);
+
+	netif_tx_stop_all_queues(netdev);
+
+	ixgbevf_irq_disable(adapter);
+
+	ixgbevf_napi_disable_all(adapter);
+
+	del_timer_sync(&adapter->watchdog_timer);
+	/* can't call flush scheduled work here because it can deadlock
+	 * if linkwatch_event tries to acquire the rtnl_lock which we are
+	 * holding */
+	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
+		msleep(1);
+
+	/* disable transmits in the hardware now that interrupts are off */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		j = adapter->tx_ring[i].reg_idx;
+		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
+				(txdctl & ~IXGBE_TXDCTL_ENABLE));
+	}
+
+	netif_carrier_off(netdev);
+
+	if (!pci_channel_offline(adapter->pdev))
+		ixgbevf_reset(adapter);
+
+	ixgbevf_clean_all_tx_rings(adapter);
+	ixgbevf_clean_all_rx_rings(adapter);
+}
+
+void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
+{
+	WARN_ON(in_interrupt());
+	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
+		msleep(1);
+
+	ixgbevf_down(adapter);
+	ixgbevf_up(adapter);
+
+	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
+}
+
+void ixgbevf_reset(struct ixgbevf_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+
+	if (hw->mac.ops.reset_hw(hw))
+		hw_dbg(hw, "PF still resetting\n");
+	else
+		hw->mac.ops.init_hw(hw);
+
+	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
+		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
+		       netdev->addr_len);
+		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
+		       netdev->addr_len);
+	}
+}
+
+static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
+					 int vectors)
+{
+	int err, vector_threshold;
+
+	/* We'll want at least 3 (vector_threshold):
+	 * 1) TxQ[0] Cleanup
+	 * 2) RxQ[0] Cleanup
+	 * 3) Other (Link Status Change, etc.)
+	 */
+	vector_threshold = MIN_MSIX_COUNT;
+
+	/* The more we get, the more we will assign to Tx/Rx Cleanup
+	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+	 * Right now, we simply care about how many we'll get; we'll
+	 * set them up later while requesting irq's.
+	 */
+	while (vectors >= vector_threshold) {
+		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+				      vectors);
+		if (!err) /* Success in acquiring all requested vectors. */
+			break;
+		else if (err < 0)
+			vectors = 0; /* Nasty failure, quit now */
+		else /* err == number of vectors we should try again with */
+			vectors = err;
+	}
+
+	if (vectors < vector_threshold) {
+		/* Can't allocate enough MSI-X interrupts?  Oh well.
+		 * This just means we'll go with either a single MSI
+		 * vector or fall back to legacy interrupts.
+		 */
+		hw_dbg(&adapter->hw,
+		       "Unable to allocate MSI-X interrupts\n");
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+	} else {
+		/*
+		 * Adjust for only the vectors we'll use, which is minimum
+		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
+		 * vectors we were allocated.
+		 */
+		adapter->num_msix_vectors = vectors;
+	}
+}
+
+/**
+ * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
+ * @adapter: board private structure to initialize
+ *
+ * This is the top level queue allocation routine.  The order here is very
+ * important, starting with the "most" number of features turned on at once,
+ * and ending with the smallest set of features.  This way large combinations
+ * can be allocated if they're turned on, and smaller combinations are the
+ * fallthrough conditions.
+ **/
+static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
+{
+	/* Start with base case */
+	adapter->num_rx_queues = 1;
+	adapter->num_tx_queues = 1;
+	adapter->num_rx_pools = adapter->num_rx_queues;
+	adapter->num_rx_queues_per_pool = 1;
+}
+
+/**
+ * ixgbevf_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time.
+ **/
+static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
+{
+	int i;
+
+	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
+				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
+	if (!adapter->tx_ring)
+		goto err_tx_ring_allocation;
+
+	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
+				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
+	if (!adapter->rx_ring)
+		goto err_rx_ring_allocation;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		adapter->tx_ring[i].count = adapter->tx_ring_count;
+		adapter->tx_ring[i].queue_index = i;
+		adapter->tx_ring[i].reg_idx = i;
+	}
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		adapter->rx_ring[i].count = adapter->rx_ring_count;
+		adapter->rx_ring[i].queue_index = i;
+		adapter->rx_ring[i].reg_idx = i;
+	}
+
+	return 0;
+
+err_rx_ring_allocation:
+	kfree(adapter->tx_ring);
+err_tx_ring_allocation:
+	return -ENOMEM;
+}
+
+/**
+ * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
+{
+	int err = 0;
+	int vector, v_budget;
+
+	/*
+	 * It's easy to be greedy for MSI-X vectors, but it really
+	 * doesn't do us much good if we have a lot more vectors
+	 * than CPUs.  So let's be conservative and only ask for
+	 * (roughly) twice the number of vectors as there are CPUs.
+	 */
+	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
+		       (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
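+	/* e.g. one Rx and one Tx queue on an 8-CPU machine requests
+	 * min(2, 16) + NON_Q_VECTORS entries */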
+
+	/* A failure in MSI-X entry allocation isn't fatal, but it does
+	 * mean we disable MSI-X capabilities of the adapter. */
+	adapter->msix_entries = kcalloc(v_budget,
+					sizeof(struct msix_entry), GFP_KERNEL);
+	if (!adapter->msix_entries) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	for (vector = 0; vector < v_budget; vector++)
+		adapter->msix_entries[vector].entry = vector;
+
+	ixgbevf_acquire_msix_vectors(adapter, v_budget);
+
+out:
+	return err;
+}
+
+/**
+ * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ **/
+static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
+{
+	int q_idx, num_q_vectors;
+	struct ixgbevf_q_vector *q_vector;
+	int napi_vectors;
+	int (*poll)(struct napi_struct *, int);
+
+	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+	napi_vectors = adapter->num_rx_queues;
+	poll = &ixgbevf_clean_rxonly;
+
+	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
+		if (!q_vector)
+			goto err_out;
+		q_vector->adapter = adapter;
+		q_vector->v_idx = q_idx;
+		q_vector->eitr = adapter->eitr_param;
+		if (q_idx < napi_vectors)
+			netif_napi_add(adapter->netdev, &q_vector->napi,
+				       (*poll), 64);
+		adapter->q_vector[q_idx] = q_vector;
+	}
+
+	return 0;
+
+err_out:
+	while (q_idx) {
+		q_idx--;
+		q_vector = adapter->q_vector[q_idx];
+		netif_napi_del(&q_vector->napi);
+		kfree(q_vector);
+		adapter->q_vector[q_idx] = NULL;
+	}
+	return -ENOMEM;
+}
+
+/**
+ * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
+{
+	int q_idx, num_q_vectors;
+	int napi_vectors;
+
+	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+	napi_vectors = adapter->num_rx_queues;
+
+	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
+
+		adapter->q_vector[q_idx] = NULL;
+		if (q_idx < napi_vectors)
+			netif_napi_del(&q_vector->napi);
+		kfree(q_vector);
+	}
+}
+
+/**
+ * ixgbevf_reset_interrupt_capability - Reset MSIX setup
+ * @adapter: board private structure
+ *
+ **/
+static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
+{
+	pci_disable_msix(adapter->pdev);
+	kfree(adapter->msix_entries);
+	adapter->msix_entries = NULL;
+}
+
+/**
+ * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
+ * @adapter: board private structure to initialize
+ *
+ **/
+static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
+{
+	int err;
+
+	/* Number of supported queues */
+	ixgbevf_set_num_queues(adapter);
+
+	err = ixgbevf_set_interrupt_capability(adapter);
+	if (err) {
+		hw_dbg(&adapter->hw,
+		       "Unable to setup interrupt capabilities\n");
+		goto err_set_interrupt;
+	}
+
+	err = ixgbevf_alloc_q_vectors(adapter);
+	if (err) {
+		hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
+		       "vectors\n");
+		goto err_alloc_q_vectors;
+	}
+
+	err = ixgbevf_alloc_queues(adapter);
+	if (err) {
+		printk(KERN_ERR "Unable to allocate memory for queues\n");
+		goto err_alloc_queues;
+	}
+
+	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
+	       "Tx Queue count = %u\n",
+	       (adapter->num_rx_queues > 1) ? "Enabled" :
+	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
+
+	set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+	return 0;
+err_alloc_queues:
+	ixgbevf_free_q_vectors(adapter);
+err_alloc_q_vectors:
+	ixgbevf_reset_interrupt_capability(adapter);
+err_set_interrupt:
+	return err;
+}
+
+/**
+ * ixgbevf_sw_init - Initialize general software structures
+ * (struct ixgbevf_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * ixgbevf_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	int err;
+
+	/* PCI config space info */
+
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_device_id = pdev->subsystem_device;
+
+	hw->mbx.ops.init_params(hw);
+	hw->mac.max_tx_queues = MAX_TX_QUEUES;
+	hw->mac.max_rx_queues = MAX_RX_QUEUES;
+	err = hw->mac.ops.reset_hw(hw);
+	if (err) {
+		dev_info(&pdev->dev,
+		         "PF still in reset state, assigning new address\n");
+		random_ether_addr(hw->mac.addr);
+	} else {
+		err = hw->mac.ops.init_hw(hw);
+		if (err) {
+			printk(KERN_ERR "init_shared_code failed: %d\n", err);
+			goto out;
+		}
+	}
+
+	/* Enable dynamic interrupt throttling rates */
+	adapter->eitr_param = 20000;
+	adapter->itr_setting = 1;
+
+	/* set default eitr thresholds, in MB/s (i.e. bytes/usec) */
+	adapter->eitr_low = 10;
+	adapter->eitr_high = 20;
+
+	/* set default ring sizes */
+	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
+	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
+
+	/* enable rx csum by default */
+	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+
+	set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+out:
+	return err;
+}
+
+static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
+	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
+	adapter->stats.last_vfgorc |=
+		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
+	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
+	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
+	adapter->stats.last_vfgotc |=
+		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
+	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
+
+	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
+	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
+	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
+	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
+	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
+}
+
+#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
+	{							\
+		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
+		if (current_counter < last_counter)		\
+			counter += 0x100000000LL;		\
+		last_counter = current_counter;			\
+		counter &= 0xFFFFFFFF00000000LL;		\
+		counter |= current_counter;			\
+	}
+
+#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+	{								 \
+		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
+		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
+		u64 current_counter = (current_counter_msb << 32) |      \
+			current_counter_lsb;                             \
+		if (current_counter < last_counter)			 \
+			counter += 0x1000000000LL;			 \
+		last_counter = current_counter;				 \
+		counter &= 0xFFFFFFF000000000LL;			 \
+		counter |= current_counter;				 \
+	}
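+
+/*
+ * The hardware counters are only 32 (or 36) bits wide and wrap silently;
+ * the macros above detect a wrap (current < last), carry 2^32 (resp. 2^36)
+ * into the 64-bit software counter, then splice in the freshly read bits.
+ */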
+/**
+ * ixgbevf_update_stats - Update the board statistics counters.
+ * @adapter: board private structure
+ **/
+void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
+				adapter->stats.vfgprc);
+	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
+				adapter->stats.vfgptc);
+	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
+				adapter->stats.last_vfgorc,
+				adapter->stats.vfgorc);
+	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
+				adapter->stats.last_vfgotc,
+				adapter->stats.vfgotc);
+	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
+				adapter->stats.vfmprc);
+
+	/* Fill out the OS statistics structure */
+	adapter->net_stats.multicast = adapter->stats.vfmprc -
+		adapter->stats.base_vfmprc;
+}
+
+/**
+ * ixgbevf_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void ixgbevf_watchdog(unsigned long data)
+{
+	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u64 eics = 0;
+	int i;
+
+	/*
+	 * Do the watchdog outside of interrupt context due to the lovely
+	 * delays that some of the newer hardware requires
+	 */
+
+	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+		goto watchdog_short_circuit;
+
+	/* get one bit for every active tx/rx interrupt vector */
+	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
+		if (qv->rxr_count || qv->txr_count)
+			eics |= (1 << i);
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);
+
+watchdog_short_circuit:
+	schedule_work(&adapter->watchdog_task);
+}
+
+/**
+ * ixgbevf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void ixgbevf_tx_timeout(struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+	/* Do the reset outside of interrupt context */
+	schedule_work(&adapter->reset_task);
+}
+
+static void ixgbevf_reset_task(struct work_struct *work)
+{
+	struct ixgbevf_adapter *adapter;
+	adapter = container_of(work, struct ixgbevf_adapter, reset_task);
+
+	/* If we're already down or resetting, just bail */
+	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
+		return;
+
+	adapter->tx_timeout_count++;
+
+	ixgbevf_reinit_locked(adapter);
+}
+
+/**
+ * ixgbevf_watchdog_task - worker thread to bring link up
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbevf_watchdog_task(struct work_struct *work)
+{
+	struct ixgbevf_adapter *adapter = container_of(work,
+						       struct ixgbevf_adapter,
+						       watchdog_task);
+	struct net_device *netdev = adapter->netdev;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 link_speed = adapter->link_speed;
+	bool link_up = adapter->link_up;
+
+	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
+
+	/*
+	 * Always check the link on the watchdog because we have
+	 * no LSC interrupt
+	 */
+	if (hw->mac.ops.check_link) {
+		hw->mac.ops.check_link(hw, &link_speed,
+				       &link_up, false);
+	} else {
+		/* assume the link is up if there is no check_link
+		 * function */
+		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+		link_up = true;
+	}
+	adapter->link_up = link_up;
+	adapter->link_speed = link_speed;
+
+	if (link_up) {
+		if (!netif_carrier_ok(netdev)) {
+			hw_dbg(&adapter->hw, "NIC Link is Up %s, ",
+			       ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+				"10 Gbps" : "1 Gbps"));
+			netif_carrier_on(netdev);
+			netif_tx_wake_all_queues(netdev);
+		} else {
+			/* Force detection of hung controller */
+			adapter->detect_tx_hung = true;
+		}
+	} else {
+		adapter->link_up = false;
+		adapter->link_speed = 0;
+		if (netif_carrier_ok(netdev)) {
+			hw_dbg(&adapter->hw, "NIC Link is Down\n");
+			netif_carrier_off(netdev);
+			netif_tx_stop_all_queues(netdev);
+		}
+	}
+
+	ixgbevf_update_stats(adapter);
+
+	/* Force detection of hung controller every watchdog period */
+	adapter->detect_tx_hung = true;
+
+	/* Reset the timer */
+	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+		mod_timer(&adapter->watchdog_timer,
+			  round_jiffies(jiffies + (2 * HZ)));
+
+	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
+}
+
+/**
+ * ixgbevf_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
+			       struct ixgbevf_ring *tx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	ixgbevf_clean_tx_ring(adapter, tx_ring);
+
+	vfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+
+	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+
+	tx_ring->desc = NULL;
+}
+
+/**
+ * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		if (adapter->tx_ring[i].desc)
+			ixgbevf_free_tx_resources(adapter,
+						  &adapter->tx_ring[i]);
+
+}
+
+/**
+ * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @tx_ring:    tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
+			       struct ixgbevf_ring *tx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int size;
+
+	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
+	tx_ring->tx_buffer_info = vmalloc(size);
+	if (!tx_ring->tx_buffer_info)
+		goto err;
+	memset(tx_ring->tx_buffer_info, 0, size);
+
+	/* round up to nearest 4K */
+	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+					     &tx_ring->dma);
+	if (!tx_ring->desc)
+		goto err;
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+	tx_ring->work_limit = tx_ring->count;
+	return 0;
+
+err:
+	vfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+	hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
+	       "descriptor ring\n");
+	return -ENOMEM;
+}
+
+/**
+ * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		if (!err)
+			continue;
+		hw_dbg(&adapter->hw,
+		       "Allocation for Tx Queue %u failed\n", i);
+		break;
+	}
+
+	return err;
+}
+
+/**
+ * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rx_ring:    rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
+			       struct ixgbevf_ring *rx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int size;
+
+	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
+	rx_ring->rx_buffer_info = vmalloc(size);
+	if (!rx_ring->rx_buffer_info) {
+		hw_dbg(&adapter->hw,
+		       "Unable to vmalloc buffer memory for "
+		       "the receive descriptor ring\n");
+		goto alloc_failed;
+	}
+	memset(rx_ring->rx_buffer_info, 0, size);
+
+	/* Round up to nearest 4K */
+	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
+					     &rx_ring->dma);
+
+	if (!rx_ring->desc) {
+		hw_dbg(&adapter->hw,
+		       "Unable to allocate memory for "
+		       "the receive descriptor ring\n");
+		vfree(rx_ring->rx_buffer_info);
+		rx_ring->rx_buffer_info = NULL;
+		goto alloc_failed;
+	}
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	return 0;
+alloc_failed:
+	return -ENOMEM;
+}
+
+/**
+ * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		if (!err)
+			continue;
+		hw_dbg(&adapter->hw,
+		       "Allocation for Rx Queue %u failed\n", i);
+		break;
+	}
+	return err;
+}
+
+/**
+ * ixgbevf_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
+			       struct ixgbevf_ring *rx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	ixgbevf_clean_rx_ring(adapter, rx_ring);
+
+	vfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
+
+	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+
+	rx_ring->desc = NULL;
+}
+
+/**
+ * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		if (adapter->rx_ring[i].desc)
+			ixgbevf_free_rx_resources(adapter,
+						  &adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbevf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int ixgbevf_open(struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	int err;
+
+	/* disallow open during test */
+	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
+		return -EBUSY;
+
+	if (hw->adapter_stopped) {
+		ixgbevf_reset(adapter);
+		/* if the adapter is still stopped then the PF isn't up and
+		 * the VF can't start */
+		if (hw->adapter_stopped) {
+			err = IXGBE_ERR_MBX;
+			printk(KERN_ERR "Unable to start - perhaps the PF "
+			       "driver isn't up yet\n");
+			goto err_setup_reset;
+		}
+	}
+
+	/* allocate transmit descriptors */
+	err = ixgbevf_setup_all_tx_resources(adapter);
+	if (err)
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+	err = ixgbevf_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_setup_rx;
+
+	ixgbevf_configure(adapter);
+
+	/*
+	 * Map the Tx/Rx rings to the vectors we were allotted.
+	 * If request_irq is going to be called in this function then
+	 * the rings must be mapped to vectors *before* up_complete.
+	 */
+	ixgbevf_map_rings_to_vectors(adapter);
+
+	err = ixgbevf_up_complete(adapter);
+	if (err)
+		goto err_up;
+
+	/* clear any pending interrupts, may auto mask */
+	IXGBE_READ_REG(hw, IXGBE_VTEICR);
+	err = ixgbevf_request_irq(adapter);
+	if (err)
+		goto err_req_irq;
+
+	ixgbevf_irq_enable(adapter, true, true);
+
+	return 0;
+
+err_req_irq:
+	ixgbevf_down(adapter);
+err_up:
+	ixgbevf_free_irq(adapter);
+err_setup_rx:
+	ixgbevf_free_all_rx_resources(adapter);
+err_setup_tx:
+	ixgbevf_free_all_tx_resources(adapter);
+	ixgbevf_reset(adapter);
+
+err_setup_reset:
+
+	return err;
+}
+
+/**
+ * ixgbevf_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the drivers control, but
+ * needs to be disabled.  A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int ixgbevf_close(struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+	ixgbevf_down(adapter);
+	ixgbevf_free_irq(adapter);
+
+	ixgbevf_free_all_tx_resources(adapter);
+	ixgbevf_free_all_rx_resources(adapter);
+
+	return 0;
+}
+
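+/**
+ * ixgbevf_tso - set up a TSO context descriptor if the skb needs one
+ * @adapter: board private structure
+ * @tx_ring: ring the packet will be transmitted on
+ * @skb: buffer being transmitted
+ * @tx_flags: VLAN/checksum flags gathered by the transmit path
+ * @hdr_len: updated with the length of the headers covered by TSO
+ *
+ * Returns true if a TSO context descriptor was written to the ring,
+ * false if the skb is not GSO, or a negative errno if the cloned
+ * header could not be expanded.
+ **/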
+static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
+		       struct ixgbevf_ring *tx_ring,
+		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+	struct ixgbe_adv_tx_context_desc *context_desc;
+	unsigned int i;
+	int err;
+	struct ixgbevf_tx_buffer *tx_buffer_info;
+	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
+	u32 mss_l4len_idx, l4len;
+
+	if (skb_is_gso(skb)) {
+		if (skb_header_cloned(skb)) {
+			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+			if (err)
+				return err;
+		}
+		l4len = tcp_hdrlen(skb);
+		*hdr_len += l4len;
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			struct iphdr *iph = ip_hdr(skb);
+			iph->tot_len = 0;
+			iph->check = 0;
+			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+								 iph->daddr, 0,
+								 IPPROTO_TCP,
+								 0);
+			adapter->hw_tso_ctxt++;
+		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+			ipv6_hdr(skb)->payload_len = 0;
+			tcp_hdr(skb)->check =
+			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+					     &ipv6_hdr(skb)->daddr,
+					     0, IPPROTO_TCP, 0);
+			adapter->hw_tso6_ctxt++;
+		}
+
+		i = tx_ring->next_to_use;
+
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+
+		/* VLAN MACLEN IPLEN */
+		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+			vlan_macip_lens |=
+				(tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
+		vlan_macip_lens |= ((skb_network_offset(skb)) <<
+				    IXGBE_ADVTXD_MACLEN_SHIFT);
+		*hdr_len += skb_network_offset(skb);
+		vlan_macip_lens |=
+			(skb_transport_header(skb) - skb_network_header(skb));
+		*hdr_len +=
+			(skb_transport_header(skb) - skb_network_header(skb));
+		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+		context_desc->seqnum_seed = 0;
+
+		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
+				    IXGBE_ADVTXD_DTYP_CTXT);
+
+		if (skb->protocol == htons(ETH_P_IP))
+			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
+
+		/* MSS L4LEN IDX */
+		mss_l4len_idx =
+			(skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
+		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
+		/* use index 1 for TSO */
+		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+
+		tx_buffer_info->time_stamp = jiffies;
+		tx_buffer_info->next_to_watch = i;
+
+		i++;
+		if (i == tx_ring->count)
+			i = 0;
+		tx_ring->next_to_use = i;
+
+		return true;
+	}
+
+	return false;
+}
+
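+/**
+ * ixgbevf_tx_csum - set up a Tx checksum offload context descriptor
+ * @adapter: board private structure
+ * @tx_ring: ring the packet will be transmitted on
+ * @skb: buffer being transmitted
+ * @tx_flags: VLAN/checksum flags gathered by the transmit path
+ *
+ * Returns true if a context descriptor was written because the packet
+ * needs checksum insertion and/or carries a VLAN tag, false otherwise.
+ **/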
+static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
+			    struct ixgbevf_ring *tx_ring,
+			    struct sk_buff *skb, u32 tx_flags)
+{
+	struct ixgbe_adv_tx_context_desc *context_desc;
+	unsigned int i;
+	struct ixgbevf_tx_buffer *tx_buffer_info;
+	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL ||
+	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
+		i = tx_ring->next_to_use;
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+
+		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+			vlan_macip_lens |= (tx_flags &
+					    IXGBE_TX_FLAGS_VLAN_MASK);
+		vlan_macip_lens |= (skb_network_offset(skb) <<
+				    IXGBE_ADVTXD_MACLEN_SHIFT);
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			vlan_macip_lens |= (skb_transport_header(skb) -
+					    skb_network_header(skb));
+
+		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+		context_desc->seqnum_seed = 0;
+
+		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
+				    IXGBE_ADVTXD_DTYP_CTXT);
+
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			switch (skb->protocol) {
+			case __constant_htons(ETH_P_IP):
+				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+					type_tucmd_mlhl |=
+					    IXGBE_ADVTXD_TUCMD_L4T_TCP;
+				break;
+			case __constant_htons(ETH_P_IPV6):
+				/* XXX what about other V6 headers?? */
+				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+					type_tucmd_mlhl |=
+						IXGBE_ADVTXD_TUCMD_L4T_TCP;
+				break;
+			default:
+				if (unlikely(net_ratelimit())) {
+					printk(KERN_WARNING
+					       "partial checksum but "
+					       "proto=%x!\n",
+					       skb->protocol);
+				}
+				break;
+			}
+		}
+
+		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
+		/* use index zero for tx checksum offload */
+		context_desc->mss_l4len_idx = 0;
+
+		tx_buffer_info->time_stamp = jiffies;
+		tx_buffer_info->next_to_watch = i;
+
+		adapter->hw_csum_tx_good++;
+		i++;
+		if (i == tx_ring->count)
+			i = 0;
+		tx_ring->next_to_use = i;
+
+		return true;
+	}
+
+	return false;
+}
+
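+/**
+ * ixgbevf_tx_map - map the skb data for DMA and record it in the ring
+ * @adapter: board private structure
+ * @tx_ring: ring the packet will be transmitted on
+ * @skb: buffer being transmitted
+ * @tx_flags: VLAN/checksum flags gathered by the transmit path
+ * @first: index of the first buffer_info used for this packet
+ *
+ * Maps the linear data and every page fragment, splitting anything
+ * larger than IXGBE_MAX_DATA_PER_TXD across multiple descriptors.
+ * Returns the number of descriptors used, or a negative count after
+ * unwinding all mappings if a DMA mapping fails.
+ **/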
+static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
+			  struct ixgbevf_ring *tx_ring,
+			  struct sk_buff *skb, u32 tx_flags,
+			  unsigned int first)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct ixgbevf_tx_buffer *tx_buffer_info;
+	unsigned int len;
+	unsigned int total = skb->len;
+	unsigned int offset = 0, size, count = 0, i;
+	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+	unsigned int f;
+
+	i = tx_ring->next_to_use;
+
+	len = min(skb_headlen(skb), total);
+	while (len) {
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+
+		tx_buffer_info->length = size;
+		tx_buffer_info->mapped_as_page = false;
+		tx_buffer_info->dma = pci_map_single(adapter->pdev,
+						     skb->data + offset,
+						     size, PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+			goto dma_error;
+		tx_buffer_info->time_stamp = jiffies;
+		tx_buffer_info->next_to_watch = i;
+
+		len -= size;
+		total -= size;
+		offset += size;
+		count++;
+		i++;
+		if (i == tx_ring->count)
+			i = 0;
+	}
+
+	for (f = 0; f < nr_frags; f++) {
+		struct skb_frag_struct *frag;
+
+		frag = &skb_shinfo(skb)->frags[f];
+		len = min((unsigned int)frag->size, total);
+		offset = frag->page_offset;
+
+		while (len) {
+			tx_buffer_info = &tx_ring->tx_buffer_info[i];
+			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+
+			tx_buffer_info->length = size;
+			tx_buffer_info->dma = pci_map_page(adapter->pdev,
+							   frag->page,
+							   offset,
+							   size,
+							   PCI_DMA_TODEVICE);
+			tx_buffer_info->mapped_as_page = true;
+			if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+				goto dma_error;
+			tx_buffer_info->time_stamp = jiffies;
+			tx_buffer_info->next_to_watch = i;
+
+			len -= size;
+			total -= size;
+			offset += size;
+			count++;
+			i++;
+			if (i == tx_ring->count)
+				i = 0;
+		}
+		if (total == 0)
+			break;
+	}
+
+	if (i == 0)
+		i = tx_ring->count - 1;
+	else
+		i = i - 1;
+	tx_ring->tx_buffer_info[i].skb = skb;
+	tx_ring->tx_buffer_info[first].next_to_watch = i;
+
+	return count;
+
+dma_error:
+	dev_err(&pdev->dev, "TX DMA map failed\n");
+
+	/* clear timestamp and dma mappings for failed tx_buffer_info map */
+	tx_buffer_info->dma = 0;
+	tx_buffer_info->time_stamp = 0;
+	tx_buffer_info->next_to_watch = 0;
+	count--;
+
+	/* clear timestamp and dma mappings for remaining portion of packet */
+	while (count >= 0) {
+		count--;
+		i--;
+		if (i < 0)
+			i += tx_ring->count;
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+	}
+
+	return count;
+}
+
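+/**
+ * ixgbevf_tx_queue - write mapped buffers out as advanced Tx descriptors
+ * @adapter: board private structure
+ * @tx_ring: ring the packet will be transmitted on
+ * @tx_flags: VLAN/TSO/checksum flags gathered by the transmit path
+ * @count: number of descriptors to write
+ * @paylen: total packet length in bytes
+ * @hdr_len: header length covered by TSO
+ *
+ * Fills in one data descriptor per mapped buffer, sets EOP/RS on the
+ * last one, and writes the ring tail register to hand the packet to
+ * hardware (after a wmb() so the descriptor writes are visible first).
+ **/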
+static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
+			     struct ixgbevf_ring *tx_ring, int tx_flags,
+			     int count, u32 paylen, u8 hdr_len)
+{
+	union ixgbe_adv_tx_desc *tx_desc = NULL;
+	struct ixgbevf_tx_buffer *tx_buffer_info;
+	u32 olinfo_status = 0, cmd_type_len = 0;
+	unsigned int i;
+
+	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
+
+	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
+
+	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+
+	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+
+	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
+		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+
+		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+			IXGBE_ADVTXD_POPTS_SHIFT;
+
+		/* use index 1 context for tso */
+		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
+				IXGBE_ADVTXD_POPTS_SHIFT;
+
+	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+			IXGBE_ADVTXD_POPTS_SHIFT;
+
+	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+	i = tx_ring->next_to_use;
+	while (count--) {
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
+		tx_desc->read.cmd_type_len =
+			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
+		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+		i++;
+		if (i == tx_ring->count)
+			i = 0;
+	}
+
+	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+
+	/*
+	 * Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64).
+	 */
+	wmb();
+
+	tx_ring->next_to_use = i;
+	writel(i, adapter->hw.hw_addr + tx_ring->tail);
+}
+
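+/*
+ * Slow path of ixgbevf_maybe_stop_tx: stop the queue, then re-check
+ * after the memory barrier in case another CPU freed descriptors
+ * between the first check and the stop, and restart if room appeared.
+ */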
+static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
+				   struct ixgbevf_ring *tx_ring, int size)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+	netif_stop_subqueue(netdev, tx_ring->queue_index);
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it. */
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available. */
+	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_subqueue(netdev, tx_ring->queue_index);
+	++adapter->restart_queue;
+	return 0;
+}
+
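+/*
+ * Fast path: only take the stop-queue slow path when fewer than size
+ * descriptors are free on the ring.
+ */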
+static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
+				 struct ixgbevf_ring *tx_ring, int size)
+{
+	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
+}
+
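+/**
+ * ixgbevf_xmit_frame - transmit entry point called by the stack
+ * @skb: buffer to transmit
+ * @netdev: network interface device structure
+ *
+ * Counts the descriptors the skb will need (a context descriptor plus
+ * data descriptors for the head and each fragment), stops the queue
+ * if the ring is too full, then sets up TSO or checksum offload, maps
+ * the buffers, and queues the descriptors to hardware.
+ **/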
+static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbevf_ring *tx_ring;
+	unsigned int first;
+	unsigned int tx_flags = 0;
+	u8 hdr_len = 0;
+	int r_idx = 0, tso;
+	int count = 0;
+	unsigned int f;
+
+	tx_ring = &adapter->tx_ring[r_idx];
+
+	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+		tx_flags |= vlan_tx_tag_get(skb);
+		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
+		tx_flags |= IXGBE_TX_FLAGS_VLAN;
+	}
+
+	/* four things can cause us to need a context descriptor */
+	if (skb_is_gso(skb) ||
+	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
+	    (tx_flags & IXGBE_TX_FLAGS_VLAN))
+		count++;
+
+	count += TXD_USE_COUNT(skb_headlen(skb));
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+
+	if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
+		adapter->tx_busy++;
+		return NETDEV_TX_BUSY;
+	}
+
+	first = tx_ring->next_to_use;
+
+	if (skb->protocol == htons(ETH_P_IP))
+		tx_flags |= IXGBE_TX_FLAGS_IPV4;
+	tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+	if (tso < 0) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (tso)
+		tx_flags |= IXGBE_TX_FLAGS_TSO;
+	else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+		 (skb->ip_summed == CHECKSUM_PARTIAL))
+		tx_flags |= IXGBE_TX_FLAGS_CSUM;
+
+	ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
+			 ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
+			 skb->len, hdr_len);
+
+	netdev->trans_start = jiffies;
+
+	ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+
+	return NETDEV_TX_OK;
+}
+
+/**
+ * ixgbevf_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *ixgbevf_get_stats(struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+	/* only return the current stats */
+	return &adapter->net_stats;
+}
+
+/**
+ * ixgbevf_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbevf_set_mac(struct net_device *netdev, void *p)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+
+	if (hw->mac.ops.set_rar)
+		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+
+	return 0;
+}
+
+/**
+ * ixgbevf_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+	/* MTU < 68 is an error and causes problems on some kernels */
+	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
+		return -EINVAL;
+
+	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
+	       netdev->mtu, new_mtu);
+	/* must set new MTU before calling down or up */
+	netdev->mtu = new_mtu;
+
+	if (netif_running(netdev))
+		ixgbevf_reinit_locked(adapter);
+
+	return 0;
+}
+
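+/**
+ * ixgbevf_shutdown - quiesce the device for reboot or power-off
+ * @pdev: PCI device information struct
+ *
+ * Detaches the netdev, brings the interface down if it was running,
+ * frees its IRQ and ring resources, and disables the PCI device.
+ **/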
+static void ixgbevf_shutdown(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev)) {
+		ixgbevf_down(adapter);
+		ixgbevf_free_irq(adapter);
+		ixgbevf_free_all_tx_resources(adapter);
+		ixgbevf_free_all_rx_resources(adapter);
+	}
+
+#ifdef CONFIG_PM
+	pci_save_state(pdev);
+#endif
+
+	pci_disable_device(pdev);
+}
+
+#ifdef HAVE_NET_DEVICE_OPS
+static const struct net_device_ops ixgbe_netdev_ops = {
+	.ndo_open		= &ixgbevf_open,
+	.ndo_stop		= &ixgbevf_close,
+	.ndo_start_xmit		= &ixgbevf_xmit_frame,
+	.ndo_get_stats		= &ixgbevf_get_stats,
+	.ndo_set_rx_mode	= &ixgbevf_set_rx_mode,
+	.ndo_set_multicast_list	= &ixgbevf_set_rx_mode,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= &ixgbevf_set_mac,
+	.ndo_change_mtu		= &ixgbevf_change_mtu,
+	.ndo_tx_timeout		= &ixgbevf_tx_timeout,
+	.ndo_vlan_rx_register	= &ixgbevf_vlan_rx_register,
+	.ndo_vlan_rx_add_vid	= &ixgbevf_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= &ixgbevf_vlan_rx_kill_vid,
+};
+#endif /* HAVE_NET_DEVICE_OPS */
+
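+/*
+ * Hook up the netdev entry points, using net_device_ops when the
+ * kernel provides it and the legacy per-field pointers otherwise.
+ */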
+static void ixgbevf_assign_netdev_ops(struct net_device *dev)
+{
+#ifdef HAVE_NET_DEVICE_OPS
+	dev->netdev_ops = &ixgbe_netdev_ops;
+#else /* HAVE_NET_DEVICE_OPS */
+	dev->open = &ixgbevf_open;
+	dev->stop = &ixgbevf_close;
+
+	dev->hard_start_xmit = &ixgbevf_xmit_frame;
+
+	dev->get_stats = &ixgbevf_get_stats;
+	dev->set_multicast_list = &ixgbevf_set_rx_mode;
+	dev->set_mac_address = &ixgbevf_set_mac;
+	dev->change_mtu = &ixgbevf_change_mtu;
+	dev->tx_timeout = &ixgbevf_tx_timeout;
+	dev->vlan_rx_register = &ixgbevf_vlan_rx_register;
+	dev->vlan_rx_add_vid = &ixgbevf_vlan_rx_add_vid;
+	dev->vlan_rx_kill_vid = &ixgbevf_vlan_rx_kill_vid;
+#endif /* HAVE_NET_DEVICE_OPS */
+	ixgbevf_set_ethtool_ops(dev);
+	dev->watchdog_timeo = 5 * HZ;
+}
+
+/**
+ * ixgbevf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ixgbevf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit ixgbevf_probe(struct pci_dev *pdev,
+				   const struct pci_device_id *ent)
+{
+	struct net_device *netdev;
+	struct ixgbevf_adapter *adapter = NULL;
+	struct ixgbe_hw *hw = NULL;
+	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
+	static int cards_found;
+	int err, pci_using_dac;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
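+	/* try a 64-bit DMA mask first, fall back to 32-bit if unsupported */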
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		pci_using_dac = 1;
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			err = pci_set_consistent_dma_mask(pdev,
+							  DMA_BIT_MASK(32));
+			if (err) {
+				dev_err(&pdev->dev, "No usable DMA "
+					"configuration, aborting\n");
+				goto err_dma;
+			}
+		}
+		pci_using_dac = 0;
+	}
+
+	err = pci_request_regions(pdev, ixgbevf_driver_name);
+	if (err) {
+		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
+		goto err_pci_reg;
+	}
+
+	pci_set_master(pdev);
+
+#ifdef HAVE_TX_MQ
+	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
+				   MAX_TX_QUEUES);
+#else
+	netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
+#endif
+	if (!netdev) {
+		err = -ENOMEM;
+		goto err_alloc_etherdev;
+	}
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	pci_set_drvdata(pdev, netdev);
+	adapter = netdev_priv(netdev);
+
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+	hw = &adapter->hw;
+	hw->back = adapter;
+	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+
+	/*
+	 * call save state here in standalone driver because it relies on
+	 * adapter struct to exist, and needs to call netdev_priv
+	 */
+	pci_save_state(pdev);
+
+	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+			      pci_resource_len(pdev, 0));
+	if (!hw->hw_addr) {
+		err = -EIO;
+		goto err_ioremap;
+	}
+
+	ixgbevf_assign_netdev_ops(netdev);
+
+	adapter->bd_number = cards_found;
+
+	/* Setup hw api */
+	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+	hw->mac.type  = ii->mac;
+
+	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
+	       sizeof(hw->mbx.ops));
+
+	adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
+	adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+	adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
+
+	/* setup the private structure */
+	err = ixgbevf_sw_init(adapter);
+	if (err)
+		goto err_sw_init;
+
+	ixgbevf_init_last_counter_stats(adapter);
+
+#ifdef MAX_SKB_FRAGS
+	netdev->features = NETIF_F_SG |
+			   NETIF_F_IP_CSUM |
+			   NETIF_F_HW_VLAN_TX |
+			   NETIF_F_HW_VLAN_RX |
+			   NETIF_F_HW_VLAN_FILTER;
+
+	netdev->features |= NETIF_F_IPV6_CSUM;
+	netdev->features |= NETIF_F_TSO;
+	netdev->features |= NETIF_F_TSO6;
+	netdev->vlan_features |= NETIF_F_TSO;
+	netdev->vlan_features |= NETIF_F_TSO6;
+	netdev->vlan_features |= NETIF_F_IP_CSUM;
+	netdev->vlan_features |= NETIF_F_SG;
+
+	if (pci_using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+
+#endif /* MAX_SKB_FRAGS */
+
+	/* The HW MAC address was set and/or determined in sw_init */
+	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+	if (ixgbevf_validate_mac_addr(netdev->dev_addr)) {
+		printk(KERN_ERR "invalid MAC address\n");
+		err = -EIO;
+		goto err_sw_init;
+	}
+
+	init_timer(&adapter->watchdog_timer);
+	adapter->watchdog_timer.function = &ixgbevf_watchdog;
+	adapter->watchdog_timer.data = (unsigned long)adapter;
+
+	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
+	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
+
+	err = ixgbevf_init_interrupt_scheme(adapter);
+	if (err)
+		goto err_sw_init;
+
+	/* pick up the PCI bus settings for reporting later */
+	if (hw->mac.ops.get_bus_info)
+		hw->mac.ops.get_bus_info(hw);
+
+	netif_carrier_off(netdev);
+	netif_tx_stop_all_queues(netdev);
+
+	strcpy(netdev->name, "eth%d");
+
+	err = register_netdev(netdev);
+	if (err)
+		goto err_register;
+
+	adapter->netdev_registered = true;
+
+	/* print the MAC address */
+	hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
+	       netdev->dev_addr[0],
+	       netdev->dev_addr[1],
+	       netdev->dev_addr[2],
+	       netdev->dev_addr[3],
+	       netdev->dev_addr[4],
+	       netdev->dev_addr[5]);
+
+	hw_dbg(hw, "MAC: %d\n", hw->mac.type);
+
+	hw_dbg(hw, "LRO is disabled \n");
+
+	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
+	cards_found++;
+	return 0;
+
+err_register:
+err_sw_init:
+	ixgbevf_reset_interrupt_capability(adapter);
+	iounmap(hw->hw_addr);
+err_ioremap:
+	free_netdev(netdev);
+err_alloc_etherdev:
+	pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/**
+ * ixgbevf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ixgbevf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void __devexit ixgbevf_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+	set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+	del_timer_sync(&adapter->watchdog_timer);
+
+	cancel_work_sync(&adapter->watchdog_task);
+
+	flush_scheduled_work();
+
+	if (adapter->netdev_registered) {
+		unregister_netdev(netdev);
+		adapter->netdev_registered = false;
+	}
+
+	ixgbevf_reset_interrupt_capability(adapter);
+
+	iounmap(adapter->hw.hw_addr);
+	pci_release_regions(pdev);
+
+	hw_dbg(hw, "Remove complete\n");
+
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+
+	free_netdev(netdev);
+
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver ixgbevf_driver = {
+	.name     = ixgbevf_driver_name,
+	.id_table = ixgbevf_pci_tbl,
+	.probe    = ixgbevf_probe,
+	.remove   = __devexit_p(ixgbevf_remove),
+	.shutdown = ixgbevf_shutdown,
+};
+
+/**
+ * ixgbevf_init_module - Driver Registration Routine
+ *
+ * ixgbevf_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init ixgbevf_init_module(void)
+{
+	int ret;
+
+	printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
+	       ixgbevf_driver_version);
+
+	printk(KERN_INFO "%s\n", ixgbevf_copyright);
+
+	ret = pci_register_driver(&ixgbevf_driver);
+	return ret;
+}
+
+module_init(ixgbevf_init_module);
+
+/**
+ * ixgbevf_exit_module - Driver Exit Cleanup Routine
+ *
+ * ixgbevf_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit ixgbevf_exit_module(void)
+{
+	pci_unregister_driver(&ixgbevf_driver);
+}
+
+#ifdef DEBUG
+/**
+ * ixgbevf_get_hw_dev_name - return device name string
+ * used by hardware layer to print debugging information
+ **/
+char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
+{
+	struct ixgbevf_adapter *adapter = hw->back;
+	return adapter->netdev->name;
+}
+
+#endif
+module_exit(ixgbevf_exit_module);
+
+/* ixgbevf_main.c */
