Message-Id: <2023040555-hatching-only-e48c@gregkh>
Date:   Wed,  5 Apr 2023 11:46:55 +0200
From:   Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To:     linux-kernel@...r.kernel.org, akpm@...ux-foundation.org,
        torvalds@...ux-foundation.org, stable@...r.kernel.org
Cc:     lwn@....net, jslaby@...e.cz,
        Greg Kroah-Hartman <gregkh@...uxfoundation.org>
Subject: Re: Linux 4.19.280

diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 02a323c43261..2a9dbac38b4e 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -94,8 +94,8 @@ gianfar.txt
 	- Gianfar Ethernet Driver.
 i40e.txt
 	- README for the Intel Ethernet Controller XL710 Driver (i40e).
-i40evf.txt
-	- Short note on the Driver for the Intel(R) XL710 X710 Virtual Function
+iavf.txt
+	- README for the Intel Ethernet Adaptive Virtual Function Driver (iavf).
 ieee802154.txt
 	- Linux IEEE 802.15.4 implementation, API and drivers
 igb.txt
diff --git a/Documentation/networking/i40evf.txt b/Documentation/networking/i40evf.txt
deleted file mode 100644
index e9b3035b95d0..000000000000
--- a/Documentation/networking/i40evf.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-Linux* Base Driver for Intel(R) Network Connection
-==================================================
-
-Intel Ethernet Adaptive Virtual Function Linux driver.
-Copyright(c) 2013-2017 Intel Corporation.
-
-Contents
-========
-
-- Identifying Your Adapter
-- Known Issues/Troubleshooting
-- Support
-
-This file describes the i40evf Linux* Base Driver.
-
-The i40evf driver supports the below mentioned virtual function
-devices and can only be activated on kernels running the i40e or
-newer Physical Function (PF) driver compiled with CONFIG_PCI_IOV.
-The i40evf driver requires CONFIG_PCI_MSI to be enabled.
-
-The guest OS loading the i40evf driver must support MSI-X interrupts.
-
-Supported Hardware
-==================
-Intel XL710 X710 Virtual Function
-Intel Ethernet Adaptive Virtual Function
-Intel X722 Virtual Function
-
-Identifying Your Adapter
-========================
-
-For more information on how to identify your adapter, go to the
-Adapter & Driver ID Guide at:
-
-    http://support.intel.com/support/go/network/adapter/idguide.htm
-
-Known Issues/Troubleshooting
-============================
-
-
-Support
-=======
-
-For general information, go to the Intel support website at:
-
-    http://support.intel.com
-
-or the Intel Wired Networking project hosted by Sourceforge at:
-
-    http://sourceforge.net/projects/e1000
-
-If an issue is identified with the released source code on the supported
-kernel with a supported adapter, email the specific information related
-to the issue to e1000-devel@...ts.sf.net
diff --git a/Documentation/networking/iavf.txt b/Documentation/networking/iavf.txt
new file mode 100644
index 000000000000..cc902a2369d6
--- /dev/null
+++ b/Documentation/networking/iavf.txt
@@ -0,0 +1,56 @@
+Linux* Base Driver for Intel(R) Network Connection
+==================================================
+
+Intel Ethernet Adaptive Virtual Function Linux driver.
+Copyright(c) 2013-2018 Intel Corporation.
+
+Contents
+========
+
+- Identifying Your Adapter
+- Known Issues/Troubleshooting
+- Support
+
+This file describes the iavf Linux* Base Driver. This driver
+was formerly called i40evf.
+
+The iavf driver supports the below mentioned virtual function
+devices and can only be activated on kernels running the i40e or
+newer Physical Function (PF) driver compiled with CONFIG_PCI_IOV.
+The iavf driver requires CONFIG_PCI_MSI to be enabled.
+
+The guest OS loading the iavf driver must support MSI-X interrupts.
+
+Supported Hardware
+==================
+Intel XL710 X710 Virtual Function
+Intel X722 Virtual Function
+Intel Ethernet Adaptive Virtual Function
+
+Identifying Your Adapter
+========================
+
+For more information on how to identify your adapter, go to the
+Adapter & Driver ID Guide at:
+
+    https://www.intel.com/content/www/us/en/support/articles/000005584/network-and-i-o/ethernet-products.html
+
+
+Known Issues/Troubleshooting
+============================
+
+
+Support
+=======
+
+For general information, go to the Intel support website at:
+
+    http://support.intel.com
+
+or the Intel Wired Networking project hosted by Sourceforge at:
+
+    http://sourceforge.net/projects/e1000
+
+If an issue is identified with the released source code on the supported
+kernel with a supported adapter, email the specific information related
+to the issue to e1000-devel@...ts.sf.net
diff --git a/MAINTAINERS b/MAINTAINERS
index af0f322cf2f7..a8015db6b37e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7377,7 +7377,7 @@ F:	Documentation/networking/ixgb.txt
 F:	Documentation/networking/ixgbe.txt
 F:	Documentation/networking/ixgbevf.txt
 F:	Documentation/networking/i40e.txt
-F:	Documentation/networking/i40evf.txt
+F:	Documentation/networking/iavf.txt
 F:	Documentation/networking/ice.txt
 F:	drivers/net/ethernet/intel/
 F:	drivers/net/ethernet/intel/*/
diff --git a/Makefile b/Makefile
index d6c4a53bf505..c70637ed93cd 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 279
+SUBLEVEL = 280
 EXTRAVERSION =
 NAME = "People's Front"
 
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index 9b70a7f5e705..35f706d836c5 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -30,6 +30,7 @@
 #include <linux/init.h>
 #include <linux/ptrace.h>
 #include <linux/kallsyms.h>
+#include <linux/extable.h>
 
 #include <asm/setup.h>
 #include <asm/fpu.h>
@@ -550,7 +551,8 @@ static inline void bus_error030 (struct frame *fp)
 			errorcode |= 2;
 
 		if (mmusr & (MMU_I | MMU_WP)) {
-			if (ssw & 4) {
+			/* We might have an exception table for this PC */
+			if (ssw & 4 && !search_exception_tables(fp->ptregs.pc)) {
 				pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
 				       ssw & RW ? "read" : "write",
 				       fp->un.fmtb.daddr,
diff --git a/arch/riscv/include/uapi/asm/setup.h b/arch/riscv/include/uapi/asm/setup.h
new file mode 100644
index 000000000000..66b13a522880
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/setup.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+
+#ifndef _UAPI_ASM_RISCV_SETUP_H
+#define _UAPI_ASM_RISCV_SETUP_H
+
+#define COMMAND_LINE_SIZE	1024
+
+#endif /* _UAPI_ASM_RISCV_SETUP_H */
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 0267405ab7c6..fcfd78f99cb4 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -339,7 +339,7 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
 		"4: slgr  %0,%0\n"
 		"5:\n"
 		EX_TABLE(0b,2b) EX_TABLE(3b,5b)
-		: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
+		: "+&a" (size), "+&a" (to), "+a" (tmp1), "=&a" (tmp2)
 		: "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
 	return size;
 }
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index 95100d8a0b7b..fc94603724b8 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -57,6 +57,7 @@
 #define SR_FD		0x00008000
 #define SR_MD		0x40000000
 
+#define SR_USER_MASK	0x00000303	// M, Q, S, T bits
 /*
  * DSP structure and data
  */
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index c46c0020ff55..ce93ae78c300 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -116,6 +116,7 @@ static int
 restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p)
 {
 	unsigned int err = 0;
+	unsigned int sr = regs->sr & ~SR_USER_MASK;
 
 #define COPY(x)		err |= __get_user(regs->x, &sc->sc_##x)
 			COPY(regs[1]);
@@ -131,6 +132,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p
 	COPY(sr);	COPY(pc);
 #undef COPY
 
+	regs->sr = (regs->sr & SR_USER_MASK) | sr;
+
 #ifdef CONFIG_SH_FPU
 	if (boot_cpu_data.flags & CPU_HAS_FPU) {
 		int owned_fp;
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index bc06f5919839..3380322df98e 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -2915,6 +2915,7 @@ close_card_oam(struct idt77252_dev *card)
 
 				recycle_rx_pool_skb(card, &vc->rcv.rx_pool);
 			}
+			kfree(vc);
 		}
 	}
 }
@@ -2958,6 +2959,15 @@ open_card_ubr0(struct idt77252_dev *card)
 	return 0;
 }
 
+static void
+close_card_ubr0(struct idt77252_dev *card)
+{
+	struct vc_map *vc = card->vcs[0];
+
+	free_scq(card, vc->scq);
+	kfree(vc);
+}
+
 static int
 idt77252_dev_open(struct idt77252_dev *card)
 {
@@ -3007,6 +3017,7 @@ static void idt77252_dev_close(struct atm_dev *dev)
 	struct idt77252_dev *card = dev->dev_data;
 	u32 conf;
 
+	close_card_ubr0(card);
 	close_card_oam(card);
 
 	conf = SAR_CFG_RXPTH |	/* enable receive path           */
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index 874172aa8e41..a698b1f6394b 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -146,6 +146,21 @@ static int btqcomsmd_setup(struct hci_dev *hdev)
 	return 0;
 }
 
+static int btqcomsmd_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+	int ret;
+
+	ret = qca_set_bdaddr_rome(hdev, bdaddr);
+	if (ret)
+		return ret;
+
+	/* The firmware stops responding for a while after setting the bdaddr,
+	 * causing timeouts for subsequent commands. Sleep a bit to avoid this.
+	 */
+	usleep_range(1000, 10000);
+	return 0;
+}
+
 static int btqcomsmd_probe(struct platform_device *pdev)
 {
 	struct btqcomsmd *btq;
@@ -195,7 +210,7 @@ static int btqcomsmd_probe(struct platform_device *pdev)
 	hdev->close = btqcomsmd_close;
 	hdev->send = btqcomsmd_send;
 	hdev->setup = btqcomsmd_setup;
-	hdev->set_bdaddr = qca_set_bdaddr_rome;
+	hdev->set_bdaddr = btqcomsmd_set_bdaddr;
 
 	ret = hci_register_dev(hdev);
 	if (ret < 0)
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 20142bc77554..bd55bf7a9914 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -353,6 +353,7 @@ static void btsdio_remove(struct sdio_func *func)
 
 	BT_DBG("func %p", func);
 
+	cancel_work_sync(&data->work);
 	if (!data)
 		return;
 
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 6a94aa6a22c2..1a0f977904b6 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -146,8 +146,8 @@ static int __init weim_parse_dt(struct platform_device *pdev,
 	const struct of_device_id *of_id = of_match_device(weim_id_table,
 							   &pdev->dev);
 	const struct imx_weim_devtype *devtype = of_id->data;
+	int ret = 0, have_child = 0;
 	struct device_node *child;
-	int ret, have_child = 0;
 
 	if (devtype == &imx50_weim_devtype) {
 		ret = imx_weim_gpr_setup(pdev);
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index e8cd66705ad7..5ccbbb3eb68e 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -705,6 +705,39 @@ static int scmi_remove(struct platform_device *pdev)
 	return ret;
 }
 
+static int scmi_mailbox_chan_validate(struct device *cdev)
+{
+	int num_mb, num_sh, ret = 0;
+	struct device_node *np = cdev->of_node;
+
+	num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
+	num_sh = of_count_phandle_with_args(np, "shmem", NULL);
+	/* Bail out if mboxes and shmem descriptors are inconsistent */
+	if (num_mb <= 0 || num_sh > 2 || num_mb != num_sh) {
+		dev_warn(cdev, "Invalid channel descriptor for '%s'\n",
+			 of_node_full_name(np));
+		return -EINVAL;
+	}
+
+	if (num_sh > 1) {
+		struct device_node *np_tx, *np_rx;
+
+		np_tx = of_parse_phandle(np, "shmem", 0);
+		np_rx = of_parse_phandle(np, "shmem", 1);
+		/* SCMI Tx and Rx shared mem areas have to be distinct */
+		if (!np_tx || !np_rx || np_tx == np_rx) {
+			dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
+				 of_node_full_name(np));
+			ret = -EINVAL;
+		}
+
+		of_node_put(np_tx);
+		of_node_put(np_rx);
+	}
+
+	return ret;
+}
+
 static inline int
 scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id)
 {
@@ -720,6 +753,10 @@ scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id)
 		goto idr_alloc;
 	}
 
+	ret = scmi_mailbox_chan_validate(dev);
+	if (ret)
+		return ret;
+
 	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
 	if (!cinfo)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index f21529e635e3..a9506a390f98 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -91,7 +91,15 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
 static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
 		struct vm_area_struct *vma)
 {
-	return dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
+	int ret;
+
+	ret = dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
+	if (!ret) {
+		/* Drop the reference acquired by drm_gem_mmap_obj(). */
+		drm_gem_object_put(&etnaviv_obj->base);
+	}
+
+	return ret;
 }
 
 static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index f8499cb95fec..4e4e151760db 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -495,6 +495,8 @@ static const struct it87_devices it87_devices[] = {
 #define has_pwm_freq2(data)	((data)->features & FEAT_PWM_FREQ2)
 #define has_six_temp(data)	((data)->features & FEAT_SIX_TEMP)
 #define has_vin3_5v(data)	((data)->features & FEAT_VIN3_5V)
+#define has_scaling(data)	((data)->features & (FEAT_12MV_ADC | \
+						     FEAT_10_9MV_ADC))
 
 struct it87_sio_data {
 	int sioaddr;
@@ -3107,7 +3109,7 @@ static int it87_probe(struct platform_device *pdev)
 			 "Detected broken BIOS defaults, disabling PWM interface\n");
 
 	/* Starting with IT8721F, we handle scaling of internal voltages */
-	if (has_12mv_adc(data)) {
+	if (has_scaling(data)) {
 		if (sio_data->internal & BIT(0))
 			data->in_scaled |= BIT(3);	/* in3 is AVCC */
 		if (sio_data->internal & BIT(1))
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 06c4c767af32..90c510d16651 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -508,10 +508,14 @@ static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
 static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
 {
 	struct lpi2c_imx_struct *lpi2c_imx = dev_id;
+	unsigned int enabled;
 	unsigned int temp;
 
+	enabled = readl(lpi2c_imx->base + LPI2C_MIER);
+
 	lpi2c_imx_intctrl(lpi2c_imx, 0);
 	temp = readl(lpi2c_imx->base + LPI2C_MSR);
+	temp &= enabled;
 
 	if (temp & MSR_RDF)
 		lpi2c_imx_read_rxfifo(lpi2c_imx);
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index a7ac746018ad..7a746f413535 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -321,6 +321,9 @@ static int slimpro_i2c_blkwr(struct slimpro_i2c_dev *ctx, u32 chip,
 	u32 msg[3];
 	int rc;
 
+	if (writelen > I2C_SMBUS_BLOCK_MAX)
+		return -EINVAL;
+
 	memcpy(ctx->dma_buffer, data, writelen);
 	paddr = dma_map_single(ctx->dev, ctx->dma_buffer, writelen,
 			       DMA_TO_DEVICE);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index dd80ff6cc427..b53da6360235 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -855,8 +855,8 @@ static void alps_process_packet_v6(struct psmouse *psmouse)
 			x = y = z = 0;
 
 		/* Divide 4 since trackpoint's speed is too fast */
-		input_report_rel(dev2, REL_X, (char)x / 4);
-		input_report_rel(dev2, REL_Y, -((char)y / 4));
+		input_report_rel(dev2, REL_X, (s8)x / 4);
+		input_report_rel(dev2, REL_Y, -((s8)y / 4));
 
 		psmouse_report_standard_buttons(dev2, packet[3]);
 
@@ -1107,8 +1107,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
 	    ((packet[3] & 0x20) << 1);
 	z = (packet[5] & 0x3f) | ((packet[3] & 0x80) >> 1);
 
-	input_report_rel(dev2, REL_X, (char)x);
-	input_report_rel(dev2, REL_Y, -((char)y));
+	input_report_rel(dev2, REL_X, (s8)x);
+	input_report_rel(dev2, REL_Y, -((s8)y));
 	input_report_abs(dev2, ABS_PRESSURE, z);
 
 	psmouse_report_standard_buttons(dev2, packet[1]);
@@ -2297,20 +2297,20 @@ static int alps_get_v3_v7_resolution(struct psmouse *psmouse, int reg_pitch)
 	if (reg < 0)
 		return reg;
 
-	x_pitch = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */
+	x_pitch = (s8)(reg << 4) >> 4; /* sign extend lower 4 bits */
 	x_pitch = 50 + 2 * x_pitch; /* In 0.1 mm units */
 
-	y_pitch = (char)reg >> 4; /* sign extend upper 4 bits */
+	y_pitch = (s8)reg >> 4; /* sign extend upper 4 bits */
 	y_pitch = 36 + 2 * y_pitch; /* In 0.1 mm units */
 
 	reg = alps_command_mode_read_reg(psmouse, reg_pitch + 1);
 	if (reg < 0)
 		return reg;
 
-	x_electrode = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */
+	x_electrode = (s8)(reg << 4) >> 4; /* sign extend lower 4 bits */
 	x_electrode = 17 + x_electrode;
 
-	y_electrode = (char)reg >> 4; /* sign extend upper 4 bits */
+	y_electrode = (s8)reg >> 4; /* sign extend upper 4 bits */
 	y_electrode = 13 + y_electrode;
 
 	x_phys = x_pitch * (x_electrode - 1); /* In 0.1 mm units */
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c
index a7d39689bbfb..4bd48b81ed98 100644
--- a/drivers/input/mouse/focaltech.c
+++ b/drivers/input/mouse/focaltech.c
@@ -206,8 +206,8 @@ static void focaltech_process_rel_packet(struct psmouse *psmouse,
 	state->pressed = packet[0] >> 7;
 	finger1 = ((packet[0] >> 4) & 0x7) - 1;
 	if (finger1 < FOC_MAX_FINGERS) {
-		state->fingers[finger1].x += (char)packet[1];
-		state->fingers[finger1].y += (char)packet[2];
+		state->fingers[finger1].x += (s8)packet[1];
+		state->fingers[finger1].y += (s8)packet[2];
 	} else {
 		psmouse_err(psmouse, "First finger in rel packet invalid: %d\n",
 			    finger1);
@@ -222,8 +222,8 @@ static void focaltech_process_rel_packet(struct psmouse *psmouse,
 	 */
 	finger2 = ((packet[3] >> 4) & 0x7) - 1;
 	if (finger2 < FOC_MAX_FINGERS) {
-		state->fingers[finger2].x += (char)packet[4];
-		state->fingers[finger2].y += (char)packet[5];
+		state->fingers[finger2].x += (s8)packet[4];
+		state->fingers[finger2].y += (s8)packet[5];
 	}
 }
 
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index e38c713e882e..908bf0768827 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1661,6 +1661,7 @@ static int dmcrypt_write(void *data)
 			io = crypt_io_from_node(rb_first(&write_tree));
 			rb_erase(&io->rb_node, &write_tree);
 			kcryptd_io_write(io);
+			cond_resched();
 		} while (!RB_EMPTY_ROOT(&write_tree));
 		blk_finish_plug(&plug);
 	}
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 3d59f3e208c5..0eb48e739f7e 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -188,7 +188,7 @@ static int dm_stat_in_flight(struct dm_stat_shared *shared)
 	       atomic_read(&shared->in_flight[WRITE]);
 }
 
-void dm_stats_init(struct dm_stats *stats)
+int dm_stats_init(struct dm_stats *stats)
 {
 	int cpu;
 	struct dm_stats_last_position *last;
@@ -196,11 +196,16 @@ void dm_stats_init(struct dm_stats *stats)
 	mutex_init(&stats->mutex);
 	INIT_LIST_HEAD(&stats->list);
 	stats->last = alloc_percpu(struct dm_stats_last_position);
+	if (!stats->last)
+		return -ENOMEM;
+
 	for_each_possible_cpu(cpu) {
 		last = per_cpu_ptr(stats->last, cpu);
 		last->last_sector = (sector_t)ULLONG_MAX;
 		last->last_rw = UINT_MAX;
 	}
+
+	return 0;
 }
 
 void dm_stats_cleanup(struct dm_stats *stats)
diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h
index 2ddfae678f32..dcac11fce03b 100644
--- a/drivers/md/dm-stats.h
+++ b/drivers/md/dm-stats.h
@@ -22,7 +22,7 @@ struct dm_stats_aux {
 	unsigned long long duration_ns;
 };
 
-void dm_stats_init(struct dm_stats *st);
+int dm_stats_init(struct dm_stats *st);
 void dm_stats_cleanup(struct dm_stats *st);
 
 struct mapped_device;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 969ea013c74e..a1bbf00e60e5 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3365,6 +3365,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	pt->low_water_blocks = low_water_blocks;
 	pt->adjusted_pf = pt->requested_pf = pf;
 	ti->num_flush_bios = 1;
+	ti->limit_swap_bios = true;
 
 	/*
 	 * Only need to enable discards if the pool should pass
@@ -4245,6 +4246,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		goto bad;
 
 	ti->num_flush_bios = 1;
+	ti->limit_swap_bios = true;
 	ti->flush_supported = true;
 	ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3d9a77f4e20f..9a9b2adcf39e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2021,7 +2021,9 @@ static struct mapped_device *alloc_dev(int minor)
 	bio_set_dev(&md->flush_bio, md->bdev);
 	md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
 
-	dm_stats_init(&md->stats);
+	r = dm_stats_init(&md->stats);
+	if (r < 0)
+		goto bad;
 
 	/* Populate the mapping, nobody knows we exist yet */
 	spin_lock(&_minor_lock);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 89d4dcc5253e..f8c111b36992 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2991,6 +2991,9 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
 		err = kstrtouint(buf, 10, (unsigned int *)&slot);
 		if (err < 0)
 			return err;
+		if (slot < 0)
+			/* overflow */
+			return -ENOSPC;
 	}
 	if (rdev->mddev->pers && slot == -1) {
 		/* Setting 'slot' on an active array requires also
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index be064bcfd70a..6b310f723580 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2237,9 +2237,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
 	 * If this is the upstream port for this switch, enable
 	 * forwarding of unknown unicasts and multicasts.
 	 */
-	reg = MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP |
-		MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
+	reg = MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
 		MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
+	/* Forward any IPv4 IGMP or IPv6 MLD frames received
+	 * by a USER port to the CPU port to allow snooping.
+	 */
+	if (dsa_is_user_port(ds, port))
+		reg |= MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP;
+
 	err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
 	if (err)
 		return err;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 1ab613eb5796..b542aba6f0e8 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -235,20 +235,27 @@ config I40E_DCB
 
 	  If unsure, say N.
 
+# this is here to allow seamless migration from I40EVF --> IAVF name
+# so that CONFIG_IAVF symbol will always mirror the state of CONFIG_I40EVF
+config IAVF
+	tristate
 config I40EVF
 	tristate "Intel(R) Ethernet Adaptive Virtual Function support"
+	select IAVF
 	depends on PCI_MSI
 	---help---
 	  This driver supports virtual functions for Intel XL710,
-	  X710, X722, and all devices advertising support for Intel
-	  Ethernet Adaptive Virtual Function devices. For more
+	  X710, X722, XXV710, and all devices advertising support for
+	  Intel Ethernet Adaptive Virtual Function devices. For more
 	  information on how to identify your adapter, go to the Adapter
 	  & Driver ID Guide that can be located at:
 
-	  <http://support.intel.com>
+	  <https://support.intel.com>
+
+	  This driver was formerly named i40evf.
 
 	  To compile this driver as a module, choose M here. The module
-	  will be called i40evf.  MSI-X interrupt support is required
+	  will be called iavf.  MSI-X interrupt support is required
 	  for this driver to work correctly.
 
 config ICE
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index 807a4f8c7e4e..b91153df6ee8 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -12,6 +12,6 @@ obj-$(CONFIG_IXGBE) += ixgbe/
 obj-$(CONFIG_IXGBEVF) += ixgbevf/
 obj-$(CONFIG_I40E) += i40e/
 obj-$(CONFIG_IXGB) += ixgb/
-obj-$(CONFIG_I40EVF) += i40evf/
+obj-$(CONFIG_IAVF) += iavf/
 obj-$(CONFIG_FM10K) += fm10k/
 obj-$(CONFIG_ICE) += ice/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index ef4d3762bf37..ca229b0efeb6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -44,7 +44,7 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
 	return 0;
 }
 
-struct i40e_diag_reg_test_info i40e_reg_list[] = {
+const struct i40e_diag_reg_test_info i40e_reg_list[] = {
 	/* offset               mask         elements   stride */
 	{I40E_QTX_CTL(0),       0x0000FFBF, 1,
 		I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
@@ -78,27 +78,28 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
 {
 	i40e_status ret_code = 0;
 	u32 reg, mask;
+	u32 elements;
 	u32 i, j;
 
 	for (i = 0; i40e_reg_list[i].offset != 0 &&
 					     !ret_code; i++) {
 
+		elements = i40e_reg_list[i].elements;
 		/* set actual reg range for dynamically allocated resources */
 		if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
 		    hw->func_caps.num_tx_qp != 0)
-			i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
+			elements = hw->func_caps.num_tx_qp;
 		if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
 		     i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
 		     i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
 		     i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
 		     i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
 		    hw->func_caps.num_msix_vectors != 0)
-			i40e_reg_list[i].elements =
-				hw->func_caps.num_msix_vectors - 1;
+			elements = hw->func_caps.num_msix_vectors - 1;
 
 		/* test register access */
 		mask = i40e_reg_list[i].mask;
-		for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) {
+		for (j = 0; j < elements && !ret_code; j++) {
 			reg = i40e_reg_list[i].offset +
 			      (j * i40e_reg_list[i].stride);
 			ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
index c3340f320a18..1db7c6d57231 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -20,7 +20,7 @@ struct i40e_diag_reg_test_info {
 	u32 stride;	/* bytes between each element */
 };
 
-extern struct i40e_diag_reg_test_info i40e_reg_list[];
+extern const struct i40e_diag_reg_test_info i40e_reg_list[];
 
 i40e_status i40e_diag_reg_test(struct i40e_hw *hw);
 i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 240083201dbf..1527c67b487b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2595,7 +2595,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
 		    !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
 		    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
 			dev_err(&pf->pdev->dev,
-				"VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
+				"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
 			return -EPERM;
 		}
 	}
@@ -4019,9 +4019,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 			 mac, vf_id);
 	}
 
-	/* Force the VF driver stop so it has to reload with new MAC address */
+	/* Force the VF interface down so it has to bring up with new MAC
+	 * address
+	 */
 	i40e_vc_disable_vf(vf);
-	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
+	dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
 
 error_param:
 	return ret;
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
deleted file mode 100644
index 3c5c6e962280..000000000000
--- a/drivers/net/ethernet/intel/i40evf/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Copyright(c) 2013 - 2018 Intel Corporation.
-
-#
-## Makefile for the Intel(R) 40GbE VF driver
-#
-#
-
-ccflags-y += -I$(src)
-subdir-ccflags-y += -I$(src)
-
-obj-$(CONFIG_I40EVF) += i40evf.o
-
-i40evf-objs :=	i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \
-		i40e_txrx.o i40e_common.o i40e_adminq.o i40evf_client.o
-
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
deleted file mode 100644
index 21a0dbf6ccf6..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ /dev/null
@@ -1,967 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#include "i40e_status.h"
-#include "i40e_type.h"
-#include "i40e_register.h"
-#include "i40e_adminq.h"
-#include "i40e_prototype.h"
-
-/**
- * i40e_is_nvm_update_op - return true if this is an NVM update operation
- * @desc: API request descriptor
- **/
-static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
-{
-	return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
-	       (desc->opcode == i40e_aqc_opc_nvm_update);
-}
-
-/**
- *  i40e_adminq_init_regs - Initialize AdminQ registers
- *  @hw: pointer to the hardware structure
- *
- *  This assumes the alloc_asq and alloc_arq functions have already been called
- **/
-static void i40e_adminq_init_regs(struct i40e_hw *hw)
-{
-	/* set head and tail registers in our local struct */
-	if (i40e_is_vf(hw)) {
-		hw->aq.asq.tail = I40E_VF_ATQT1;
-		hw->aq.asq.head = I40E_VF_ATQH1;
-		hw->aq.asq.len  = I40E_VF_ATQLEN1;
-		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
-		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
-		hw->aq.arq.tail = I40E_VF_ARQT1;
-		hw->aq.arq.head = I40E_VF_ARQH1;
-		hw->aq.arq.len  = I40E_VF_ARQLEN1;
-		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
-		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
-	}
-}
-
-/**
- *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
- *  @hw: pointer to the hardware structure
- **/
-static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
-{
-	i40e_status ret_code;
-
-	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
-					 i40e_mem_atq_ring,
-					 (hw->aq.num_asq_entries *
-					 sizeof(struct i40e_aq_desc)),
-					 I40E_ADMINQ_DESC_ALIGNMENT);
-	if (ret_code)
-		return ret_code;
-
-	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
-					  (hw->aq.num_asq_entries *
-					  sizeof(struct i40e_asq_cmd_details)));
-	if (ret_code) {
-		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
-		return ret_code;
-	}
-
-	return ret_code;
-}
-
-/**
- *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
- *  @hw: pointer to the hardware structure
- **/
-static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
-{
-	i40e_status ret_code;
-
-	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
-					 i40e_mem_arq_ring,
-					 (hw->aq.num_arq_entries *
-					 sizeof(struct i40e_aq_desc)),
-					 I40E_ADMINQ_DESC_ALIGNMENT);
-
-	return ret_code;
-}
-
-/**
- *  i40e_free_adminq_asq - Free Admin Queue send rings
- *  @hw: pointer to the hardware structure
- *
- *  This assumes the posted send buffers have already been cleaned
- *  and de-allocated
- **/
-static void i40e_free_adminq_asq(struct i40e_hw *hw)
-{
-	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
-}
-
-/**
- *  i40e_free_adminq_arq - Free Admin Queue receive rings
- *  @hw: pointer to the hardware structure
- *
- *  This assumes the posted receive buffers have already been cleaned
- *  and de-allocated
- **/
-static void i40e_free_adminq_arq(struct i40e_hw *hw)
-{
-	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
-}
-
-/**
- *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
- *  @hw: pointer to the hardware structure
- **/
-static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
-{
-	i40e_status ret_code;
-	struct i40e_aq_desc *desc;
-	struct i40e_dma_mem *bi;
-	int i;
-
-	/* We'll be allocating the buffer info memory first, then we can
-	 * allocate the mapped buffers for the event processing
-	 */
-
-	/* buffer_info structures do not need alignment */
-	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
-		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
-	if (ret_code)
-		goto alloc_arq_bufs;
-	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
-
-	/* allocate the mapped buffers */
-	for (i = 0; i < hw->aq.num_arq_entries; i++) {
-		bi = &hw->aq.arq.r.arq_bi[i];
-		ret_code = i40e_allocate_dma_mem(hw, bi,
-						 i40e_mem_arq_buf,
-						 hw->aq.arq_buf_size,
-						 I40E_ADMINQ_DESC_ALIGNMENT);
-		if (ret_code)
-			goto unwind_alloc_arq_bufs;
-
-		/* now configure the descriptors for use */
-		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
-
-		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
-		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
-			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
-		desc->opcode = 0;
-		/* This is in accordance with Admin queue design, there is no
-		 * register for buffer size configuration
-		 */
-		desc->datalen = cpu_to_le16((u16)bi->size);
-		desc->retval = 0;
-		desc->cookie_high = 0;
-		desc->cookie_low = 0;
-		desc->params.external.addr_high =
-			cpu_to_le32(upper_32_bits(bi->pa));
-		desc->params.external.addr_low =
-			cpu_to_le32(lower_32_bits(bi->pa));
-		desc->params.external.param0 = 0;
-		desc->params.external.param1 = 0;
-	}
-
-alloc_arq_bufs:
-	return ret_code;
-
-unwind_alloc_arq_bufs:
-	/* don't try to free the one that failed... */
-	i--;
-	for (; i >= 0; i--)
-		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
-	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
-
-	return ret_code;
-}
-
-/**
- *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
- *  @hw: pointer to the hardware structure
- **/
-static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
-{
-	i40e_status ret_code;
-	struct i40e_dma_mem *bi;
-	int i;
-
-	/* No mapped memory needed yet, just the buffer info structures */
-	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
-		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
-	if (ret_code)
-		goto alloc_asq_bufs;
-	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
-
-	/* allocate the mapped buffers */
-	for (i = 0; i < hw->aq.num_asq_entries; i++) {
-		bi = &hw->aq.asq.r.asq_bi[i];
-		ret_code = i40e_allocate_dma_mem(hw, bi,
-						 i40e_mem_asq_buf,
-						 hw->aq.asq_buf_size,
-						 I40E_ADMINQ_DESC_ALIGNMENT);
-		if (ret_code)
-			goto unwind_alloc_asq_bufs;
-	}
-alloc_asq_bufs:
-	return ret_code;
-
-unwind_alloc_asq_bufs:
-	/* don't try to free the one that failed... */
-	i--;
-	for (; i >= 0; i--)
-		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
-	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
-
-	return ret_code;
-}
-
-/**
- *  i40e_free_arq_bufs - Free receive queue buffer info elements
- *  @hw: pointer to the hardware structure
- **/
-static void i40e_free_arq_bufs(struct i40e_hw *hw)
-{
-	int i;
-
-	/* free descriptors */
-	for (i = 0; i < hw->aq.num_arq_entries; i++)
-		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
-
-	/* free the descriptor memory */
-	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
-
-	/* free the dma header */
-	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
-}
-
-/**
- *  i40e_free_asq_bufs - Free send queue buffer info elements
- *  @hw: pointer to the hardware structure
- **/
-static void i40e_free_asq_bufs(struct i40e_hw *hw)
-{
-	int i;
-
-	/* only unmap if the address is non-NULL */
-	for (i = 0; i < hw->aq.num_asq_entries; i++)
-		if (hw->aq.asq.r.asq_bi[i].pa)
-			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
-
-	/* free the buffer info list */
-	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
-
-	/* free the descriptor memory */
-	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
-
-	/* free the dma header */
-	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
-}
-
-/**
- *  i40e_config_asq_regs - configure ASQ registers
- *  @hw: pointer to the hardware structure
- *
- *  Configure base address and length registers for the transmit queue
- **/
-static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
-{
-	i40e_status ret_code = 0;
-	u32 reg = 0;
-
-	/* Clear Head and Tail */
-	wr32(hw, hw->aq.asq.head, 0);
-	wr32(hw, hw->aq.asq.tail, 0);
-
-	/* set starting point */
-	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
-				  I40E_VF_ATQLEN1_ATQENABLE_MASK));
-	wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
-	wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
-
-	/* Check one register to verify that config was applied */
-	reg = rd32(hw, hw->aq.asq.bal);
-	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
-		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
-
-	return ret_code;
-}
-
-/**
- *  i40e_config_arq_regs - ARQ register configuration
- *  @hw: pointer to the hardware structure
- *
- * Configure base address and length registers for the receive (event queue)
- **/
-static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
-{
-	i40e_status ret_code = 0;
-	u32 reg = 0;
-
-	/* Clear Head and Tail */
-	wr32(hw, hw->aq.arq.head, 0);
-	wr32(hw, hw->aq.arq.tail, 0);
-
-	/* set starting point */
-	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
-				  I40E_VF_ARQLEN1_ARQENABLE_MASK));
-	wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
-	wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
-
-	/* Update tail in the HW to post pre-allocated buffers */
-	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
-
-	/* Check one register to verify that config was applied */
-	reg = rd32(hw, hw->aq.arq.bal);
-	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
-		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
-
-	return ret_code;
-}
-
-/**
- *  i40e_init_asq - main initialization routine for ASQ
- *  @hw: pointer to the hardware structure
- *
- *  This is the main initialization routine for the Admin Send Queue
- *  Prior to calling this function, drivers *MUST* set the following fields
- *  in the hw->aq structure:
- *     - hw->aq.num_asq_entries
- *     - hw->aq.arq_buf_size
- *
- *  Do *NOT* hold the lock when calling this as the memory allocation routines
- *  called are not going to be atomic context safe
- **/
-static i40e_status i40e_init_asq(struct i40e_hw *hw)
-{
-	i40e_status ret_code = 0;
-
-	if (hw->aq.asq.count > 0) {
-		/* queue already initialized */
-		ret_code = I40E_ERR_NOT_READY;
-		goto init_adminq_exit;
-	}
-
-	/* verify input for valid configuration */
-	if ((hw->aq.num_asq_entries == 0) ||
-	    (hw->aq.asq_buf_size == 0)) {
-		ret_code = I40E_ERR_CONFIG;
-		goto init_adminq_exit;
-	}
-
-	hw->aq.asq.next_to_use = 0;
-	hw->aq.asq.next_to_clean = 0;
-
-	/* allocate the ring memory */
-	ret_code = i40e_alloc_adminq_asq_ring(hw);
-	if (ret_code)
-		goto init_adminq_exit;
-
-	/* allocate buffers in the rings */
-	ret_code = i40e_alloc_asq_bufs(hw);
-	if (ret_code)
-		goto init_adminq_free_rings;
-
-	/* initialize base registers */
-	ret_code = i40e_config_asq_regs(hw);
-	if (ret_code)
-		goto init_adminq_free_rings;
-
-	/* success! */
-	hw->aq.asq.count = hw->aq.num_asq_entries;
-	goto init_adminq_exit;
-
-init_adminq_free_rings:
-	i40e_free_adminq_asq(hw);
-
-init_adminq_exit:
-	return ret_code;
-}
-
-/**
- *  i40e_init_arq - initialize ARQ
- *  @hw: pointer to the hardware structure
- *
- *  The main initialization routine for the Admin Receive (Event) Queue.
- *  Prior to calling this function, drivers *MUST* set the following fields
- *  in the hw->aq structure:
- *     - hw->aq.num_asq_entries
- *     - hw->aq.arq_buf_size
- *
- *  Do *NOT* hold the lock when calling this as the memory allocation routines
- *  called are not going to be atomic context safe
- **/
-static i40e_status i40e_init_arq(struct i40e_hw *hw)
-{
-	i40e_status ret_code = 0;
-
-	if (hw->aq.arq.count > 0) {
-		/* queue already initialized */
-		ret_code = I40E_ERR_NOT_READY;
-		goto init_adminq_exit;
-	}
-
-	/* verify input for valid configuration */
-	if ((hw->aq.num_arq_entries == 0) ||
-	    (hw->aq.arq_buf_size == 0)) {
-		ret_code = I40E_ERR_CONFIG;
-		goto init_adminq_exit;
-	}
-
-	hw->aq.arq.next_to_use = 0;
-	hw->aq.arq.next_to_clean = 0;
-
-	/* allocate the ring memory */
-	ret_code = i40e_alloc_adminq_arq_ring(hw);
-	if (ret_code)
-		goto init_adminq_exit;
-
-	/* allocate buffers in the rings */
-	ret_code = i40e_alloc_arq_bufs(hw);
-	if (ret_code)
-		goto init_adminq_free_rings;
-
-	/* initialize base registers */
-	ret_code = i40e_config_arq_regs(hw);
-	if (ret_code)
-		goto init_adminq_free_rings;
-
-	/* success! */
-	hw->aq.arq.count = hw->aq.num_arq_entries;
-	goto init_adminq_exit;
-
-init_adminq_free_rings:
-	i40e_free_adminq_arq(hw);
-
-init_adminq_exit:
-	return ret_code;
-}
-
-/**
- *  i40e_shutdown_asq - shutdown the ASQ
- *  @hw: pointer to the hardware structure
- *
- *  The main shutdown routine for the Admin Send Queue
- **/
-static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
-{
-	i40e_status ret_code = 0;
-
-	mutex_lock(&hw->aq.asq_mutex);
-
-	if (hw->aq.asq.count == 0) {
-		ret_code = I40E_ERR_NOT_READY;
-		goto shutdown_asq_out;
-	}
-
-	/* Stop firmware AdminQ processing */
-	wr32(hw, hw->aq.asq.head, 0);
-	wr32(hw, hw->aq.asq.tail, 0);
-	wr32(hw, hw->aq.asq.len, 0);
-	wr32(hw, hw->aq.asq.bal, 0);
-	wr32(hw, hw->aq.asq.bah, 0);
-
-	hw->aq.asq.count = 0; /* to indicate uninitialized queue */
-
-	/* free ring buffers */
-	i40e_free_asq_bufs(hw);
-
-shutdown_asq_out:
-	mutex_unlock(&hw->aq.asq_mutex);
-	return ret_code;
-}
-
-/**
- *  i40e_shutdown_arq - shutdown ARQ
- *  @hw: pointer to the hardware structure
- *
- *  The main shutdown routine for the Admin Receive Queue
- **/
-static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
-{
-	i40e_status ret_code = 0;
-
-	mutex_lock(&hw->aq.arq_mutex);
-
-	if (hw->aq.arq.count == 0) {
-		ret_code = I40E_ERR_NOT_READY;
-		goto shutdown_arq_out;
-	}
-
-	/* Stop firmware AdminQ processing */
-	wr32(hw, hw->aq.arq.head, 0);
-	wr32(hw, hw->aq.arq.tail, 0);
-	wr32(hw, hw->aq.arq.len, 0);
-	wr32(hw, hw->aq.arq.bal, 0);
-	wr32(hw, hw->aq.arq.bah, 0);
-
-	hw->aq.arq.count = 0; /* to indicate uninitialized queue */
-
-	/* free ring buffers */
-	i40e_free_arq_bufs(hw);
-
-shutdown_arq_out:
-	mutex_unlock(&hw->aq.arq_mutex);
-	return ret_code;
-}
-
-/**
- *  i40evf_init_adminq - main initialization routine for Admin Queue
- *  @hw: pointer to the hardware structure
- *
- *  Prior to calling this function, drivers *MUST* set the following fields
- *  in the hw->aq structure:
- *     - hw->aq.num_asq_entries
- *     - hw->aq.num_arq_entries
- *     - hw->aq.arq_buf_size
- *     - hw->aq.asq_buf_size
- **/
-i40e_status i40evf_init_adminq(struct i40e_hw *hw)
-{
-	i40e_status ret_code;
-
-	/* verify input for valid configuration */
-	if ((hw->aq.num_arq_entries == 0) ||
-	    (hw->aq.num_asq_entries == 0) ||
-	    (hw->aq.arq_buf_size == 0) ||
-	    (hw->aq.asq_buf_size == 0)) {
-		ret_code = I40E_ERR_CONFIG;
-		goto init_adminq_exit;
-	}
-
-	/* Set up register offsets */
-	i40e_adminq_init_regs(hw);
-
-	/* setup ASQ command write back timeout */
-	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
-
-	/* allocate the ASQ */
-	ret_code = i40e_init_asq(hw);
-	if (ret_code)
-		goto init_adminq_destroy_locks;
-
-	/* allocate the ARQ */
-	ret_code = i40e_init_arq(hw);
-	if (ret_code)
-		goto init_adminq_free_asq;
-
-	/* success! */
-	goto init_adminq_exit;
-
-init_adminq_free_asq:
-	i40e_shutdown_asq(hw);
-init_adminq_destroy_locks:
-
-init_adminq_exit:
-	return ret_code;
-}
-
-/**
- *  i40evf_shutdown_adminq - shutdown routine for the Admin Queue
- *  @hw: pointer to the hardware structure
- **/
-i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
-{
-	i40e_status ret_code = 0;
-
-	if (i40evf_check_asq_alive(hw))
-		i40evf_aq_queue_shutdown(hw, true);
-
-	i40e_shutdown_asq(hw);
-	i40e_shutdown_arq(hw);
-
-	if (hw->nvm_buff.va)
-		i40e_free_virt_mem(hw, &hw->nvm_buff);
-
-	return ret_code;
-}
-
-/**
- *  i40e_clean_asq - cleans Admin send queue
- *  @hw: pointer to the hardware structure
- *
- *  returns the number of free desc
- **/
-static u16 i40e_clean_asq(struct i40e_hw *hw)
-{
-	struct i40e_adminq_ring *asq = &(hw->aq.asq);
-	struct i40e_asq_cmd_details *details;
-	u16 ntc = asq->next_to_clean;
-	struct i40e_aq_desc desc_cb;
-	struct i40e_aq_desc *desc;
-
-	desc = I40E_ADMINQ_DESC(*asq, ntc);
-	details = I40E_ADMINQ_DETAILS(*asq, ntc);
-	while (rd32(hw, hw->aq.asq.head) != ntc) {
-		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
-
-		if (details->callback) {
-			I40E_ADMINQ_CALLBACK cb_func =
-					(I40E_ADMINQ_CALLBACK)details->callback;
-			desc_cb = *desc;
-			cb_func(hw, &desc_cb);
-		}
-		memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
-		memset((void *)details, 0,
-		       sizeof(struct i40e_asq_cmd_details));
-		ntc++;
-		if (ntc == asq->count)
-			ntc = 0;
-		desc = I40E_ADMINQ_DESC(*asq, ntc);
-		details = I40E_ADMINQ_DETAILS(*asq, ntc);
-	}
-
-	asq->next_to_clean = ntc;
-
-	return I40E_DESC_UNUSED(asq);
-}
-
-/**
- *  i40evf_asq_done - check if FW has processed the Admin Send Queue
- *  @hw: pointer to the hw struct
- *
- *  Returns true if the firmware has processed all descriptors on the
- *  admin send queue. Returns false if there are still requests pending.
- **/
-bool i40evf_asq_done(struct i40e_hw *hw)
-{
-	/* AQ designers suggest use of head for better
-	 * timing reliability than DD bit
-	 */
-	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
-
-}
-
-/**
- *  i40evf_asq_send_command - send command to Admin Queue
- *  @hw: pointer to the hw struct
- *  @desc: prefilled descriptor describing the command (non DMA mem)
- *  @buff: buffer to use for indirect commands
- *  @buff_size: size of buffer for indirect commands
- *  @cmd_details: pointer to command details structure
- *
- *  This is the main send command driver routine for the Admin Queue send
- *  queue.  It runs the queue, cleans the queue, etc
- **/
-i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
-				struct i40e_aq_desc *desc,
-				void *buff, /* can be NULL */
-				u16  buff_size,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	i40e_status status = 0;
-	struct i40e_dma_mem *dma_buff = NULL;
-	struct i40e_asq_cmd_details *details;
-	struct i40e_aq_desc *desc_on_ring;
-	bool cmd_completed = false;
-	u16  retval = 0;
-	u32  val = 0;
-
-	mutex_lock(&hw->aq.asq_mutex);
-
-	if (hw->aq.asq.count == 0) {
-		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-			   "AQTX: Admin queue not initialized.\n");
-		status = I40E_ERR_QUEUE_EMPTY;
-		goto asq_send_command_error;
-	}
-
-	hw->aq.asq_last_status = I40E_AQ_RC_OK;
-
-	val = rd32(hw, hw->aq.asq.head);
-	if (val >= hw->aq.num_asq_entries) {
-		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-			   "AQTX: head overrun at %d\n", val);
-		status = I40E_ERR_QUEUE_EMPTY;
-		goto asq_send_command_error;
-	}
-
-	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
-	if (cmd_details) {
-		*details = *cmd_details;
-
-		/* If the cmd_details are defined copy the cookie.  The
-		 * cpu_to_le32 is not needed here because the data is ignored
-		 * by the FW, only used by the driver
-		 */
-		if (details->cookie) {
-			desc->cookie_high =
-				cpu_to_le32(upper_32_bits(details->cookie));
-			desc->cookie_low =
-				cpu_to_le32(lower_32_bits(details->cookie));
-		}
-	} else {
-		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
-	}
-
-	/* clear requested flags and then set additional flags if defined */
-	desc->flags &= ~cpu_to_le16(details->flags_dis);
-	desc->flags |= cpu_to_le16(details->flags_ena);
-
-	if (buff_size > hw->aq.asq_buf_size) {
-		i40e_debug(hw,
-			   I40E_DEBUG_AQ_MESSAGE,
-			   "AQTX: Invalid buffer size: %d.\n",
-			   buff_size);
-		status = I40E_ERR_INVALID_SIZE;
-		goto asq_send_command_error;
-	}
-
-	if (details->postpone && !details->async) {
-		i40e_debug(hw,
-			   I40E_DEBUG_AQ_MESSAGE,
-			   "AQTX: Async flag not set along with postpone flag");
-		status = I40E_ERR_PARAM;
-		goto asq_send_command_error;
-	}
-
-	/* call clean and check queue available function to reclaim the
-	 * descriptors that were processed by FW, the function returns the
-	 * number of desc available
-	 */
-	/* the clean function called here could be called in a separate thread
-	 * in case of asynchronous completions
-	 */
-	if (i40e_clean_asq(hw) == 0) {
-		i40e_debug(hw,
-			   I40E_DEBUG_AQ_MESSAGE,
-			   "AQTX: Error queue is full.\n");
-		status = I40E_ERR_ADMIN_QUEUE_FULL;
-		goto asq_send_command_error;
-	}
-
-	/* initialize the temp desc pointer with the right desc */
-	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
-
-	/* if the desc is available copy the temp desc to the right place */
-	*desc_on_ring = *desc;
-
-	/* if buff is not NULL assume indirect command */
-	if (buff != NULL) {
-		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
-		/* copy the user buff into the respective DMA buff */
-		memcpy(dma_buff->va, buff, buff_size);
-		desc_on_ring->datalen = cpu_to_le16(buff_size);
-
-		/* Update the address values in the desc with the pa value
-		 * for respective buffer
-		 */
-		desc_on_ring->params.external.addr_high =
-				cpu_to_le32(upper_32_bits(dma_buff->pa));
-		desc_on_ring->params.external.addr_low =
-				cpu_to_le32(lower_32_bits(dma_buff->pa));
-	}
-
-	/* bump the tail */
-	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
-	i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
-			buff, buff_size);
-	(hw->aq.asq.next_to_use)++;
-	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
-		hw->aq.asq.next_to_use = 0;
-	if (!details->postpone)
-		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
-
-	/* if cmd_details are not defined or async flag is not set,
-	 * we need to wait for desc write back
-	 */
-	if (!details->async && !details->postpone) {
-		u32 total_delay = 0;
-
-		do {
-			/* AQ designers suggest use of head for better
-			 * timing reliability than DD bit
-			 */
-			if (i40evf_asq_done(hw))
-				break;
-			udelay(50);
-			total_delay += 50;
-		} while (total_delay < hw->aq.asq_cmd_timeout);
-	}
-
-	/* if ready, copy the desc back to temp */
-	if (i40evf_asq_done(hw)) {
-		*desc = *desc_on_ring;
-		if (buff != NULL)
-			memcpy(buff, dma_buff->va, buff_size);
-		retval = le16_to_cpu(desc->retval);
-		if (retval != 0) {
-			i40e_debug(hw,
-				   I40E_DEBUG_AQ_MESSAGE,
-				   "AQTX: Command completed with error 0x%X.\n",
-				   retval);
-
-			/* strip off FW internal code */
-			retval &= 0xff;
-		}
-		cmd_completed = true;
-		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
-			status = 0;
-		else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
-			status = I40E_ERR_NOT_READY;
-		else
-			status = I40E_ERR_ADMIN_QUEUE_ERROR;
-		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
-	}
-
-	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-		   "AQTX: desc and buffer writeback:\n");
-	i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff,
-			buff_size);
-
-	/* save writeback aq if requested */
-	if (details->wb_desc)
-		*details->wb_desc = *desc_on_ring;
-
-	/* update the error if time out occurred */
-	if ((!cmd_completed) &&
-	    (!details->async && !details->postpone)) {
-		if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
-			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-				   "AQTX: AQ Critical error.\n");
-			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
-		} else {
-			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-				   "AQTX: Writeback timeout.\n");
-			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
-		}
-	}
-
-asq_send_command_error:
-	mutex_unlock(&hw->aq.asq_mutex);
-	return status;
-}
-
-/**
- *  i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function
- *  @desc:     pointer to the temp descriptor (non DMA mem)
- *  @opcode:   the opcode can be used to decide which flags to turn off or on
- *
- *  Fill the desc with default values
- **/
-void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
-				       u16 opcode)
-{
-	/* zero out the desc */
-	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
-	desc->opcode = cpu_to_le16(opcode);
-	desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
-}
-
-/**
- *  i40evf_clean_arq_element
- *  @hw: pointer to the hw struct
- *  @e: event info from the receive descriptor, includes any buffers
- *  @pending: number of events that could be left to process
- *
- *  This function cleans one Admin Receive Queue element and returns
- *  the contents through e.  It can also return how many events are
- *  left to process through 'pending'
- **/
-i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
-					     struct i40e_arq_event_info *e,
-					     u16 *pending)
-{
-	i40e_status ret_code = 0;
-	u16 ntc = hw->aq.arq.next_to_clean;
-	struct i40e_aq_desc *desc;
-	struct i40e_dma_mem *bi;
-	u16 desc_idx;
-	u16 datalen;
-	u16 flags;
-	u16 ntu;
-
-	/* pre-clean the event info */
-	memset(&e->desc, 0, sizeof(e->desc));
-
-	/* take the lock before we start messing with the ring */
-	mutex_lock(&hw->aq.arq_mutex);
-
-	if (hw->aq.arq.count == 0) {
-		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-			   "AQRX: Admin queue not initialized.\n");
-		ret_code = I40E_ERR_QUEUE_EMPTY;
-		goto clean_arq_element_err;
-	}
-
-	/* set next_to_use to head */
-	ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
-	if (ntu == ntc) {
-		/* nothing to do - shouldn't need to update ring's values */
-		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
-		goto clean_arq_element_out;
-	}
-
-	/* now clean the next descriptor */
-	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
-	desc_idx = ntc;
-
-	hw->aq.arq_last_status =
-		(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
-	flags = le16_to_cpu(desc->flags);
-	if (flags & I40E_AQ_FLAG_ERR) {
-		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
-		i40e_debug(hw,
-			   I40E_DEBUG_AQ_MESSAGE,
-			   "AQRX: Event received with error 0x%X.\n",
-			   hw->aq.arq_last_status);
-	}
-
-	e->desc = *desc;
-	datalen = le16_to_cpu(desc->datalen);
-	e->msg_len = min(datalen, e->buf_len);
-	if (e->msg_buf != NULL && (e->msg_len != 0))
-		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
-		       e->msg_len);
-
-	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
-	i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
-			hw->aq.arq_buf_size);
-
-	/* Restore the original datalen and buffer address in the desc,
-	 * FW updates datalen to indicate the event message
-	 * size
-	 */
-	bi = &hw->aq.arq.r.arq_bi[ntc];
-	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
-
-	desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
-	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
-		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
-	desc->datalen = cpu_to_le16((u16)bi->size);
-	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
-	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
-
-	/* set tail = the last cleaned desc index. */
-	wr32(hw, hw->aq.arq.tail, ntc);
-	/* ntc is updated to tail + 1 */
-	ntc++;
-	if (ntc == hw->aq.num_arq_entries)
-		ntc = 0;
-	hw->aq.arq.next_to_clean = ntc;
-	hw->aq.arq.next_to_use = ntu;
-
-clean_arq_element_out:
-	/* Set pending if needed, unlock and return */
-	if (pending != NULL)
-		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
-
-clean_arq_element_err:
-	mutex_unlock(&hw->aq.arq_mutex);
-
-	return ret_code;
-}
-
-void i40evf_resume_aq(struct i40e_hw *hw)
-{
-	/* Registers are reset after PF reset */
-	hw->aq.asq.next_to_use = 0;
-	hw->aq.asq.next_to_clean = 0;
-
-	i40e_config_asq_regs(hw);
-
-	hw->aq.arq.next_to_use = 0;
-	hw->aq.arq.next_to_clean = 0;
-
-	i40e_config_arq_regs(hw);
-}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
deleted file mode 100644
index 1f264b9b6805..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_ADMINQ_H_
-#define _I40E_ADMINQ_H_
-
-#include "i40e_osdep.h"
-#include "i40e_status.h"
-#include "i40e_adminq_cmd.h"
-
-#define I40E_ADMINQ_DESC(R, i)   \
-	(&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
-
-#define I40E_ADMINQ_DESC_ALIGNMENT 4096
-
-struct i40e_adminq_ring {
-	struct i40e_virt_mem dma_head;	/* space for dma structures */
-	struct i40e_dma_mem desc_buf;	/* descriptor ring memory */
-	struct i40e_virt_mem cmd_buf;	/* command buffer memory */
-
-	union {
-		struct i40e_dma_mem *asq_bi;
-		struct i40e_dma_mem *arq_bi;
-	} r;
-
-	u16 count;		/* Number of descriptors */
-	u16 rx_buf_len;		/* Admin Receive Queue buffer length */
-
-	/* used for interrupt processing */
-	u16 next_to_use;
-	u16 next_to_clean;
-
-	/* used for queue tracking */
-	u32 head;
-	u32 tail;
-	u32 len;
-	u32 bah;
-	u32 bal;
-};
-
-/* ASQ transaction details */
-struct i40e_asq_cmd_details {
-	void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
-	u64 cookie;
-	u16 flags_ena;
-	u16 flags_dis;
-	bool async;
-	bool postpone;
-	struct i40e_aq_desc *wb_desc;
-};
-
-#define I40E_ADMINQ_DETAILS(R, i)   \
-	(&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
-
-/* ARQ event information */
-struct i40e_arq_event_info {
-	struct i40e_aq_desc desc;
-	u16 msg_len;
-	u16 buf_len;
-	u8 *msg_buf;
-};
-
-/* Admin Queue information */
-struct i40e_adminq_info {
-	struct i40e_adminq_ring arq;    /* receive queue */
-	struct i40e_adminq_ring asq;    /* send queue */
-	u32 asq_cmd_timeout;            /* send queue cmd write back timeout*/
-	u16 num_arq_entries;            /* receive queue depth */
-	u16 num_asq_entries;            /* send queue depth */
-	u16 arq_buf_size;               /* receive queue buffer size */
-	u16 asq_buf_size;               /* send queue buffer size */
-	u16 fw_maj_ver;                 /* firmware major version */
-	u16 fw_min_ver;                 /* firmware minor version */
-	u32 fw_build;                   /* firmware build number */
-	u16 api_maj_ver;                /* api major version */
-	u16 api_min_ver;                /* api minor version */
-
-	struct mutex asq_mutex; /* Send queue lock */
-	struct mutex arq_mutex; /* Receive queue lock */
-
-	/* last status values on send and receive queues */
-	enum i40e_admin_queue_err asq_last_status;
-	enum i40e_admin_queue_err arq_last_status;
-};
-
-/**
- * i40e_aq_rc_to_posix - convert errors to user-land codes
- * aq_ret: AdminQ handler error code can override aq_rc
- * aq_rc: AdminQ firmware error code to convert
- **/
-static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
-{
-	int aq_to_posix[] = {
-		0,           /* I40E_AQ_RC_OK */
-		-EPERM,      /* I40E_AQ_RC_EPERM */
-		-ENOENT,     /* I40E_AQ_RC_ENOENT */
-		-ESRCH,      /* I40E_AQ_RC_ESRCH */
-		-EINTR,      /* I40E_AQ_RC_EINTR */
-		-EIO,        /* I40E_AQ_RC_EIO */
-		-ENXIO,      /* I40E_AQ_RC_ENXIO */
-		-E2BIG,      /* I40E_AQ_RC_E2BIG */
-		-EAGAIN,     /* I40E_AQ_RC_EAGAIN */
-		-ENOMEM,     /* I40E_AQ_RC_ENOMEM */
-		-EACCES,     /* I40E_AQ_RC_EACCES */
-		-EFAULT,     /* I40E_AQ_RC_EFAULT */
-		-EBUSY,      /* I40E_AQ_RC_EBUSY */
-		-EEXIST,     /* I40E_AQ_RC_EEXIST */
-		-EINVAL,     /* I40E_AQ_RC_EINVAL */
-		-ENOTTY,     /* I40E_AQ_RC_ENOTTY */
-		-ENOSPC,     /* I40E_AQ_RC_ENOSPC */
-		-ENOSYS,     /* I40E_AQ_RC_ENOSYS */
-		-ERANGE,     /* I40E_AQ_RC_ERANGE */
-		-EPIPE,      /* I40E_AQ_RC_EFLUSHED */
-		-ESPIPE,     /* I40E_AQ_RC_BAD_ADDR */
-		-EROFS,      /* I40E_AQ_RC_EMODE */
-		-EFBIG,      /* I40E_AQ_RC_EFBIG */
-	};
-
-	/* aq_rc is invalid if AQ timed out */
-	if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
-		return -EAGAIN;
-
-	if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
-		return -ERANGE;
-
-	return aq_to_posix[aq_rc];
-}
-
-/* general information */
-#define I40E_AQ_LARGE_BUF	512
-#define I40E_ASQ_CMD_TIMEOUT	250000  /* usecs */
-
-void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
-				       u16 opcode);
-
-#endif /* _I40E_ADMINQ_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
deleted file mode 100644
index 5fd8529465d4..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ /dev/null
@@ -1,2717 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_ADMINQ_CMD_H_
-#define _I40E_ADMINQ_CMD_H_
-
-/* This header file defines the i40e Admin Queue commands and is shared between
- * i40e Firmware and Software.
- *
- * This file needs to comply with the Linux Kernel coding style.
- */
-
-#define I40E_FW_API_VERSION_MAJOR	0x0001
-#define I40E_FW_API_VERSION_MINOR_X722	0x0005
-#define I40E_FW_API_VERSION_MINOR_X710	0x0007
-
-#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
-					I40E_FW_API_VERSION_MINOR_X710 : \
-					I40E_FW_API_VERSION_MINOR_X722)
-
-/* API version 1.7 implements additional link and PHY-specific APIs  */
-#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
-
-struct i40e_aq_desc {
-	__le16 flags;
-	__le16 opcode;
-	__le16 datalen;
-	__le16 retval;
-	__le32 cookie_high;
-	__le32 cookie_low;
-	union {
-		struct {
-			__le32 param0;
-			__le32 param1;
-			__le32 param2;
-			__le32 param3;
-		} internal;
-		struct {
-			__le32 param0;
-			__le32 param1;
-			__le32 addr_high;
-			__le32 addr_low;
-		} external;
-		u8 raw[16];
-	} params;
-};
-
-/* Flags sub-structure
- * |0  |1  |2  |3  |4  |5  |6  |7  |8  |9  |10 |11 |12 |13 |14 |15 |
- * |DD |CMP|ERR|VFE| * *  RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
- */
-
-/* command flags and offsets*/
-#define I40E_AQ_FLAG_DD_SHIFT	0
-#define I40E_AQ_FLAG_CMP_SHIFT	1
-#define I40E_AQ_FLAG_ERR_SHIFT	2
-#define I40E_AQ_FLAG_VFE_SHIFT	3
-#define I40E_AQ_FLAG_LB_SHIFT	9
-#define I40E_AQ_FLAG_RD_SHIFT	10
-#define I40E_AQ_FLAG_VFC_SHIFT	11
-#define I40E_AQ_FLAG_BUF_SHIFT	12
-#define I40E_AQ_FLAG_SI_SHIFT	13
-#define I40E_AQ_FLAG_EI_SHIFT	14
-#define I40E_AQ_FLAG_FE_SHIFT	15
-
-#define I40E_AQ_FLAG_DD		BIT(I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
-#define I40E_AQ_FLAG_CMP	BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
-#define I40E_AQ_FLAG_ERR	BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
-#define I40E_AQ_FLAG_VFE	BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
-#define I40E_AQ_FLAG_LB		BIT(I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
-#define I40E_AQ_FLAG_RD		BIT(I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
-#define I40E_AQ_FLAG_VFC	BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
-#define I40E_AQ_FLAG_BUF	BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI		BIT(I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
-#define I40E_AQ_FLAG_EI		BIT(I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
-#define I40E_AQ_FLAG_FE		BIT(I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
-
-/* error codes */
-enum i40e_admin_queue_err {
-	I40E_AQ_RC_OK		= 0,  /* success */
-	I40E_AQ_RC_EPERM	= 1,  /* Operation not permitted */
-	I40E_AQ_RC_ENOENT	= 2,  /* No such element */
-	I40E_AQ_RC_ESRCH	= 3,  /* Bad opcode */
-	I40E_AQ_RC_EINTR	= 4,  /* operation interrupted */
-	I40E_AQ_RC_EIO		= 5,  /* I/O error */
-	I40E_AQ_RC_ENXIO	= 6,  /* No such resource */
-	I40E_AQ_RC_E2BIG	= 7,  /* Arg too long */
-	I40E_AQ_RC_EAGAIN	= 8,  /* Try again */
-	I40E_AQ_RC_ENOMEM	= 9,  /* Out of memory */
-	I40E_AQ_RC_EACCES	= 10, /* Permission denied */
-	I40E_AQ_RC_EFAULT	= 11, /* Bad address */
-	I40E_AQ_RC_EBUSY	= 12, /* Device or resource busy */
-	I40E_AQ_RC_EEXIST	= 13, /* object already exists */
-	I40E_AQ_RC_EINVAL	= 14, /* Invalid argument */
-	I40E_AQ_RC_ENOTTY	= 15, /* Not a typewriter */
-	I40E_AQ_RC_ENOSPC	= 16, /* No space left or alloc failure */
-	I40E_AQ_RC_ENOSYS	= 17, /* Function not implemented */
-	I40E_AQ_RC_ERANGE	= 18, /* Parameter out of range */
-	I40E_AQ_RC_EFLUSHED	= 19, /* Cmd flushed due to prev cmd error */
-	I40E_AQ_RC_BAD_ADDR	= 20, /* Descriptor contains a bad pointer */
-	I40E_AQ_RC_EMODE	= 21, /* Op not allowed in current dev mode */
-	I40E_AQ_RC_EFBIG	= 22, /* File too large */
-};
-
-/* Admin Queue command opcodes */
-enum i40e_admin_queue_opc {
-	/* aq commands */
-	i40e_aqc_opc_get_version	= 0x0001,
-	i40e_aqc_opc_driver_version	= 0x0002,
-	i40e_aqc_opc_queue_shutdown	= 0x0003,
-	i40e_aqc_opc_set_pf_context	= 0x0004,
-
-	/* resource ownership */
-	i40e_aqc_opc_request_resource	= 0x0008,
-	i40e_aqc_opc_release_resource	= 0x0009,
-
-	i40e_aqc_opc_list_func_capabilities	= 0x000A,
-	i40e_aqc_opc_list_dev_capabilities	= 0x000B,
-
-	/* Proxy commands */
-	i40e_aqc_opc_set_proxy_config		= 0x0104,
-	i40e_aqc_opc_set_ns_proxy_table_entry	= 0x0105,
-
-	/* LAA */
-	i40e_aqc_opc_mac_address_read	= 0x0107,
-	i40e_aqc_opc_mac_address_write	= 0x0108,
-
-	/* PXE */
-	i40e_aqc_opc_clear_pxe_mode	= 0x0110,
-
-	/* WoL commands */
-	i40e_aqc_opc_set_wol_filter	= 0x0120,
-	i40e_aqc_opc_get_wake_reason	= 0x0121,
-
-	/* internal switch commands */
-	i40e_aqc_opc_get_switch_config		= 0x0200,
-	i40e_aqc_opc_add_statistics		= 0x0201,
-	i40e_aqc_opc_remove_statistics		= 0x0202,
-	i40e_aqc_opc_set_port_parameters	= 0x0203,
-	i40e_aqc_opc_get_switch_resource_alloc	= 0x0204,
-	i40e_aqc_opc_set_switch_config		= 0x0205,
-	i40e_aqc_opc_rx_ctl_reg_read		= 0x0206,
-	i40e_aqc_opc_rx_ctl_reg_write		= 0x0207,
-
-	i40e_aqc_opc_add_vsi			= 0x0210,
-	i40e_aqc_opc_update_vsi_parameters	= 0x0211,
-	i40e_aqc_opc_get_vsi_parameters		= 0x0212,
-
-	i40e_aqc_opc_add_pv			= 0x0220,
-	i40e_aqc_opc_update_pv_parameters	= 0x0221,
-	i40e_aqc_opc_get_pv_parameters		= 0x0222,
-
-	i40e_aqc_opc_add_veb			= 0x0230,
-	i40e_aqc_opc_update_veb_parameters	= 0x0231,
-	i40e_aqc_opc_get_veb_parameters		= 0x0232,
-
-	i40e_aqc_opc_delete_element		= 0x0243,
-
-	i40e_aqc_opc_add_macvlan		= 0x0250,
-	i40e_aqc_opc_remove_macvlan		= 0x0251,
-	i40e_aqc_opc_add_vlan			= 0x0252,
-	i40e_aqc_opc_remove_vlan		= 0x0253,
-	i40e_aqc_opc_set_vsi_promiscuous_modes	= 0x0254,
-	i40e_aqc_opc_add_tag			= 0x0255,
-	i40e_aqc_opc_remove_tag			= 0x0256,
-	i40e_aqc_opc_add_multicast_etag		= 0x0257,
-	i40e_aqc_opc_remove_multicast_etag	= 0x0258,
-	i40e_aqc_opc_update_tag			= 0x0259,
-	i40e_aqc_opc_add_control_packet_filter	= 0x025A,
-	i40e_aqc_opc_remove_control_packet_filter	= 0x025B,
-	i40e_aqc_opc_add_cloud_filters		= 0x025C,
-	i40e_aqc_opc_remove_cloud_filters	= 0x025D,
-	i40e_aqc_opc_clear_wol_switch_filters	= 0x025E,
-
-	i40e_aqc_opc_add_mirror_rule	= 0x0260,
-	i40e_aqc_opc_delete_mirror_rule	= 0x0261,
-
-	/* Dynamic Device Personalization */
-	i40e_aqc_opc_write_personalization_profile	= 0x0270,
-	i40e_aqc_opc_get_personalization_profile_list	= 0x0271,
-
-	/* DCB commands */
-	i40e_aqc_opc_dcb_ignore_pfc	= 0x0301,
-	i40e_aqc_opc_dcb_updated	= 0x0302,
-	i40e_aqc_opc_set_dcb_parameters = 0x0303,
-
-	/* TX scheduler */
-	i40e_aqc_opc_configure_vsi_bw_limit		= 0x0400,
-	i40e_aqc_opc_configure_vsi_ets_sla_bw_limit	= 0x0406,
-	i40e_aqc_opc_configure_vsi_tc_bw		= 0x0407,
-	i40e_aqc_opc_query_vsi_bw_config		= 0x0408,
-	i40e_aqc_opc_query_vsi_ets_sla_config		= 0x040A,
-	i40e_aqc_opc_configure_switching_comp_bw_limit	= 0x0410,
-
-	i40e_aqc_opc_enable_switching_comp_ets			= 0x0413,
-	i40e_aqc_opc_modify_switching_comp_ets			= 0x0414,
-	i40e_aqc_opc_disable_switching_comp_ets			= 0x0415,
-	i40e_aqc_opc_configure_switching_comp_ets_bw_limit	= 0x0416,
-	i40e_aqc_opc_configure_switching_comp_bw_config		= 0x0417,
-	i40e_aqc_opc_query_switching_comp_ets_config		= 0x0418,
-	i40e_aqc_opc_query_port_ets_config			= 0x0419,
-	i40e_aqc_opc_query_switching_comp_bw_config		= 0x041A,
-	i40e_aqc_opc_suspend_port_tx				= 0x041B,
-	i40e_aqc_opc_resume_port_tx				= 0x041C,
-	i40e_aqc_opc_configure_partition_bw			= 0x041D,
-	/* hmc */
-	i40e_aqc_opc_query_hmc_resource_profile	= 0x0500,
-	i40e_aqc_opc_set_hmc_resource_profile	= 0x0501,
-
-	/* phy commands*/
-	i40e_aqc_opc_get_phy_abilities		= 0x0600,
-	i40e_aqc_opc_set_phy_config		= 0x0601,
-	i40e_aqc_opc_set_mac_config		= 0x0603,
-	i40e_aqc_opc_set_link_restart_an	= 0x0605,
-	i40e_aqc_opc_get_link_status		= 0x0607,
-	i40e_aqc_opc_set_phy_int_mask		= 0x0613,
-	i40e_aqc_opc_get_local_advt_reg		= 0x0614,
-	i40e_aqc_opc_set_local_advt_reg		= 0x0615,
-	i40e_aqc_opc_get_partner_advt		= 0x0616,
-	i40e_aqc_opc_set_lb_modes		= 0x0618,
-	i40e_aqc_opc_get_phy_wol_caps		= 0x0621,
-	i40e_aqc_opc_set_phy_debug		= 0x0622,
-	i40e_aqc_opc_upload_ext_phy_fm		= 0x0625,
-	i40e_aqc_opc_run_phy_activity		= 0x0626,
-	i40e_aqc_opc_set_phy_register		= 0x0628,
-	i40e_aqc_opc_get_phy_register		= 0x0629,
-
-	/* NVM commands */
-	i40e_aqc_opc_nvm_read			= 0x0701,
-	i40e_aqc_opc_nvm_erase			= 0x0702,
-	i40e_aqc_opc_nvm_update			= 0x0703,
-	i40e_aqc_opc_nvm_config_read		= 0x0704,
-	i40e_aqc_opc_nvm_config_write		= 0x0705,
-	i40e_aqc_opc_oem_post_update		= 0x0720,
-	i40e_aqc_opc_thermal_sensor		= 0x0721,
-
-	/* virtualization commands */
-	i40e_aqc_opc_send_msg_to_pf		= 0x0801,
-	i40e_aqc_opc_send_msg_to_vf		= 0x0802,
-	i40e_aqc_opc_send_msg_to_peer		= 0x0803,
-
-	/* alternate structure */
-	i40e_aqc_opc_alternate_write		= 0x0900,
-	i40e_aqc_opc_alternate_write_indirect	= 0x0901,
-	i40e_aqc_opc_alternate_read		= 0x0902,
-	i40e_aqc_opc_alternate_read_indirect	= 0x0903,
-	i40e_aqc_opc_alternate_write_done	= 0x0904,
-	i40e_aqc_opc_alternate_set_mode		= 0x0905,
-	i40e_aqc_opc_alternate_clear_port	= 0x0906,
-
-	/* LLDP commands */
-	i40e_aqc_opc_lldp_get_mib	= 0x0A00,
-	i40e_aqc_opc_lldp_update_mib	= 0x0A01,
-	i40e_aqc_opc_lldp_add_tlv	= 0x0A02,
-	i40e_aqc_opc_lldp_update_tlv	= 0x0A03,
-	i40e_aqc_opc_lldp_delete_tlv	= 0x0A04,
-	i40e_aqc_opc_lldp_stop		= 0x0A05,
-	i40e_aqc_opc_lldp_start		= 0x0A06,
-
-	/* Tunnel commands */
-	i40e_aqc_opc_add_udp_tunnel	= 0x0B00,
-	i40e_aqc_opc_del_udp_tunnel	= 0x0B01,
-	i40e_aqc_opc_set_rss_key	= 0x0B02,
-	i40e_aqc_opc_set_rss_lut	= 0x0B03,
-	i40e_aqc_opc_get_rss_key	= 0x0B04,
-	i40e_aqc_opc_get_rss_lut	= 0x0B05,
-
-	/* Async Events */
-	i40e_aqc_opc_event_lan_overflow		= 0x1001,
-
-	/* OEM commands */
-	i40e_aqc_opc_oem_parameter_change	= 0xFE00,
-	i40e_aqc_opc_oem_device_status_change	= 0xFE01,
-	i40e_aqc_opc_oem_ocsd_initialize	= 0xFE02,
-	i40e_aqc_opc_oem_ocbb_initialize	= 0xFE03,
-
-	/* debug commands */
-	i40e_aqc_opc_debug_read_reg		= 0xFF03,
-	i40e_aqc_opc_debug_write_reg		= 0xFF04,
-	i40e_aqc_opc_debug_modify_reg		= 0xFF07,
-	i40e_aqc_opc_debug_dump_internals	= 0xFF08,
-};
-
-/* command structures and indirect data structures */
-
-/* Structure naming conventions:
- * - no suffix for direct command descriptor structures
- * - _data for indirect sent data
- * - _resp for indirect return data (data which is both will use _data)
- * - _completion for direct return data
- * - _element_ for repeated elements (may also be _data or _resp)
- *
- * Command structures are expected to overlay the params.raw member of the basic
- * descriptor, and as such cannot exceed 16 bytes in length.
- */
-
-/* This macro is used to generate a compilation error if a structure
- * is not exactly the correct length. It gives a divide by zero error if the
- * structure is not of the correct size, otherwise it creates an enum that is
- * never used.
- */
-#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
-	{ i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
-
-/* This macro is used extensively to ensure that command structures are 16
- * bytes in length as they have to map to the raw array of that size.
- */
-#define I40E_CHECK_CMD_LENGTH(X)	I40E_CHECK_STRUCT_LEN(16, X)
-
-/* internal (0x00XX) commands */
-
-/* Get version (direct 0x0001) */
-struct i40e_aqc_get_version {
-	__le32 rom_ver;
-	__le32 fw_build;
-	__le16 fw_major;
-	__le16 fw_minor;
-	__le16 api_major;
-	__le16 api_minor;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
-
-/* Send driver version (indirect 0x0002) */
-struct i40e_aqc_driver_version {
-	u8	driver_major_ver;
-	u8	driver_minor_ver;
-	u8	driver_build_ver;
-	u8	driver_subbuild_ver;
-	u8	reserved[4];
-	__le32	address_high;
-	__le32	address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
-
-/* Queue Shutdown (direct 0x0003) */
-struct i40e_aqc_queue_shutdown {
-	__le32	driver_unloading;
-#define I40E_AQ_DRIVER_UNLOADING	0x1
-	u8	reserved[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
-
-/* Set PF context (0x0004, direct) */
-struct i40e_aqc_set_pf_context {
-	u8	pf_id;
-	u8	reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
-
-/* Request resource ownership (direct 0x0008)
- * Release resource ownership (direct 0x0009)
- */
-#define I40E_AQ_RESOURCE_NVM			1
-#define I40E_AQ_RESOURCE_SDP			2
-#define I40E_AQ_RESOURCE_ACCESS_READ		1
-#define I40E_AQ_RESOURCE_ACCESS_WRITE		2
-#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT	3000
-#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT	180000
-
-struct i40e_aqc_request_resource {
-	__le16	resource_id;
-	__le16	access_type;
-	__le32	timeout;
-	__le32	resource_number;
-	u8	reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
-
-/* Get function capabilities (indirect 0x000A)
- * Get device capabilities (indirect 0x000B)
- */
-struct i40e_aqc_list_capabilites {
-	u8 command_flags;
-#define I40E_AQ_LIST_CAP_PF_INDEX_EN	1
-	u8 pf_index;
-	u8 reserved[2];
-	__le32 count;
-	__le32 addr_high;
-	__le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
-
-struct i40e_aqc_list_capabilities_element_resp {
-	__le16	id;
-	u8	major_rev;
-	u8	minor_rev;
-	__le32	number;
-	__le32	logical_id;
-	__le32	phys_id;
-	u8	reserved[16];
-};
-
-/* list of caps */
-
-#define I40E_AQ_CAP_ID_SWITCH_MODE	0x0001
-#define I40E_AQ_CAP_ID_MNG_MODE		0x0002
-#define I40E_AQ_CAP_ID_NPAR_ACTIVE	0x0003
-#define I40E_AQ_CAP_ID_OS2BMC_CAP	0x0004
-#define I40E_AQ_CAP_ID_FUNCTIONS_VALID	0x0005
-#define I40E_AQ_CAP_ID_ALTERNATE_RAM	0x0006
-#define I40E_AQ_CAP_ID_WOL_AND_PROXY	0x0008
-#define I40E_AQ_CAP_ID_SRIOV		0x0012
-#define I40E_AQ_CAP_ID_VF		0x0013
-#define I40E_AQ_CAP_ID_VMDQ		0x0014
-#define I40E_AQ_CAP_ID_8021QBG		0x0015
-#define I40E_AQ_CAP_ID_8021QBR		0x0016
-#define I40E_AQ_CAP_ID_VSI		0x0017
-#define I40E_AQ_CAP_ID_DCB		0x0018
-#define I40E_AQ_CAP_ID_FCOE		0x0021
-#define I40E_AQ_CAP_ID_ISCSI		0x0022
-#define I40E_AQ_CAP_ID_RSS		0x0040
-#define I40E_AQ_CAP_ID_RXQ		0x0041
-#define I40E_AQ_CAP_ID_TXQ		0x0042
-#define I40E_AQ_CAP_ID_MSIX		0x0043
-#define I40E_AQ_CAP_ID_VF_MSIX		0x0044
-#define I40E_AQ_CAP_ID_FLOW_DIRECTOR	0x0045
-#define I40E_AQ_CAP_ID_1588		0x0046
-#define I40E_AQ_CAP_ID_IWARP		0x0051
-#define I40E_AQ_CAP_ID_LED		0x0061
-#define I40E_AQ_CAP_ID_SDP		0x0062
-#define I40E_AQ_CAP_ID_MDIO		0x0063
-#define I40E_AQ_CAP_ID_WSR_PROT		0x0064
-#define I40E_AQ_CAP_ID_NVM_MGMT		0x0080
-#define I40E_AQ_CAP_ID_FLEX10		0x00F1
-#define I40E_AQ_CAP_ID_CEM		0x00F2
-
-/* Set CPPM Configuration (direct 0x0103) */
-struct i40e_aqc_cppm_configuration {
-	__le16	command_flags;
-#define I40E_AQ_CPPM_EN_LTRC	0x0800
-#define I40E_AQ_CPPM_EN_DMCTH	0x1000
-#define I40E_AQ_CPPM_EN_DMCTLX	0x2000
-#define I40E_AQ_CPPM_EN_HPTC	0x4000
-#define I40E_AQ_CPPM_EN_DMARC	0x8000
-	__le16	ttlx;
-	__le32	dmacr;
-	__le16	dmcth;
-	u8	hptc;
-	u8	reserved;
-	__le32	pfltrc;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
-
-/* Set ARP Proxy command / response (indirect 0x0104) */
-struct i40e_aqc_arp_proxy_data {
-	__le16	command_flags;
-#define I40E_AQ_ARP_INIT_IPV4	0x0800
-#define I40E_AQ_ARP_UNSUP_CTL	0x1000
-#define I40E_AQ_ARP_ENA		0x2000
-#define I40E_AQ_ARP_ADD_IPV4	0x4000
-#define I40E_AQ_ARP_DEL_IPV4	0x8000
-	__le16	table_id;
-	__le32	enabled_offloads;
-#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE	0x00000020
-#define I40E_AQ_ARP_OFFLOAD_ENABLE		0x00000800
-	__le32	ip_addr;
-	u8	mac_addr[6];
-	u8	reserved[2];
-};
-
-I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data);
-
-/* Set NS Proxy Table Entry Command (indirect 0x0105) */
-struct i40e_aqc_ns_proxy_data {
-	__le16	table_idx_mac_addr_0;
-	__le16	table_idx_mac_addr_1;
-	__le16	table_idx_ipv6_0;
-	__le16	table_idx_ipv6_1;
-	__le16	control;
-#define I40E_AQ_NS_PROXY_ADD_0		0x0001
-#define I40E_AQ_NS_PROXY_DEL_0		0x0002
-#define I40E_AQ_NS_PROXY_ADD_1		0x0004
-#define I40E_AQ_NS_PROXY_DEL_1		0x0008
-#define I40E_AQ_NS_PROXY_ADD_IPV6_0	0x0010
-#define I40E_AQ_NS_PROXY_DEL_IPV6_0	0x0020
-#define I40E_AQ_NS_PROXY_ADD_IPV6_1	0x0040
-#define I40E_AQ_NS_PROXY_DEL_IPV6_1	0x0080
-#define I40E_AQ_NS_PROXY_COMMAND_SEQ	0x0100
-#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL	0x0200
-#define I40E_AQ_NS_PROXY_INIT_MAC_TBL	0x0400
-#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE	0x0800
-#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE	0x1000
-	u8	mac_addr_0[6];
-	u8	mac_addr_1[6];
-	u8	local_mac_addr[6];
-	u8	ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
-	u8	ipv6_addr_1[16];
-};
-
-I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
-
-/* Manage LAA Command (0x0106) - obsolete */
-struct i40e_aqc_mng_laa {
-	__le16	command_flags;
-#define I40E_AQ_LAA_FLAG_WR	0x8000
-	u8	reserved[2];
-	__le32	sal;
-	__le16	sah;
-	u8	reserved2[6];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
-
-/* Manage MAC Address Read Command (indirect 0x0107) */
-struct i40e_aqc_mac_address_read {
-	__le16	command_flags;
-#define I40E_AQC_LAN_ADDR_VALID		0x10
-#define I40E_AQC_SAN_ADDR_VALID		0x20
-#define I40E_AQC_PORT_ADDR_VALID	0x40
-#define I40E_AQC_WOL_ADDR_VALID		0x80
-#define I40E_AQC_MC_MAG_EN_VALID	0x100
-#define I40E_AQC_ADDR_VALID_MASK	0x3F0
-	u8	reserved[6];
-	__le32	addr_high;
-	__le32	addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
-
-struct i40e_aqc_mac_address_read_data {
-	u8 pf_lan_mac[6];
-	u8 pf_san_mac[6];
-	u8 port_mac[6];
-	u8 pf_wol_mac[6];
-};
-
-I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
-
-/* Manage MAC Address Write Command (0x0108) */
-struct i40e_aqc_mac_address_write {
-	__le16	command_flags;
-#define I40E_AQC_WRITE_TYPE_LAA_ONLY	0x0000
-#define I40E_AQC_WRITE_TYPE_LAA_WOL	0x4000
-#define I40E_AQC_WRITE_TYPE_PORT	0x8000
-#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG	0xC000
-#define I40E_AQC_WRITE_TYPE_MASK	0xC000
-
-	__le16	mac_sah;
-	__le32	mac_sal;
-	u8	reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
-
-/* PXE commands (0x011x) */
-
-/* Clear PXE Command and response  (direct 0x0110) */
-struct i40e_aqc_clear_pxe {
-	u8	rx_cnt;
-	u8	reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
-
-/* Set WoL Filter (0x0120) */
-
-struct i40e_aqc_set_wol_filter {
-	__le16 filter_index;
-#define I40E_AQC_MAX_NUM_WOL_FILTERS	8
-#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT	15
-#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK	(0x1 << \
-		I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
-
-#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT		0
-#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK	(0x7 << \
-		I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
-	__le16 cmd_flags;
-#define I40E_AQC_SET_WOL_FILTER				0x8000
-#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL		0x4000
-#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR	0x2000
-#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR		0
-#define I40E_AQC_SET_WOL_FILTER_ACTION_SET		1
-	__le16 valid_flags;
-#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID		0x8000
-#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID	0x4000
-	u8 reserved[2];
-	__le32	address_high;
-	__le32	address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
-
-struct i40e_aqc_set_wol_filter_data {
-	u8 filter[128];
-	u8 mask[16];
-};
-
-I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
-
-/* Get Wake Reason (0x0121) */
-
-struct i40e_aqc_get_wake_reason_completion {
-	u8 reserved_1[2];
-	__le16 wake_reason;
-#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT	0
-#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
-		I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
-#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT	8
-#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK	(0xFF << \
-		I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
-	u8 reserved_2[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
-
-/* Switch configuration commands (0x02xx) */
-
-/* Used by many indirect commands that only pass an seid and a buffer in the
- * command
- */
-struct i40e_aqc_switch_seid {
-	__le16	seid;
-	u8	reserved[6];
-	__le32	addr_high;
-	__le32	addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
-
-/* Get Switch Configuration command (indirect 0x0200)
- * uses i40e_aqc_switch_seid for the descriptor
- */
-struct i40e_aqc_get_switch_config_header_resp {
-	__le16	num_reported;
-	__le16	num_total;
-	u8	reserved[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
-
-struct i40e_aqc_switch_config_element_resp {
-	u8	element_type;
-#define I40E_AQ_SW_ELEM_TYPE_MAC	1
-#define I40E_AQ_SW_ELEM_TYPE_PF		2
-#define I40E_AQ_SW_ELEM_TYPE_VF		3
-#define I40E_AQ_SW_ELEM_TYPE_EMP	4
-#define I40E_AQ_SW_ELEM_TYPE_BMC	5
-#define I40E_AQ_SW_ELEM_TYPE_PV		16
-#define I40E_AQ_SW_ELEM_TYPE_VEB	17
-#define I40E_AQ_SW_ELEM_TYPE_PA		18
-#define I40E_AQ_SW_ELEM_TYPE_VSI	19
-	u8	revision;
-#define I40E_AQ_SW_ELEM_REV_1		1
-	__le16	seid;
-	__le16	uplink_seid;
-	__le16	downlink_seid;
-	u8	reserved[3];
-	u8	connection_type;
-#define I40E_AQ_CONN_TYPE_REGULAR	0x1
-#define I40E_AQ_CONN_TYPE_DEFAULT	0x2
-#define I40E_AQ_CONN_TYPE_CASCADED	0x3
-	__le16	scheduler_id;
-	__le16	element_info;
-};
-
-I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp);
-
-/* Get Switch Configuration (indirect 0x0200)
- *    an array of elements are returned in the response buffer
- *    the first in the array is the header, remainder are elements
- */
-struct i40e_aqc_get_switch_config_resp {
-	struct i40e_aqc_get_switch_config_header_resp	header;
-	struct i40e_aqc_switch_config_element_resp	element[1];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp);
-
-/* Add Statistics (direct 0x0201)
- * Remove Statistics (direct 0x0202)
- */
-struct i40e_aqc_add_remove_statistics {
-	__le16	seid;
-	__le16	vlan;
-	__le16	stat_index;
-	u8	reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
-
-/* Set Port Parameters command (direct 0x0203) */
-struct i40e_aqc_set_port_parameters {
-	__le16	command_flags;
-#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS	1
-#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS	2 /* must set! */
-#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA	4
-	__le16	bad_frame_vsi;
-#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT	0x0
-#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK	0x3FF
-	__le16	default_seid;        /* reserved for command */
-	u8	reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
-
-/* Get Switch Resource Allocation (indirect 0x0204) */
-struct i40e_aqc_get_switch_resource_alloc {
-	u8	num_entries;         /* reserved for command */
-	u8	reserved[7];
-	__le32	addr_high;
-	__le32	addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
-
-/* expect an array of these structs in the response buffer */
-struct i40e_aqc_switch_resource_alloc_element_resp {
-	u8	resource_type;
-#define I40E_AQ_RESOURCE_TYPE_VEB		0x0
-#define I40E_AQ_RESOURCE_TYPE_VSI		0x1
-#define I40E_AQ_RESOURCE_TYPE_MACADDR		0x2
-#define I40E_AQ_RESOURCE_TYPE_STAG		0x3
-#define I40E_AQ_RESOURCE_TYPE_ETAG		0x4
-#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH	0x5
-#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH	0x6
-#define I40E_AQ_RESOURCE_TYPE_VLAN		0x7
-#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY	0x8
-#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY	0x9
-#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL	0xA
-#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE	0xB
-#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS	0xC
-#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS	0xD
-#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS	0xF
-#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS	0x10
-#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS	0x11
-#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS		0x12
-#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS	0x13
-	u8	reserved1;
-	__le16	guaranteed;
-	__le16	total;
-	__le16	used;
-	__le16	total_unalloced;
-	u8	reserved2[6];
-};
-
-I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
-
-/* Set Switch Configuration (direct 0x0205) */
-struct i40e_aqc_set_switch_config {
-	__le16	flags;
-/* flags used for both fields below */
-#define I40E_AQ_SET_SWITCH_CFG_PROMISC		0x0001
-#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER	0x0002
-	__le16	valid_flags;
-	/* The ethertype in switch_tag is dropped on ingress and used
-	 * internally by the switch. Set this to zero for the default
-	 * of 0x88a8 (802.1ad). Should be zero for firmware API
-	 * versions lower than 1.7.
-	 */
-	__le16	switch_tag;
-	/* The ethertypes in first_tag and second_tag are used to
-	 * match the outer and inner VLAN tags (respectively) when HW
-	 * double VLAN tagging is enabled via the set port parameters
-	 * AQ command. Otherwise these are both ignored. Set them to
-	 * zero for their defaults of 0x8100 (802.1Q). Should be zero
-	 * for firmware API versions lower than 1.7.
-	 */
-	__le16	first_tag;
-	__le16	second_tag;
-	u8	reserved[6];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
-
-/* Read Receive control registers  (direct 0x0206)
- * Write Receive control registers (direct 0x0207)
- *     used for accessing Rx control registers that can be
- *     slow and need special handling when under high Rx load
- */
-struct i40e_aqc_rx_ctl_reg_read_write {
-	__le32 reserved1;
-	__le32 address;
-	__le32 reserved2;
-	__le32 value;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write);
-
-/* Add VSI (indirect 0x0210)
- *    this indirect command uses struct i40e_aqc_vsi_properties_data
- *    as the indirect buffer (128 bytes)
- *
- * Update VSI (indirect 0x211)
- *     uses the same data structure as Add VSI
- *
- * Get VSI (indirect 0x0212)
- *     uses the same completion and data structure as Add VSI
- */
-struct i40e_aqc_add_get_update_vsi {
-	__le16	uplink_seid;
-	u8	connection_type;
-#define I40E_AQ_VSI_CONN_TYPE_NORMAL	0x1
-#define I40E_AQ_VSI_CONN_TYPE_DEFAULT	0x2
-#define I40E_AQ_VSI_CONN_TYPE_CASCADED	0x3
-	u8	reserved1;
-	u8	vf_id;
-	u8	reserved2;
-	__le16	vsi_flags;
-#define I40E_AQ_VSI_TYPE_SHIFT		0x0
-#define I40E_AQ_VSI_TYPE_MASK		(0x3 << I40E_AQ_VSI_TYPE_SHIFT)
-#define I40E_AQ_VSI_TYPE_VF		0x0
-#define I40E_AQ_VSI_TYPE_VMDQ2		0x1
-#define I40E_AQ_VSI_TYPE_PF		0x2
-#define I40E_AQ_VSI_TYPE_EMP_MNG	0x3
-#define I40E_AQ_VSI_FLAG_CASCADED_PV	0x4
-	__le32	addr_high;
-	__le32	addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
-
-struct i40e_aqc_add_get_update_vsi_completion {
-	__le16 seid;
-	__le16 vsi_number;
-	__le16 vsi_used;
-	__le16 vsi_free;
-	__le32 addr_high;
-	__le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
-
-struct i40e_aqc_vsi_properties_data {
-	/* first 96 byte are written by SW */
-	__le16	valid_sections;
-#define I40E_AQ_VSI_PROP_SWITCH_VALID		0x0001
-#define I40E_AQ_VSI_PROP_SECURITY_VALID		0x0002
-#define I40E_AQ_VSI_PROP_VLAN_VALID		0x0004
-#define I40E_AQ_VSI_PROP_CAS_PV_VALID		0x0008
-#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID	0x0010
-#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID	0x0020
-#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID	0x0040
-#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID	0x0080
-#define I40E_AQ_VSI_PROP_OUTER_UP_VALID		0x0100
-#define I40E_AQ_VSI_PROP_SCHED_VALID		0x0200
-	/* switch section */
-	__le16	switch_id; /* 12bit id combined with flags below */
-#define I40E_AQ_VSI_SW_ID_SHIFT		0x0000
-#define I40E_AQ_VSI_SW_ID_MASK		(0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
-#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG	0x1000
-#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB	0x2000
-#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB	0x4000
-	u8	sw_reserved[2];
-	/* security section */
-	u8	sec_flags;
-#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD	0x01
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK	0x02
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK	0x04
-	u8	sec_reserved;
-	/* VLAN section */
-	__le16	pvid; /* VLANS include priority bits */
-	__le16	fcoe_pvid;
-	u8	port_vlan_flags;
-#define I40E_AQ_VSI_PVLAN_MODE_SHIFT	0x00
-#define I40E_AQ_VSI_PVLAN_MODE_MASK	(0x03 << \
-					 I40E_AQ_VSI_PVLAN_MODE_SHIFT)
-#define I40E_AQ_VSI_PVLAN_MODE_TAGGED	0x01
-#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED	0x02
-#define I40E_AQ_VSI_PVLAN_MODE_ALL	0x03
-#define I40E_AQ_VSI_PVLAN_INSERT_PVID	0x04
-#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT	0x03
-#define I40E_AQ_VSI_PVLAN_EMOD_MASK	(0x3 << \
-					 I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH	0x0
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP	0x08
-#define I40E_AQ_VSI_PVLAN_EMOD_STR	0x10
-#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING	0x18
-	u8	pvlan_reserved[3];
-	/* ingress egress up sections */
-	__le32	ingress_table; /* bitmap, 3 bits per up */
-#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT	0
-#define I40E_AQ_VSI_UP_TABLE_UP0_MASK	(0x7 << \
-					 I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT	3
-#define I40E_AQ_VSI_UP_TABLE_UP1_MASK	(0x7 << \
-					 I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT	6
-#define I40E_AQ_VSI_UP_TABLE_UP2_MASK	(0x7 << \
-					 I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT	9
-#define I40E_AQ_VSI_UP_TABLE_UP3_MASK	(0x7 << \
-					 I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT	12
-#define I40E_AQ_VSI_UP_TABLE_UP4_MASK	(0x7 << \
-					 I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT	15
-#define I40E_AQ_VSI_UP_TABLE_UP5_MASK	(0x7 << \
-					 I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT	18
-#define I40E_AQ_VSI_UP_TABLE_UP6_MASK	(0x7 << \
-					 I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT	21
-#define I40E_AQ_VSI_UP_TABLE_UP7_MASK	(0x7 << \
-					 I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
-	__le32	egress_table;   /* same defines as for ingress table */
-	/* cascaded PV section */
-	__le16	cas_pv_tag;
-	u8	cas_pv_flags;
-#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT		0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_MASK		(0x03 << \
-						 I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
-#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE		0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE		0x01
-#define I40E_AQ_VSI_CAS_PV_TAGX_COPY		0x02
-#define I40E_AQ_VSI_CAS_PV_INSERT_TAG		0x10
-#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE		0x20
-#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG	0x40
-	u8	cas_pv_reserved;
-	/* queue mapping section */
-	__le16	mapping_flags;
-#define I40E_AQ_VSI_QUE_MAP_CONTIG	0x0
-#define I40E_AQ_VSI_QUE_MAP_NONCONTIG	0x1
-	__le16	queue_mapping[16];
-#define I40E_AQ_VSI_QUEUE_SHIFT		0x0
-#define I40E_AQ_VSI_QUEUE_MASK		(0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
-	__le16	tc_mapping[8];
-#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT	0
-#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK	(0x1FF << \
-					 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
-#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT	9
-#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK	(0x7 << \
-					 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
-	/* queueing option section */
-	u8	queueing_opt_flags;
-#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA	0x04
-#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA	0x08
-#define I40E_AQ_VSI_QUE_OPT_TCP_ENA	0x10
-#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA	0x20
-#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF	0x00
-#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI	0x40
-	u8	queueing_opt_reserved[3];
-	/* scheduler section */
-	u8	up_enable_bits;
-	u8	sched_reserved;
-	/* outer up section */
-	__le32	outer_up_table; /* same structure and defines as ingress tbl */
-	u8	cmd_reserved[8];
-	/* last 32 bytes are written by FW */
-	__le16	qs_handle[8];
-#define I40E_AQ_VSI_QS_HANDLE_INVALID	0xFFFF
-	__le16	stat_counter_idx;
-	__le16	sched_id;
-	u8	resp_reserved[12];
-};
-
-I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
-
-/* Add Port Virtualizer (direct 0x0220)
- * also used for update PV (direct 0x0221) but only flags are used
- * (IS_CTRL_PORT only works on add PV)
- */
-struct i40e_aqc_add_update_pv {
-	__le16	command_flags;
-#define I40E_AQC_PV_FLAG_PV_TYPE		0x1
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN	0x2
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN	0x4
-#define I40E_AQC_PV_FLAG_IS_CTRL_PORT		0x8
-	__le16	uplink_seid;
-	__le16	connected_seid;
-	u8	reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
-
-struct i40e_aqc_add_update_pv_completion {
-	/* reserved for update; for add also encodes error if rc == ENOSPC */
-	__le16	pv_seid;
-#define I40E_AQC_PV_ERR_FLAG_NO_PV	0x1
-#define I40E_AQC_PV_ERR_FLAG_NO_SCHED	0x2
-#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER	0x4
-#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY	0x8
-	u8	reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
-
-/* Get PV Params (direct 0x0222)
- * uses i40e_aqc_switch_seid for the descriptor
- */
-
-struct i40e_aqc_get_pv_params_completion {
-	__le16	seid;
-	__le16	default_stag;
-	__le16	pv_flags; /* same flags as add_pv */
-#define I40E_AQC_GET_PV_PV_TYPE			0x1
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG	0x2
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG	0x4
-	u8	reserved[8];
-	__le16	default_port_seid;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
-
-/* Add VEB (direct 0x0230) */
-struct i40e_aqc_add_veb {
-	__le16	uplink_seid;
-	__le16	downlink_seid;
-	__le16	veb_flags;
-#define I40E_AQC_ADD_VEB_FLOATING		0x1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT	1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK		(0x3 << \
-					I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT	0x2
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA		0x4
-#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER	0x8     /* deprecated */
-#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS	0x10
-	u8	enable_tcs;
-	u8	reserved[9];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
-
-struct i40e_aqc_add_veb_completion {
-	u8	reserved[6];
-	__le16	switch_seid;
-	/* also encodes error if rc == ENOSPC; codes are the same as add_pv */
-	__le16	veb_seid;
-#define I40E_AQC_VEB_ERR_FLAG_NO_VEB		0x1
-#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED		0x2
-#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER	0x4
-#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY		0x8
-	__le16	statistic_index;
-	__le16	vebs_used;
-	__le16	vebs_free;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
-
-/* Get VEB Parameters (direct 0x0232)
- * uses i40e_aqc_switch_seid for the descriptor
- */
-struct i40e_aqc_get_veb_parameters_completion {
-	__le16	seid;
-	__le16	switch_id;
-	__le16	veb_flags; /* only the first/last flags from 0x0230 is valid */
-	__le16	statistic_index;
-	__le16	vebs_used;
-	__le16	vebs_free;
-	u8	reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
-
-/* Delete Element (direct 0x0243)
- * uses the generic i40e_aqc_switch_seid
- */
-
-/* Add MAC-VLAN (indirect 0x0250) */
-
-/* used for the command for most vlan commands */
-struct i40e_aqc_macvlan {
-	__le16	num_addresses;
-	__le16	seid[3];
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT	0
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK	(0x3FF << \
-					I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
-#define I40E_AQC_MACVLAN_CMD_SEID_VALID		0x8000
-	__le32	addr_high;
-	__le32	addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
-
-/* indirect data for command and response */
-struct i40e_aqc_add_macvlan_element_data {
-	u8	mac_addr[6];
-	__le16	vlan_tag;
-	__le16	flags;
-#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH	0x0001
-#define I40E_AQC_MACVLAN_ADD_HASH_MATCH		0x0002
-#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN	0x0004
-#define I40E_AQC_MACVLAN_ADD_TO_QUEUE		0x0008
-#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC	0x0010
-	__le16	queue_number;
-#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT	0
-#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK		(0x7FF << \
-					I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
-	/* response section */
-	u8	match_method;
-#define I40E_AQC_MM_PERFECT_MATCH	0x01
-#define I40E_AQC_MM_HASH_MATCH		0x02
-#define I40E_AQC_MM_ERR_NO_RES		0xFF
-	u8	reserved1[3];
-};
-
-struct i40e_aqc_add_remove_macvlan_completion {
-	__le16 perfect_mac_used;
-	__le16 perfect_mac_free;
-	__le16 unicast_hash_free;
-	__le16 multicast_hash_free;
-	__le32 addr_high;
-	__le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
-
-/* Remove MAC-VLAN (indirect 0x0251)
- * uses i40e_aqc_macvlan for the descriptor
- * data points to an array of num_addresses of elements
- */
-
-struct i40e_aqc_remove_macvlan_element_data {
-	u8	mac_addr[6];
-	__le16	vlan_tag;
-	u8	flags;
-#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH	0x01
-#define I40E_AQC_MACVLAN_DEL_HASH_MATCH		0x02
-#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN	0x08
-#define I40E_AQC_MACVLAN_DEL_ALL_VSIS		0x10
-	u8	reserved[3];
-	/* reply section */
-	u8	error_code;
-#define I40E_AQC_REMOVE_MACVLAN_SUCCESS		0x0
-#define I40E_AQC_REMOVE_MACVLAN_FAIL		0xFF
-	u8	reply_reserved[3];
-};
-
-/* Add VLAN (indirect 0x0252)
- * Remove VLAN (indirect 0x0253)
- * use the generic i40e_aqc_macvlan for the command
- */
-struct i40e_aqc_add_remove_vlan_element_data {
-	__le16	vlan_tag;
-	u8	vlan_flags;
-/* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_LOCAL			0x1
-#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT		1
-#define I40E_AQC_ADD_PVLAN_TYPE_MASK	(0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
-#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR		0x0
-#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY		0x2
-#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY	0x4
-#define I40E_AQC_VLAN_PTYPE_SHIFT		3
-#define I40E_AQC_VLAN_PTYPE_MASK	(0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
-#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI		0x0
-#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI		0x8
-#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI	0x10
-#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI	0x18
-/* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_ALL	0x1
-	u8	reserved;
-	u8	result;
-/* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_SUCCESS	0x0
-#define I40E_AQC_ADD_VLAN_FAIL_REQUEST	0xFE
-#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE	0xFF
-/* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_SUCCESS	0x0
-#define I40E_AQC_REMOVE_VLAN_FAIL	0xFF
-	u8	reserved1[3];
-};
-
-struct i40e_aqc_add_remove_vlan_completion {
-	u8	reserved[4];
-	__le16	vlans_used;
-	__le16	vlans_free;
-	__le32	addr_high;
-	__le32	addr_low;
-};
-
-/* Set VSI Promiscuous Modes (direct 0x0254) */
-struct i40e_aqc_set_vsi_promiscuous_modes {
-	__le16	promiscuous_flags;
-	__le16	valid_flags;
-/* flags used for both fields above */
-#define I40E_AQC_SET_VSI_PROMISC_UNICAST	0x01
-#define I40E_AQC_SET_VSI_PROMISC_MULTICAST	0x02
-#define I40E_AQC_SET_VSI_PROMISC_BROADCAST	0x04
-#define I40E_AQC_SET_VSI_DEFAULT		0x08
-#define I40E_AQC_SET_VSI_PROMISC_VLAN		0x10
-#define I40E_AQC_SET_VSI_PROMISC_TX		0x8000
-	__le16	seid;
-#define I40E_AQC_VSI_PROM_CMD_SEID_MASK		0x3FF
-	__le16	vlan_tag;
-#define I40E_AQC_SET_VSI_VLAN_MASK		0x0FFF
-#define I40E_AQC_SET_VSI_VLAN_VALID		0x8000
-	u8	reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
-
-/* Add S/E-tag command (direct 0x0255)
- * Uses generic i40e_aqc_add_remove_tag_completion for completion
- */
-struct i40e_aqc_add_tag {
-	__le16	flags;
-#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE		0x0001
-	__le16	seid;
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT	0
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK	(0x3FF << \
-					I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
-	__le16	tag;
-	__le16	queue_number;
-	u8	reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
-
-struct i40e_aqc_add_remove_tag_completion {
-	u8	reserved[12];
-	__le16	tags_used;
-	__le16	tags_free;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
-
-/* Remove S/E-tag command (direct 0x0256)
- * Uses generic i40e_aqc_add_remove_tag_completion for completion
- */
-struct i40e_aqc_remove_tag {
-	__le16	seid;
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT	0
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK	(0x3FF << \
-					I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
-	__le16	tag;
-	u8	reserved[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag);
-
-/* Add multicast E-Tag (direct 0x0257)
- * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
- * and no external data
- */
-struct i40e_aqc_add_remove_mcast_etag {
-	__le16	pv_seid;
-	__le16	etag;
-	u8	num_unicast_etags;
-	u8	reserved[3];
-	__le32	addr_high;          /* address of array of 2-byte s-tags */
-	__le32	addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
-
-struct i40e_aqc_add_remove_mcast_etag_completion {
-	u8	reserved[4];
-	__le16	mcast_etags_used;
-	__le16	mcast_etags_free;
-	__le32	addr_high;
-	__le32	addr_low;
-
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
-
-/* Update S/E-Tag (direct 0x0259) */
-struct i40e_aqc_update_tag {
-	__le16	seid;
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT	0
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK	(0x3FF << \
-					I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
-	__le16	old_tag;
-	__le16	new_tag;
-	u8	reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
-
-struct i40e_aqc_update_tag_completion {
-	u8	reserved[12];
-	__le16	tags_used;
-	__le16	tags_free;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
-
-/* Add Control Packet filter (direct 0x025A)
- * Remove Control Packet filter (direct 0x025B)
- * uses the i40e_aqc_add_oveb_cloud,
- * and the generic direct completion structure
- */
-struct i40e_aqc_add_remove_control_packet_filter {
-	u8	mac[6];
-	__le16	etype;
-	__le16	flags;
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC	0x0001
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP		0x0002
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE	0x0004
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX		0x0008
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX		0x0000
-	__le16	seid;
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT	0
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK	(0x3FF << \
-				I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
-	__le16	queue;
-	u8	reserved[2];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
-
-struct i40e_aqc_add_remove_control_packet_filter_completion {
-	__le16	mac_etype_used;
-	__le16	etype_used;
-	__le16	mac_etype_free;
-	__le16	etype_free;
-	u8	reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
-
-/* Add Cloud filters (indirect 0x025C)
- * Remove Cloud filters (indirect 0x025D)
- * uses the i40e_aqc_add_remove_cloud_filters,
- * and the generic indirect completion structure
- */
-struct i40e_aqc_add_remove_cloud_filters {
-	u8	num_filters;
-	u8	reserved;
-	__le16	seid;
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT	0
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK	(0x3FF << \
-					I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
-	u8	big_buffer_flag;
-#define I40E_AQC_ADD_CLOUD_CMD_BB	1
-	u8	reserved2[3];
-	__le32	addr_high;
-	__le32	addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
-
-struct i40e_aqc_cloud_filters_element_data {
-	u8	outer_mac[6];
-	u8	inner_mac[6];
-	__le16	inner_vlan;
-	union {
-		struct {
-			u8 reserved[12];
-			u8 data[4];
-		} v4;
-		struct {
-			u8 data[16];
-		} v6;
-		struct {
-			__le16 data[8];
-		} raw_v6;
-	} ipaddr;
-	__le16	flags;
-#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT			0
-#define I40E_AQC_ADD_CLOUD_FILTER_MASK	(0x3F << \
-					I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
-/* 0x0000 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP			0x0001
-/* 0x0002 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN		0x0003
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID	0x0004
-/* 0x0005 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID		0x0006
-/* 0x0007 reserved */
-/* 0x0008 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC			0x0009
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC			0x000A
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC	0x000B
-#define I40E_AQC_ADD_CLOUD_FILTER_IIP			0x000C
-/* 0x0010 to 0x0017 is for custom filters */
-#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT		0x0010 /* Dest IP + L4 Port */
-#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT		0x0011 /* Dest MAC + L4 Port */
-#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT		0x0012 /* Dest MAC + VLAN + L4 Port */
-
-#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE		0x0080
-#define I40E_AQC_ADD_CLOUD_VNK_SHIFT			6
-#define I40E_AQC_ADD_CLOUD_VNK_MASK			0x00C0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4			0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6			0x0100
-
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT		9
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK		0x1E00
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN		0
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC		1
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE		2
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP			3
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED		4
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE		5
-
-#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC	0x2000
-#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC	0x4000
-#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP	0x8000
-
-	__le32	tenant_id;
-	u8	reserved[4];
-	__le16	queue_number;
-#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT		0
-#define I40E_AQC_ADD_CLOUD_QUEUE_MASK		(0x7FF << \
-						 I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
-	u8	reserved2[14];
-	/* response section */
-	u8	allocation_result;
-#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS	0x0
-#define I40E_AQC_ADD_CLOUD_FILTER_FAIL		0xFF
-	u8	response_reserved[7];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data);
-
-/* i40e_aqc_cloud_filters_element_bb is used when
- * I40E_AQC_ADD_CLOUD_CMD_BB flag is set.
- */
-struct i40e_aqc_cloud_filters_element_bb {
-	struct i40e_aqc_cloud_filters_element_data element;
-	u16     general_fields[32];
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0	0
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1	1
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2	2
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0	3
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1	4
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2	5
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0	6
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1	7
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2	8
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0	9
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1	10
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2	11
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0	12
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1	13
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2	14
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0	15
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1	16
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2	17
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3	18
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4	19
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5	20
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6	21
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7	22
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0	23
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1	24
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2	25
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3	26
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4	27
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5	28
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6	29
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7	30
-};
-
-I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb);
-
-struct i40e_aqc_remove_cloud_filters_completion {
-	__le16 perfect_ovlan_used;
-	__le16 perfect_ovlan_free;
-	__le16 vlan_used;
-	__le16 vlan_free;
-	__le32 addr_high;
-	__le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
-
-/* Replace filter Command 0x025F
- * uses the i40e_aqc_replace_cloud_filters,
- * and the generic indirect completion structure
- */
-struct i40e_filter_data {
-	u8 filter_type;
-	u8 input[3];
-};
-
-I40E_CHECK_STRUCT_LEN(4, i40e_filter_data);
-
-struct i40e_aqc_replace_cloud_filters_cmd {
-	u8      valid_flags;
-#define I40E_AQC_REPLACE_L1_FILTER		0x0
-#define I40E_AQC_REPLACE_CLOUD_FILTER		0x1
-#define I40E_AQC_GET_CLOUD_FILTERS		0x2
-#define I40E_AQC_MIRROR_CLOUD_FILTER		0x4
-#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER	0x8
-	u8      old_filter_type;
-	u8      new_filter_type;
-	u8      tr_bit;
-	u8      reserved[4];
-	__le32 addr_high;
-	__le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd);
-
-struct i40e_aqc_replace_cloud_filters_cmd_buf {
-	u8      data[32];
-/* Filter type INPUT codes*/
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX	3
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED	BIT(7)
-
-/* Field Vector offsets */
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA	0
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH	6
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG	7
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN	8
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN	9
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN	10
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY	11
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC	12
-/* big FLU */
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA	14
-/* big FLU */
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA	15
-
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN	37
-	struct i40e_filter_data filters[8];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf);
-
-/* Add Mirror Rule (indirect or direct 0x0260)
- * Delete Mirror Rule (indirect or direct 0x0261)
- * note: some rule types (4,5) do not use an external buffer.
- *       take care to set the flags correctly.
- */
-struct i40e_aqc_add_delete_mirror_rule {
-	__le16 seid;
-	__le16 rule_type;
-#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT		0
-#define I40E_AQC_MIRROR_RULE_TYPE_MASK		(0x7 << \
-						I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS	1
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS	2
-#define I40E_AQC_MIRROR_RULE_TYPE_VLAN		3
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS	4
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS	5
-	__le16 num_entries;
-	__le16 destination;  /* VSI for add, rule id for delete */
-	__le32 addr_high;    /* address of array of 2-byte VSI or VLAN ids */
-	__le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
-
-struct i40e_aqc_add_delete_mirror_rule_completion {
-	u8	reserved[2];
-	__le16	rule_id;  /* only used on add */
-	__le16	mirror_rules_used;
-	__le16	mirror_rules_free;
-	__le32	addr_high;
-	__le32	addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-
-/* Dynamic Device Personalization */
-struct i40e_aqc_write_personalization_profile {
-	u8      flags;
-	u8      reserved[3];
-	__le32  profile_track_id;
-	__le32  addr_high;
-	__le32  addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
-
-struct i40e_aqc_write_ddp_resp {
-	__le32 error_offset;
-	__le32 error_info;
-	__le32 addr_high;
-	__le32 addr_low;
-};
-
-struct i40e_aqc_get_applied_profiles {
-	u8      flags;
-#define I40E_AQC_GET_DDP_GET_CONF	0x1
-#define I40E_AQC_GET_DDP_GET_RDPU_CONF	0x2
-	u8      rsv[3];
-	__le32  reserved;
-	__le32  addr_high;
-	__le32  addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles);
-
-/* DCB 0x03xx*/
-
-/* PFC Ignore (direct 0x0301)
- *    the command and response use the same descriptor structure
- */
-struct i40e_aqc_pfc_ignore {
-	u8	tc_bitmap;
-	u8	command_flags; /* unused on response */
-#define I40E_AQC_PFC_IGNORE_SET		0x80
-#define I40E_AQC_PFC_IGNORE_CLEAR	0x0
-	u8	reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
-
-/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
- * with no parameters
- */
-
-/* TX scheduler 0x04xx */
-
-/* Almost all the indirect commands use
- * this generic struct to pass the SEID in param0
- */
-struct i40e_aqc_tx_sched_ind {
-	__le16	vsi_seid;
-	u8	reserved[6];
-	__le32	addr_high;
-	__le32	addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
-
-/* Several commands respond with a set of queue set handles */
-struct i40e_aqc_qs_handles_resp {
-	__le16 qs_handles[8];
-};
-
-/* Configure VSI BW limits (direct 0x0400) */
-struct i40e_aqc_configure_vsi_bw_limit {
-	__le16	vsi_seid;
-	u8	reserved[2];
-	__le16	credit;
-	u8	reserved1[2];
-	u8	max_credit; /* 0-3, limit = 2^max */
-	u8	reserved2[7];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
-
-/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
- *    responds with i40e_aqc_qs_handles_resp
- */
-struct i40e_aqc_configure_vsi_ets_sla_bw_data {
-	u8	tc_valid_bits;
-	u8	reserved[15];
-	__le16	tc_bw_credits[8]; /* FW writesback QS handles here */
-
-	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
-	__le16	tc_bw_max[2];
-	u8	reserved1[28];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data);
-
-/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
- *    responds with i40e_aqc_qs_handles_resp
- */
-struct i40e_aqc_configure_vsi_tc_bw_data {
-	u8	tc_valid_bits;
-	u8	reserved[3];
-	u8	tc_bw_credits[8];
-	u8	reserved1[4];
-	__le16	qs_handles[8];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data);
-
-/* Query vsi bw configuration (indirect 0x0408) */
-struct i40e_aqc_query_vsi_bw_config_resp {
-	u8	tc_valid_bits;
-	u8	tc_suspended_bits;
-	u8	reserved[14];
-	__le16	qs_handles[8];
-	u8	reserved1[4];
-	__le16	port_bw_limit;
-	u8	reserved2[2];
-	u8	max_bw; /* 0-3, limit = 2^max */
-	u8	reserved3[23];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp);
-
-/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
-struct i40e_aqc_query_vsi_ets_sla_config_resp {
-	u8	tc_valid_bits;
-	u8	reserved[3];
-	u8	share_credits[8];
-	__le16	credits[8];
-
-	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
-	__le16	tc_bw_max[2];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp);
-
-/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
-struct i40e_aqc_configure_switching_comp_bw_limit {
-	__le16	seid;
-	u8	reserved[2];
-	__le16	credit;
-	u8	reserved1[2];
-	u8	max_bw; /* 0-3, limit = 2^max */
-	u8	reserved2[7];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
-
-/* Enable  Physical Port ETS (indirect 0x0413)
- * Modify  Physical Port ETS (indirect 0x0414)
- * Disable Physical Port ETS (indirect 0x0415)
- */
-struct i40e_aqc_configure_switching_comp_ets_data {
-	u8	reserved[4];
-	u8	tc_valid_bits;
-	u8	seepage;
-#define I40E_AQ_ETS_SEEPAGE_EN_MASK	0x1
-	u8	tc_strict_priority_flags;
-	u8	reserved1[17];
-	u8	tc_bw_share_credits[8];
-	u8	reserved2[96];
-};
-
-I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data);
-
-/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
-struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
-	u8	tc_valid_bits;
-	u8	reserved[15];
-	__le16	tc_bw_credit[8];
-
-	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
-	__le16	tc_bw_max[2];
-	u8	reserved1[28];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40,
-		      i40e_aqc_configure_switching_comp_ets_bw_limit_data);
-
-/* Configure Switching Component Bandwidth Allocation per Tc
- * (indirect 0x0417)
- */
-struct i40e_aqc_configure_switching_comp_bw_config_data {
-	u8	tc_valid_bits;
-	u8	reserved[2];
-	u8	absolute_credits; /* bool */
-	u8	tc_bw_share_credits[8];
-	u8	reserved1[20];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data);
-
-/* Query Switching Component Configuration (indirect 0x0418) */
-struct i40e_aqc_query_switching_comp_ets_config_resp {
-	u8	tc_valid_bits;
-	u8	reserved[35];
-	__le16	port_bw_limit;
-	u8	reserved1[2];
-	u8	tc_bw_max; /* 0-3, limit = 2^max */
-	u8	reserved2[23];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp);
-
-/* Query Physical Port ETS Configuration (indirect 0x0419) */
-struct i40e_aqc_query_port_ets_config_resp {
-	u8	reserved[4];
-	u8	tc_valid_bits;
-	u8	reserved1;
-	u8	tc_strict_priority_bits;
-	u8	reserved2;
-	u8	tc_bw_share_credits[8];
-	__le16	tc_bw_limits[8];
-
-	/* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
-	__le16	tc_bw_max[2];
-	u8	reserved3[32];
-};
-
-I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp);
-
-/* Query Switching Component Bandwidth Allocation per Traffic Type
- * (indirect 0x041A)
- */
-struct i40e_aqc_query_switching_comp_bw_config_resp {
-	u8	tc_valid_bits;
-	u8	reserved[2];
-	u8	absolute_credits_enable; /* bool */
-	u8	tc_bw_share_credits[8];
-	__le16	tc_bw_limits[8];
-
-	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
-	__le16	tc_bw_max[2];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp);
-
-/* Suspend/resume port TX traffic
- * (direct 0x041B and 0x041C) uses the generic SEID struct
- */
-
-/* Configure partition BW
- * (indirect 0x041D)
- */
-struct i40e_aqc_configure_partition_bw_data {
-	__le16	pf_valid_bits;
-	u8	min_bw[16];      /* guaranteed bandwidth */
-	u8	max_bw[16];      /* bandwidth limit */
-};
-
-I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
-
-/* Get and set the active HMC resource profile and status.
- * (direct 0x0500) and (direct 0x0501)
- */
-struct i40e_aq_get_set_hmc_resource_profile {
-	u8	pm_profile;
-	u8	pe_vf_enabled;
-	u8	reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
-
-enum i40e_aq_hmc_profile {
-	/* I40E_HMC_PROFILE_NO_CHANGE	= 0, reserved */
-	I40E_HMC_PROFILE_DEFAULT	= 1,
-	I40E_HMC_PROFILE_FAVOR_VF	= 2,
-	I40E_HMC_PROFILE_EQUAL		= 3,
-};
-
-/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
-
-/* set in param0 for get phy abilities to report qualified modules */
-#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES	0x0001
-#define I40E_AQ_PHY_REPORT_INITIAL_VALUES	0x0002
-
-enum i40e_aq_phy_type {
-	I40E_PHY_TYPE_SGMII			= 0x0,
-	I40E_PHY_TYPE_1000BASE_KX		= 0x1,
-	I40E_PHY_TYPE_10GBASE_KX4		= 0x2,
-	I40E_PHY_TYPE_10GBASE_KR		= 0x3,
-	I40E_PHY_TYPE_40GBASE_KR4		= 0x4,
-	I40E_PHY_TYPE_XAUI			= 0x5,
-	I40E_PHY_TYPE_XFI			= 0x6,
-	I40E_PHY_TYPE_SFI			= 0x7,
-	I40E_PHY_TYPE_XLAUI			= 0x8,
-	I40E_PHY_TYPE_XLPPI			= 0x9,
-	I40E_PHY_TYPE_40GBASE_CR4_CU		= 0xA,
-	I40E_PHY_TYPE_10GBASE_CR1_CU		= 0xB,
-	I40E_PHY_TYPE_10GBASE_AOC		= 0xC,
-	I40E_PHY_TYPE_40GBASE_AOC		= 0xD,
-	I40E_PHY_TYPE_UNRECOGNIZED		= 0xE,
-	I40E_PHY_TYPE_UNSUPPORTED		= 0xF,
-	I40E_PHY_TYPE_100BASE_TX		= 0x11,
-	I40E_PHY_TYPE_1000BASE_T		= 0x12,
-	I40E_PHY_TYPE_10GBASE_T			= 0x13,
-	I40E_PHY_TYPE_10GBASE_SR		= 0x14,
-	I40E_PHY_TYPE_10GBASE_LR		= 0x15,
-	I40E_PHY_TYPE_10GBASE_SFPP_CU		= 0x16,
-	I40E_PHY_TYPE_10GBASE_CR1		= 0x17,
-	I40E_PHY_TYPE_40GBASE_CR4		= 0x18,
-	I40E_PHY_TYPE_40GBASE_SR4		= 0x19,
-	I40E_PHY_TYPE_40GBASE_LR4		= 0x1A,
-	I40E_PHY_TYPE_1000BASE_SX		= 0x1B,
-	I40E_PHY_TYPE_1000BASE_LX		= 0x1C,
-	I40E_PHY_TYPE_1000BASE_T_OPTICAL	= 0x1D,
-	I40E_PHY_TYPE_20GBASE_KR2		= 0x1E,
-	I40E_PHY_TYPE_25GBASE_KR		= 0x1F,
-	I40E_PHY_TYPE_25GBASE_CR		= 0x20,
-	I40E_PHY_TYPE_25GBASE_SR		= 0x21,
-	I40E_PHY_TYPE_25GBASE_LR		= 0x22,
-	I40E_PHY_TYPE_25GBASE_AOC		= 0x23,
-	I40E_PHY_TYPE_25GBASE_ACC		= 0x24,
-	I40E_PHY_TYPE_MAX,
-	I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP	= 0xFD,
-	I40E_PHY_TYPE_EMPTY			= 0xFE,
-	I40E_PHY_TYPE_DEFAULT			= 0xFF,
-};
-
-#define I40E_LINK_SPEED_100MB_SHIFT	0x1
-#define I40E_LINK_SPEED_1000MB_SHIFT	0x2
-#define I40E_LINK_SPEED_10GB_SHIFT	0x3
-#define I40E_LINK_SPEED_40GB_SHIFT	0x4
-#define I40E_LINK_SPEED_20GB_SHIFT	0x5
-#define I40E_LINK_SPEED_25GB_SHIFT	0x6
-
-enum i40e_aq_link_speed {
-	I40E_LINK_SPEED_UNKNOWN	= 0,
-	I40E_LINK_SPEED_100MB	= BIT(I40E_LINK_SPEED_100MB_SHIFT),
-	I40E_LINK_SPEED_1GB	= BIT(I40E_LINK_SPEED_1000MB_SHIFT),
-	I40E_LINK_SPEED_10GB	= BIT(I40E_LINK_SPEED_10GB_SHIFT),
-	I40E_LINK_SPEED_40GB	= BIT(I40E_LINK_SPEED_40GB_SHIFT),
-	I40E_LINK_SPEED_20GB	= BIT(I40E_LINK_SPEED_20GB_SHIFT),
-	I40E_LINK_SPEED_25GB	= BIT(I40E_LINK_SPEED_25GB_SHIFT),
-};
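[Editorial note, not part of the patch: because the enum above is built from BIT() shifts, the link_speed byte reported by the PHY and link-status commands later in this header is a bitmap, not an index. A standalone sketch of turning it into Mb/s; only the bit positions come from the defines above, the helper itself is illustrative.]

    #include <stdint.h>

    /* Map the highest set speed bit in the AQ link_speed bitmap to Mb/s. */
    static uint32_t aq_link_speed_to_mbps(uint8_t link_speed)
    {
            if (link_speed & (1u << 4))    /* I40E_LINK_SPEED_40GB_SHIFT  */
                    return 40000;
            if (link_speed & (1u << 6))    /* I40E_LINK_SPEED_25GB_SHIFT  */
                    return 25000;
            if (link_speed & (1u << 5))    /* I40E_LINK_SPEED_20GB_SHIFT  */
                    return 20000;
            if (link_speed & (1u << 3))    /* I40E_LINK_SPEED_10GB_SHIFT  */
                    return 10000;
            if (link_speed & (1u << 2))    /* I40E_LINK_SPEED_1000MB_SHIFT */
                    return 1000;
            if (link_speed & (1u << 1))    /* I40E_LINK_SPEED_100MB_SHIFT  */
                    return 100;
            return 0;                      /* I40E_LINK_SPEED_UNKNOWN */
    }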
-
-struct i40e_aqc_module_desc {
-	u8 oui[3];
-	u8 reserved1;
-	u8 part_number[16];
-	u8 revision[4];
-	u8 reserved2[8];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc);
-
-struct i40e_aq_get_phy_abilities_resp {
-	__le32	phy_type;       /* bitmap using the above enum for offsets */
-	u8	link_speed;     /* bitmap using the above enum bit patterns */
-	u8	abilities;
-#define I40E_AQ_PHY_FLAG_PAUSE_TX	0x01
-#define I40E_AQ_PHY_FLAG_PAUSE_RX	0x02
-#define I40E_AQ_PHY_FLAG_LOW_POWER	0x04
-#define I40E_AQ_PHY_LINK_ENABLED	0x08
-#define I40E_AQ_PHY_AN_ENABLED		0x10
-#define I40E_AQ_PHY_FLAG_MODULE_QUAL	0x20
-#define I40E_AQ_PHY_FEC_ABILITY_KR	0x40
-#define I40E_AQ_PHY_FEC_ABILITY_RS	0x80
-	__le16	eee_capability;
-#define I40E_AQ_EEE_100BASE_TX		0x0002
-#define I40E_AQ_EEE_1000BASE_T		0x0004
-#define I40E_AQ_EEE_10GBASE_T		0x0008
-#define I40E_AQ_EEE_1000BASE_KX		0x0010
-#define I40E_AQ_EEE_10GBASE_KX4		0x0020
-#define I40E_AQ_EEE_10GBASE_KR		0x0040
-	__le32	eeer_val;
-	u8	d3_lpan;
-#define I40E_AQ_SET_PHY_D3_LPAN_ENA	0x01
-	u8	phy_type_ext;
-#define I40E_AQ_PHY_TYPE_EXT_25G_KR	0X01
-#define I40E_AQ_PHY_TYPE_EXT_25G_CR	0X02
-#define I40E_AQ_PHY_TYPE_EXT_25G_SR	0x04
-#define I40E_AQ_PHY_TYPE_EXT_25G_LR	0x08
-#define I40E_AQ_PHY_TYPE_EXT_25G_AOC	0x10
-#define I40E_AQ_PHY_TYPE_EXT_25G_ACC	0x20
-	u8	fec_cfg_curr_mod_ext_info;
-#define I40E_AQ_ENABLE_FEC_KR		0x01
-#define I40E_AQ_ENABLE_FEC_RS		0x02
-#define I40E_AQ_REQUEST_FEC_KR		0x04
-#define I40E_AQ_REQUEST_FEC_RS		0x08
-#define I40E_AQ_ENABLE_FEC_AUTO		0x10
-#define I40E_AQ_FEC
-#define I40E_AQ_MODULE_TYPE_EXT_MASK	0xE0
-#define I40E_AQ_MODULE_TYPE_EXT_SHIFT	5
-
-	u8	ext_comp_code;
-	u8	phy_id[4];
-	u8	module_type[3];
-	u8	qualified_module_count;
-#define I40E_AQ_PHY_MAX_QMS		16
-	struct i40e_aqc_module_desc	qualified_module[I40E_AQ_PHY_MAX_QMS];
-};
-
-I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp);
-
-/* Set PHY Config (direct 0x0601) */
-struct i40e_aq_set_phy_config { /* same bits as above in all */
-	__le32	phy_type;
-	u8	link_speed;
-	u8	abilities;
-/* bits 0-2 use the values from get_phy_abilities_resp */
-#define I40E_AQ_PHY_ENABLE_LINK		0x08
-#define I40E_AQ_PHY_ENABLE_AN		0x10
-#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK	0x20
-	__le16	eee_capability;
-	__le32	eeer;
-	u8	low_power_ctrl;
-	u8	phy_type_ext;
-#define I40E_AQ_PHY_TYPE_EXT_25G_KR	0X01
-#define I40E_AQ_PHY_TYPE_EXT_25G_CR	0X02
-#define I40E_AQ_PHY_TYPE_EXT_25G_SR	0x04
-#define I40E_AQ_PHY_TYPE_EXT_25G_LR	0x08
-	u8	fec_config;
-#define I40E_AQ_SET_FEC_ABILITY_KR	BIT(0)
-#define I40E_AQ_SET_FEC_ABILITY_RS	BIT(1)
-#define I40E_AQ_SET_FEC_REQUEST_KR	BIT(2)
-#define I40E_AQ_SET_FEC_REQUEST_RS	BIT(3)
-#define I40E_AQ_SET_FEC_AUTO		BIT(4)
-#define I40E_AQ_PHY_FEC_CONFIG_SHIFT	0x0
-#define I40E_AQ_PHY_FEC_CONFIG_MASK	(0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT)
-	u8	reserved;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
-
-/* Set MAC Config command data structure (direct 0x0603) */
-struct i40e_aq_set_mac_config {
-	__le16	max_frame_size;
-	u8	params;
-#define I40E_AQ_SET_MAC_CONFIG_CRC_EN		0x04
-#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK	0x78
-#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT	3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE	0x0
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX	0xF
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX	0x9
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX	0x8
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX	0x7
-#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX	0x6
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX	0x5
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX	0x4
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX	0x3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX	0x2
-#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX	0x1
-	u8	tx_timer_priority; /* bitmap */
-	__le16	tx_timer_value;
-	__le16	fc_refresh_threshold;
-	u8	reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
-
-/* Restart Auto-Negotiation (direct 0x605) */
-struct i40e_aqc_set_link_restart_an {
-	u8	command;
-#define I40E_AQ_PHY_RESTART_AN	0x02
-#define I40E_AQ_PHY_LINK_ENABLE	0x04
-	u8	reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
-
-/* Get Link Status cmd & response data structure (direct 0x0607) */
-struct i40e_aqc_get_link_status {
-	__le16	command_flags; /* only field set on command */
-#define I40E_AQ_LSE_MASK		0x3
-#define I40E_AQ_LSE_NOP			0x0
-#define I40E_AQ_LSE_DISABLE		0x2
-#define I40E_AQ_LSE_ENABLE		0x3
-/* only response uses this flag */
-#define I40E_AQ_LSE_IS_ENABLED		0x1
-	u8	phy_type;    /* i40e_aq_phy_type   */
-	u8	link_speed;  /* i40e_aq_link_speed */
-	u8	link_info;
-#define I40E_AQ_LINK_UP			0x01    /* obsolete */
-#define I40E_AQ_LINK_UP_FUNCTION	0x01
-#define I40E_AQ_LINK_FAULT		0x02
-#define I40E_AQ_LINK_FAULT_TX		0x04
-#define I40E_AQ_LINK_FAULT_RX		0x08
-#define I40E_AQ_LINK_FAULT_REMOTE	0x10
-#define I40E_AQ_LINK_UP_PORT		0x20
-#define I40E_AQ_MEDIA_AVAILABLE		0x40
-#define I40E_AQ_SIGNAL_DETECT		0x80
-	u8	an_info;
-#define I40E_AQ_AN_COMPLETED		0x01
-#define I40E_AQ_LP_AN_ABILITY		0x02
-#define I40E_AQ_PD_FAULT		0x04
-#define I40E_AQ_FEC_EN			0x08
-#define I40E_AQ_PHY_LOW_POWER		0x10
-#define I40E_AQ_LINK_PAUSE_TX		0x20
-#define I40E_AQ_LINK_PAUSE_RX		0x40
-#define I40E_AQ_QUALIFIED_MODULE	0x80
-	u8	ext_info;
-#define I40E_AQ_LINK_PHY_TEMP_ALARM	0x01
-#define I40E_AQ_LINK_XCESSIVE_ERRORS	0x02
-#define I40E_AQ_LINK_TX_SHIFT		0x02
-#define I40E_AQ_LINK_TX_MASK		(0x03 << I40E_AQ_LINK_TX_SHIFT)
-#define I40E_AQ_LINK_TX_ACTIVE		0x00
-#define I40E_AQ_LINK_TX_DRAINED		0x01
-#define I40E_AQ_LINK_TX_FLUSHED		0x03
-#define I40E_AQ_LINK_FORCED_40G		0x10
-/* 25G Error Codes */
-#define I40E_AQ_25G_NO_ERR		0X00
-#define I40E_AQ_25G_NOT_PRESENT		0X01
-#define I40E_AQ_25G_NVM_CRC_ERR		0X02
-#define I40E_AQ_25G_SBUS_UCODE_ERR	0X03
-#define I40E_AQ_25G_SERDES_UCODE_ERR	0X04
-#define I40E_AQ_25G_NIMB_UCODE_ERR	0X05
-	u8	loopback; /* use defines from i40e_aqc_set_lb_mode */
-/* Since firmware API 1.7 loopback field keeps power class info as well */
-#define I40E_AQ_LOOPBACK_MASK		0x07
-#define I40E_AQ_PWR_CLASS_SHIFT_LB	6
-#define I40E_AQ_PWR_CLASS_MASK_LB	(0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB)
-	__le16	max_frame_size;
-	u8	config;
-#define I40E_AQ_CONFIG_FEC_KR_ENA	0x01
-#define I40E_AQ_CONFIG_FEC_RS_ENA	0x02
-#define I40E_AQ_CONFIG_CRC_ENA		0x04
-#define I40E_AQ_CONFIG_PACING_MASK	0x78
-	union {
-		struct {
-			u8	power_desc;
-#define I40E_AQ_LINK_POWER_CLASS_1	0x00
-#define I40E_AQ_LINK_POWER_CLASS_2	0x01
-#define I40E_AQ_LINK_POWER_CLASS_3	0x02
-#define I40E_AQ_LINK_POWER_CLASS_4	0x03
-#define I40E_AQ_PWR_CLASS_MASK		0x03
-			u8	reserved[4];
-		};
-		struct {
-			u8	link_type[4];
-			u8	link_type_ext;
-		};
-	};
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
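[Editorial note, not part of the patch: as the comment above says, since firmware API 1.7 the loopback byte carries two fields: the loopback mode in the low bits and the module power class in bits 6-7. A hedged sketch of splitting the two; the function name is an assumption, the mask and shift values come from the defines above.]

    #include <stdint.h>

    /* Split the combined get_link_status loopback byte into its two fields. */
    static void decode_loopback_byte(uint8_t loopback,
                                     uint8_t *lb_mode, uint8_t *pwr_class)
    {
            *lb_mode   = loopback & 0x07;                /* I40E_AQ_LOOPBACK_MASK */
            *pwr_class = (loopback & (0x03 << 6)) >> 6;  /* PWR_CLASS_MASK/SHIFT_LB */
    }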
-
-/* Set event mask command (direct 0x613) */
-struct i40e_aqc_set_phy_int_mask {
-	u8	reserved[8];
-	__le16	event_mask;
-#define I40E_AQ_EVENT_LINK_UPDOWN	0x0002
-#define I40E_AQ_EVENT_MEDIA_NA		0x0004
-#define I40E_AQ_EVENT_LINK_FAULT	0x0008
-#define I40E_AQ_EVENT_PHY_TEMP_ALARM	0x0010
-#define I40E_AQ_EVENT_EXCESSIVE_ERRORS	0x0020
-#define I40E_AQ_EVENT_SIGNAL_DETECT	0x0040
-#define I40E_AQ_EVENT_AN_COMPLETED	0x0080
-#define I40E_AQ_EVENT_MODULE_QUAL_FAIL	0x0100
-#define I40E_AQ_EVENT_PORT_TX_SUSPENDED	0x0200
-	u8	reserved1[6];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
-
-/* Get Local AN advt register (direct 0x0614)
- * Set Local AN advt register (direct 0x0615)
- * Get Link Partner AN advt register (direct 0x0616)
- */
-struct i40e_aqc_an_advt_reg {
-	__le32	local_an_reg0;
-	__le16	local_an_reg1;
-	u8	reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
-
-/* Set Loopback mode (0x0618) */
-struct i40e_aqc_set_lb_mode {
-	__le16	lb_mode;
-#define I40E_AQ_LB_PHY_LOCAL	0x01
-#define I40E_AQ_LB_PHY_REMOTE	0x02
-#define I40E_AQ_LB_MAC_LOCAL	0x04
-	u8	reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
-
-/* Set PHY Debug command (0x0622) */
-struct i40e_aqc_set_phy_debug {
-	u8	command_flags;
-#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL	0x02
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT	2
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK	(0x03 << \
-					I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE	0x00
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD	0x01
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT	0x02
-#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW	0x10
-	u8	reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
-
-enum i40e_aq_phy_reg_type {
-	I40E_AQC_PHY_REG_INTERNAL	= 0x1,
-	I40E_AQC_PHY_REG_EXERNAL_BASET	= 0x2,
-	I40E_AQC_PHY_REG_EXERNAL_MODULE	= 0x3
-};
-
-/* Run PHY Activity (0x0626) */
-struct i40e_aqc_run_phy_activity {
-	__le16  activity_id;
-	u8      flags;
-	u8      reserved1;
-	__le32  control;
-	__le32  data;
-	u8      reserved2[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
-
-/* Set PHY Register command (0x0628) */
-/* Get PHY Register command (0x0629) */
-struct i40e_aqc_phy_register_access {
-	u8	phy_interface;
-#define I40E_AQ_PHY_REG_ACCESS_INTERNAL	0
-#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL	1
-#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE	2
-	u8	dev_address;
-	u8	reserved1[2];
-	__le32	reg_address;
-	__le32	reg_value;
-	u8	reserved2[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
-
-/* NVM Read command (indirect 0x0701)
- * NVM Erase commands (direct 0x0702)
- * NVM Update commands (indirect 0x0703)
- */
-struct i40e_aqc_nvm_update {
-	u8	command_flags;
-#define I40E_AQ_NVM_LAST_CMD			0x01
-#define I40E_AQ_NVM_REARRANGE_TO_FLAT		0x20
-#define I40E_AQ_NVM_REARRANGE_TO_STRUCT		0x40
-#define I40E_AQ_NVM_FLASH_ONLY			0x80
-#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT	1
-#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK	0x03
-#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED	0x03
-#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL	0x01
-	u8	module_pointer;
-	__le16	length;
-	__le32	offset;
-	__le32	addr_high;
-	__le32	addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
-
-/* NVM Config Read (indirect 0x0704) */
-struct i40e_aqc_nvm_config_read {
-	__le16	cmd_flags;
-#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK	1
-#define I40E_AQ_ANVM_READ_SINGLE_FEATURE		0
-#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES		1
-	__le16	element_count;
-	__le16	element_id;	/* Feature/field ID */
-	__le16	element_id_msw;	/* MSWord of field ID */
-	__le32	address_high;
-	__le32	address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
-
-/* NVM Config Write (indirect 0x0705) */
-struct i40e_aqc_nvm_config_write {
-	__le16	cmd_flags;
-	__le16	element_count;
-	u8	reserved[4];
-	__le32	address_high;
-	__le32	address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
-
-/* Used for 0x0704 as well as for 0x0705 commands */
-#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT		1
-#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
-				BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
-#define I40E_AQ_ANVM_FEATURE		0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD	BIT(FEATURE_OR_IMMEDIATE_SHIFT)
-struct i40e_aqc_nvm_config_data_feature {
-	__le16 feature_id;
-#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY		0x01
-#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP		0x08
-#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR		0x10
-	__le16 feature_options;
-	__le16 feature_selection;
-};
-
-I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
-
-struct i40e_aqc_nvm_config_data_immediate_field {
-	__le32 field_id;
-	__le32 field_value;
-	__le16 field_options;
-	__le16 reserved;
-};
-
-I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
-
-/* OEM Post Update (indirect 0x0720)
- * no command data struct used
- */
- struct i40e_aqc_nvm_oem_post_update {
-#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA	0x01
-	u8 sel_data;
-	u8 reserved[7];
-};
-
-I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update);
-
-struct i40e_aqc_nvm_oem_post_update_buffer {
-	u8 str_len;
-	u8 dev_addr;
-	__le16 eeprom_addr;
-	u8 data[36];
-};
-
-I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
-
-/* Thermal Sensor (indirect 0x0721)
- *     read or set thermal sensor configs and values
- *     takes a sensor and command specific data buffer, not detailed here
- */
-struct i40e_aqc_thermal_sensor {
-	u8 sensor_action;
-#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG	0
-#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG	1
-#define I40E_AQ_THERMAL_SENSOR_READ_TEMP	2
-	u8 reserved[7];
-	__le32	addr_high;
-	__le32	addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor);
-
-/* Send to PF command (indirect 0x0801) id is only used by PF
- * Send to VF command (indirect 0x0802) id is only used by PF
- * Send to Peer PF command (indirect 0x0803)
- */
-struct i40e_aqc_pf_vf_message {
-	__le32	id;
-	u8	reserved[4];
-	__le32	addr_high;
-	__le32	addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
-
-/* Alternate structure */
-
-/* Direct write (direct 0x0900)
- * Direct read (direct 0x0902)
- */
-struct i40e_aqc_alternate_write {
-	__le32 address0;
-	__le32 data0;
-	__le32 address1;
-	__le32 data1;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
-
-/* Indirect write (indirect 0x0901)
- * Indirect read (indirect 0x0903)
- */
-
-struct i40e_aqc_alternate_ind_write {
-	__le32 address;
-	__le32 length;
-	__le32 addr_high;
-	__le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
-
-/* Done alternate write (direct 0x0904)
- * uses i40e_aq_desc
- */
-struct i40e_aqc_alternate_write_done {
-	__le16	cmd_flags;
-#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK	1
-#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY	0
-#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI	1
-#define I40E_AQ_ALTERNATE_RESET_NEEDED		2
-	u8	reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
-
-/* Set OEM mode (direct 0x0905) */
-struct i40e_aqc_alternate_set_mode {
-	__le32	mode;
-#define I40E_AQ_ALTERNATE_MODE_NONE	0
-#define I40E_AQ_ALTERNATE_MODE_OEM	1
-	u8	reserved[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
-
-/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
-
-/* async events 0x10xx */
-
-/* Lan Queue Overflow Event (direct, 0x1001) */
-struct i40e_aqc_lan_overflow {
-	__le32	prtdcb_rupto;
-	__le32	otx_ctl;
-	u8	reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
-
-/* Get LLDP MIB (indirect 0x0A00) */
-struct i40e_aqc_lldp_get_mib {
-	u8	type;
-	u8	reserved1;
-#define I40E_AQ_LLDP_MIB_TYPE_MASK		0x3
-#define I40E_AQ_LLDP_MIB_LOCAL			0x0
-#define I40E_AQ_LLDP_MIB_REMOTE			0x1
-#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE	0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK		0xC
-#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT		0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE	0x0
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR	0x1
-#define I40E_AQ_LLDP_TX_SHIFT			0x4
-#define I40E_AQ_LLDP_TX_MASK			(0x03 << I40E_AQ_LLDP_TX_SHIFT)
-/* TX pause flags use I40E_AQ_LINK_TX_* above */
-	__le16	local_len;
-	__le16	remote_len;
-	u8	reserved2[2];
-	__le32	addr_high;
-	__le32	addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
-
-/* Configure LLDP MIB Change Event (direct 0x0A01)
- * also used for the event (with type in the command field)
- */
-struct i40e_aqc_lldp_update_mib {
-	u8	command;
-#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE	0x0
-#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE	0x1
-	u8	reserved[7];
-	__le32	addr_high;
-	__le32	addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
-
-/* Add LLDP TLV (indirect 0x0A02)
- * Delete LLDP TLV (indirect 0x0A04)
- */
-struct i40e_aqc_lldp_add_tlv {
-	u8	type; /* only nearest bridge and non-TPMR from 0x0A00 */
-	u8	reserved1[1];
-	__le16	len;
-	u8	reserved2[4];
-	__le32	addr_high;
-	__le32	addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
-
-/* Update LLDP TLV (indirect 0x0A03) */
-struct i40e_aqc_lldp_update_tlv {
-	u8	type; /* only nearest bridge and non-TPMR from 0x0A00 */
-	u8	reserved;
-	__le16	old_len;
-	__le16	new_offset;
-	__le16	new_len;
-	__le32	addr_high;
-	__le32	addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
-
-/* Stop LLDP (direct 0x0A05) */
-struct i40e_aqc_lldp_stop {
-	u8	command;
-#define I40E_AQ_LLDP_AGENT_STOP		0x0
-#define I40E_AQ_LLDP_AGENT_SHUTDOWN	0x1
-	u8	reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
-
-/* Start LLDP (direct 0x0A06) */
-
-struct i40e_aqc_lldp_start {
-	u8	command;
-#define I40E_AQ_LLDP_AGENT_START	0x1
-	u8	reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
-
-/* Set DCB (direct 0x0303) */
-struct i40e_aqc_set_dcb_parameters {
-	u8 command;
-#define I40E_AQ_DCB_SET_AGENT	0x1
-#define I40E_DCB_VALID		0x1
-	u8 valid_flags;
-	u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
-
-/* Apply MIB changes (0x0A07)
- * uses the generic struct as it contains no data
- */
-
-/* Add Udp Tunnel command and completion (direct 0x0B00) */
-struct i40e_aqc_add_udp_tunnel {
-	__le16	udp_port;
-	u8	reserved0[3];
-	u8	protocol_type;
-#define I40E_AQC_TUNNEL_TYPE_VXLAN	0x00
-#define I40E_AQC_TUNNEL_TYPE_NGE	0x01
-#define I40E_AQC_TUNNEL_TYPE_TEREDO	0x10
-#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE	0x11
-	u8	reserved1[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
-
-struct i40e_aqc_add_udp_tunnel_completion {
-	__le16 udp_port;
-	u8	filter_entry_index;
-	u8	multiple_pfs;
-#define I40E_AQC_SINGLE_PF		0x0
-#define I40E_AQC_MULTIPLE_PFS		0x1
-	u8	total_filters;
-	u8	reserved[11];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
-
-/* remove UDP Tunnel command (0x0B01) */
-struct i40e_aqc_remove_udp_tunnel {
-	u8	reserved[2];
-	u8	index; /* 0 to 15 */
-	u8	reserved2[13];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
-
-struct i40e_aqc_del_udp_tunnel_completion {
-	__le16	udp_port;
-	u8	index; /* 0 to 15 */
-	u8	multiple_pfs;
-	u8	total_filters_used;
-	u8	reserved1[11];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
-
-struct i40e_aqc_get_set_rss_key {
-#define I40E_AQC_SET_RSS_KEY_VSI_VALID		BIT(15)
-#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT	0
-#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK	(0x3FF << \
-					I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
-	__le16	vsi_id;
-	u8	reserved[6];
-	__le32	addr_high;
-	__le32	addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
-
-struct i40e_aqc_get_set_rss_key_data {
-	u8 standard_rss_key[0x28];
-	u8 extended_hash_key[0xc];
-};
-
-I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
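[Editorial note, not part of the patch: the 0x34-byte RSS key buffer is simply a 0x28-byte standard hash key followed by a 0xc-byte extended hash key. A trivial standalone sketch of filling such a buffer from a caller-supplied 52-byte seed, using a local mirror of the layout rather than the driver's struct.]

    #include <stdint.h>
    #include <string.h>

    /* Local mirror of the 0x34-byte RSS key layout, for illustration only. */
    struct rss_key_data_example {
            uint8_t standard_rss_key[0x28];
            uint8_t extended_hash_key[0xc];
    };

    /* Copy a 52-byte seed into the two sub-keys of the AQ buffer. */
    static void fill_rss_key_example(struct rss_key_data_example *key,
                                     const uint8_t seed[0x34])
    {
            memcpy(key->standard_rss_key, seed, sizeof(key->standard_rss_key));
            memcpy(key->extended_hash_key,
                   seed + sizeof(key->standard_rss_key),
                   sizeof(key->extended_hash_key));
    }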
-
-struct  i40e_aqc_get_set_rss_lut {
-#define I40E_AQC_SET_RSS_LUT_VSI_VALID		BIT(15)
-#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT	0
-#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK	(0x3FF << \
-					I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
-	__le16	vsi_id;
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT	0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \
-				BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
-
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI	0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF	1
-	__le16	flags;
-	u8	reserved[4];
-	__le32	addr_high;
-	__le32	addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
-
-/* tunnel key structure 0x0B10 */
-
-struct i40e_aqc_tunnel_key_structure_A0 {
-	__le16     key1_off;
-	__le16     key1_len;
-	__le16     key2_off;
-	__le16     key2_len;
-	__le16     flags;
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
-/* response flags */
-#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS    0x01
-#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED   0x02
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
-	u8         resreved[6];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure_A0);
-
-struct i40e_aqc_tunnel_key_structure {
-	u8	key1_off;
-	u8	key2_off;
-	u8	key1_len;  /* 0 to 15 */
-	u8	key2_len;  /* 0 to 15 */
-	u8	flags;
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE	0x01
-/* response flags */
-#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS	0x01
-#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED	0x02
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN	0x03
-	u8	network_key_index;
-#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN		0x0
-#define I40E_AQC_NETWORK_KEY_INDEX_NGE			0x1
-#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP	0x2
-#define I40E_AQC_NETWORK_KEY_INDEX_GRE			0x3
-	u8	reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
-
-/* OEM mode commands (direct 0xFE0x) */
-struct i40e_aqc_oem_param_change {
-	__le32	param_type;
-#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL	0
-#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL	1
-#define I40E_AQ_OEM_PARAM_MAC		2
-	__le32	param_value1;
-	__le16	param_value2;
-	u8	reserved[6];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
-
-struct i40e_aqc_oem_state_change {
-	__le32	state;
-#define I40E_AQ_OEM_STATE_LINK_DOWN	0x0
-#define I40E_AQ_OEM_STATE_LINK_UP	0x1
-	u8	reserved[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
-
-/* Initialize OCSD (0xFE02, direct) */
-struct i40e_aqc_opc_oem_ocsd_initialize {
-	u8 type_status;
-	u8 reserved1[3];
-	__le32 ocsd_memory_block_addr_high;
-	__le32 ocsd_memory_block_addr_low;
-	__le32 requested_update_interval;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
-
-/* Initialize OCBB  (0xFE03, direct) */
-struct i40e_aqc_opc_oem_ocbb_initialize {
-	u8 type_status;
-	u8 reserved1[3];
-	__le32 ocbb_memory_block_addr_high;
-	__le32 ocbb_memory_block_addr_low;
-	u8 reserved2[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
-
-/* debug commands */
-
-/* get device id (0xFF00) uses the generic structure */
-
-/* set test mode (0xFF01, internal) */
-
-struct i40e_acq_set_test_mode {
-	u8	mode;
-#define I40E_AQ_TEST_PARTIAL	0
-#define I40E_AQ_TEST_FULL	1
-#define I40E_AQ_TEST_NVM	2
-	u8	reserved[3];
-	u8	command;
-#define I40E_AQ_TEST_OPEN	0
-#define I40E_AQ_TEST_CLOSE	1
-#define I40E_AQ_TEST_INC	2
-	u8	reserved2[3];
-	__le32	address_high;
-	__le32	address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
-
-/* Debug Read Register command (0xFF03)
- * Debug Write Register command (0xFF04)
- */
-struct i40e_aqc_debug_reg_read_write {
-	__le32 reserved;
-	__le32 address;
-	__le32 value_high;
-	__le32 value_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
-
-/* Scatter/gather Reg Read  (indirect 0xFF05)
- * Scatter/gather Reg Write (indirect 0xFF06)
- */
-
-/* i40e_aq_desc is used for the command */
-struct i40e_aqc_debug_reg_sg_element_data {
-	__le32 address;
-	__le32 value;
-};
-
-/* Debug Modify register (direct 0xFF07) */
-struct i40e_aqc_debug_modify_reg {
-	__le32 address;
-	__le32 value;
-	__le32 clear_mask;
-	__le32 set_mask;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
-
-/* dump internal data (0xFF08, indirect) */
-
-#define I40E_AQ_CLUSTER_ID_AUX		0
-#define I40E_AQ_CLUSTER_ID_SWITCH_FLU	1
-#define I40E_AQ_CLUSTER_ID_TXSCHED	2
-#define I40E_AQ_CLUSTER_ID_HMC		3
-#define I40E_AQ_CLUSTER_ID_MAC0		4
-#define I40E_AQ_CLUSTER_ID_MAC1		5
-#define I40E_AQ_CLUSTER_ID_MAC2		6
-#define I40E_AQ_CLUSTER_ID_MAC3		7
-#define I40E_AQ_CLUSTER_ID_DCB		8
-#define I40E_AQ_CLUSTER_ID_EMP_MEM	9
-#define I40E_AQ_CLUSTER_ID_PKT_BUF	10
-#define I40E_AQ_CLUSTER_ID_ALTRAM	11
-
-struct i40e_aqc_debug_dump_internals {
-	u8	cluster_id;
-	u8	table_id;
-	__le16	data_size;
-	__le32	idx;
-	__le32	address_high;
-	__le32	address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
-
-struct i40e_aqc_debug_modify_internals {
-	u8	cluster_id;
-	u8	cluster_specific_params[7];
-	__le32	address_high;
-	__le32	address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
-
-#endif /* _I40E_ADMINQ_CMD_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
deleted file mode 100644
index cb8689222c8b..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_ALLOC_H_
-#define _I40E_ALLOC_H_
-
-struct i40e_hw;
-
-/* Memory allocation types */
-enum i40e_memory_type {
-	i40e_mem_arq_buf = 0,		/* ARQ indirect command buffer */
-	i40e_mem_asq_buf = 1,
-	i40e_mem_atq_buf = 2,		/* ATQ indirect command buffer */
-	i40e_mem_arq_ring = 3,		/* ARQ descriptor ring */
-	i40e_mem_atq_ring = 4,		/* ATQ descriptor ring */
-	i40e_mem_pd = 5,		/* Page Descriptor */
-	i40e_mem_bp = 6,		/* Backing Page - 4KB */
-	i40e_mem_bp_jumbo = 7,		/* Backing Page - > 4KB */
-	i40e_mem_reserved
-};
-
-/* prototype for functions used for dynamic memory allocation */
-i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
-					    struct i40e_dma_mem *mem,
-					    enum i40e_memory_type type,
-					    u64 size, u32 alignment);
-i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
-					struct i40e_dma_mem *mem);
-i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
-					     struct i40e_virt_mem *mem,
-					     u32 size);
-i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
-					 struct i40e_virt_mem *mem);
-
-#endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
deleted file mode 100644
index eea280ba411e..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ /dev/null
@@ -1,1320 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#include "i40e_type.h"
-#include "i40e_adminq.h"
-#include "i40e_prototype.h"
-#include <linux/avf/virtchnl.h>
-
-/**
- * i40e_set_mac_type - Sets MAC type
- * @hw: pointer to the HW structure
- *
- * This function sets the mac type of the adapter based on the
- * vendor ID and device ID stored in the hw structure.
- **/
-i40e_status i40e_set_mac_type(struct i40e_hw *hw)
-{
-	i40e_status status = 0;
-
-	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
-		switch (hw->device_id) {
-		case I40E_DEV_ID_SFP_XL710:
-		case I40E_DEV_ID_QEMU:
-		case I40E_DEV_ID_KX_B:
-		case I40E_DEV_ID_KX_C:
-		case I40E_DEV_ID_QSFP_A:
-		case I40E_DEV_ID_QSFP_B:
-		case I40E_DEV_ID_QSFP_C:
-		case I40E_DEV_ID_10G_BASE_T:
-		case I40E_DEV_ID_10G_BASE_T4:
-		case I40E_DEV_ID_20G_KR2:
-		case I40E_DEV_ID_20G_KR2_A:
-		case I40E_DEV_ID_25G_B:
-		case I40E_DEV_ID_25G_SFP28:
-			hw->mac.type = I40E_MAC_XL710;
-			break;
-		case I40E_DEV_ID_SFP_X722:
-		case I40E_DEV_ID_1G_BASE_T_X722:
-		case I40E_DEV_ID_10G_BASE_T_X722:
-		case I40E_DEV_ID_SFP_I_X722:
-			hw->mac.type = I40E_MAC_X722;
-			break;
-		case I40E_DEV_ID_X722_VF:
-			hw->mac.type = I40E_MAC_X722_VF;
-			break;
-		case I40E_DEV_ID_VF:
-		case I40E_DEV_ID_VF_HV:
-		case I40E_DEV_ID_ADAPTIVE_VF:
-			hw->mac.type = I40E_MAC_VF;
-			break;
-		default:
-			hw->mac.type = I40E_MAC_GENERIC;
-			break;
-		}
-	} else {
-		status = I40E_ERR_DEVICE_NOT_SUPPORTED;
-	}
-
-	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
-		  hw->mac.type, status);
-	return status;
-}
-
-/**
- * i40evf_aq_str - convert AQ err code to a string
- * @hw: pointer to the HW structure
- * @aq_err: the AQ error code to convert
- **/
-const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
-{
-	switch (aq_err) {
-	case I40E_AQ_RC_OK:
-		return "OK";
-	case I40E_AQ_RC_EPERM:
-		return "I40E_AQ_RC_EPERM";
-	case I40E_AQ_RC_ENOENT:
-		return "I40E_AQ_RC_ENOENT";
-	case I40E_AQ_RC_ESRCH:
-		return "I40E_AQ_RC_ESRCH";
-	case I40E_AQ_RC_EINTR:
-		return "I40E_AQ_RC_EINTR";
-	case I40E_AQ_RC_EIO:
-		return "I40E_AQ_RC_EIO";
-	case I40E_AQ_RC_ENXIO:
-		return "I40E_AQ_RC_ENXIO";
-	case I40E_AQ_RC_E2BIG:
-		return "I40E_AQ_RC_E2BIG";
-	case I40E_AQ_RC_EAGAIN:
-		return "I40E_AQ_RC_EAGAIN";
-	case I40E_AQ_RC_ENOMEM:
-		return "I40E_AQ_RC_ENOMEM";
-	case I40E_AQ_RC_EACCES:
-		return "I40E_AQ_RC_EACCES";
-	case I40E_AQ_RC_EFAULT:
-		return "I40E_AQ_RC_EFAULT";
-	case I40E_AQ_RC_EBUSY:
-		return "I40E_AQ_RC_EBUSY";
-	case I40E_AQ_RC_EEXIST:
-		return "I40E_AQ_RC_EEXIST";
-	case I40E_AQ_RC_EINVAL:
-		return "I40E_AQ_RC_EINVAL";
-	case I40E_AQ_RC_ENOTTY:
-		return "I40E_AQ_RC_ENOTTY";
-	case I40E_AQ_RC_ENOSPC:
-		return "I40E_AQ_RC_ENOSPC";
-	case I40E_AQ_RC_ENOSYS:
-		return "I40E_AQ_RC_ENOSYS";
-	case I40E_AQ_RC_ERANGE:
-		return "I40E_AQ_RC_ERANGE";
-	case I40E_AQ_RC_EFLUSHED:
-		return "I40E_AQ_RC_EFLUSHED";
-	case I40E_AQ_RC_BAD_ADDR:
-		return "I40E_AQ_RC_BAD_ADDR";
-	case I40E_AQ_RC_EMODE:
-		return "I40E_AQ_RC_EMODE";
-	case I40E_AQ_RC_EFBIG:
-		return "I40E_AQ_RC_EFBIG";
-	}
-
-	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
-	return hw->err_str;
-}
-
-/**
- * i40evf_stat_str - convert status err code to a string
- * @hw: pointer to the HW structure
- * @stat_err: the status error code to convert
- **/
-const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
-{
-	switch (stat_err) {
-	case 0:
-		return "OK";
-	case I40E_ERR_NVM:
-		return "I40E_ERR_NVM";
-	case I40E_ERR_NVM_CHECKSUM:
-		return "I40E_ERR_NVM_CHECKSUM";
-	case I40E_ERR_PHY:
-		return "I40E_ERR_PHY";
-	case I40E_ERR_CONFIG:
-		return "I40E_ERR_CONFIG";
-	case I40E_ERR_PARAM:
-		return "I40E_ERR_PARAM";
-	case I40E_ERR_MAC_TYPE:
-		return "I40E_ERR_MAC_TYPE";
-	case I40E_ERR_UNKNOWN_PHY:
-		return "I40E_ERR_UNKNOWN_PHY";
-	case I40E_ERR_LINK_SETUP:
-		return "I40E_ERR_LINK_SETUP";
-	case I40E_ERR_ADAPTER_STOPPED:
-		return "I40E_ERR_ADAPTER_STOPPED";
-	case I40E_ERR_INVALID_MAC_ADDR:
-		return "I40E_ERR_INVALID_MAC_ADDR";
-	case I40E_ERR_DEVICE_NOT_SUPPORTED:
-		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
-	case I40E_ERR_MASTER_REQUESTS_PENDING:
-		return "I40E_ERR_MASTER_REQUESTS_PENDING";
-	case I40E_ERR_INVALID_LINK_SETTINGS:
-		return "I40E_ERR_INVALID_LINK_SETTINGS";
-	case I40E_ERR_AUTONEG_NOT_COMPLETE:
-		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
-	case I40E_ERR_RESET_FAILED:
-		return "I40E_ERR_RESET_FAILED";
-	case I40E_ERR_SWFW_SYNC:
-		return "I40E_ERR_SWFW_SYNC";
-	case I40E_ERR_NO_AVAILABLE_VSI:
-		return "I40E_ERR_NO_AVAILABLE_VSI";
-	case I40E_ERR_NO_MEMORY:
-		return "I40E_ERR_NO_MEMORY";
-	case I40E_ERR_BAD_PTR:
-		return "I40E_ERR_BAD_PTR";
-	case I40E_ERR_RING_FULL:
-		return "I40E_ERR_RING_FULL";
-	case I40E_ERR_INVALID_PD_ID:
-		return "I40E_ERR_INVALID_PD_ID";
-	case I40E_ERR_INVALID_QP_ID:
-		return "I40E_ERR_INVALID_QP_ID";
-	case I40E_ERR_INVALID_CQ_ID:
-		return "I40E_ERR_INVALID_CQ_ID";
-	case I40E_ERR_INVALID_CEQ_ID:
-		return "I40E_ERR_INVALID_CEQ_ID";
-	case I40E_ERR_INVALID_AEQ_ID:
-		return "I40E_ERR_INVALID_AEQ_ID";
-	case I40E_ERR_INVALID_SIZE:
-		return "I40E_ERR_INVALID_SIZE";
-	case I40E_ERR_INVALID_ARP_INDEX:
-		return "I40E_ERR_INVALID_ARP_INDEX";
-	case I40E_ERR_INVALID_FPM_FUNC_ID:
-		return "I40E_ERR_INVALID_FPM_FUNC_ID";
-	case I40E_ERR_QP_INVALID_MSG_SIZE:
-		return "I40E_ERR_QP_INVALID_MSG_SIZE";
-	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
-		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
-	case I40E_ERR_INVALID_FRAG_COUNT:
-		return "I40E_ERR_INVALID_FRAG_COUNT";
-	case I40E_ERR_QUEUE_EMPTY:
-		return "I40E_ERR_QUEUE_EMPTY";
-	case I40E_ERR_INVALID_ALIGNMENT:
-		return "I40E_ERR_INVALID_ALIGNMENT";
-	case I40E_ERR_FLUSHED_QUEUE:
-		return "I40E_ERR_FLUSHED_QUEUE";
-	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
-		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
-	case I40E_ERR_INVALID_IMM_DATA_SIZE:
-		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
-	case I40E_ERR_TIMEOUT:
-		return "I40E_ERR_TIMEOUT";
-	case I40E_ERR_OPCODE_MISMATCH:
-		return "I40E_ERR_OPCODE_MISMATCH";
-	case I40E_ERR_CQP_COMPL_ERROR:
-		return "I40E_ERR_CQP_COMPL_ERROR";
-	case I40E_ERR_INVALID_VF_ID:
-		return "I40E_ERR_INVALID_VF_ID";
-	case I40E_ERR_INVALID_HMCFN_ID:
-		return "I40E_ERR_INVALID_HMCFN_ID";
-	case I40E_ERR_BACKING_PAGE_ERROR:
-		return "I40E_ERR_BACKING_PAGE_ERROR";
-	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
-		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
-	case I40E_ERR_INVALID_PBLE_INDEX:
-		return "I40E_ERR_INVALID_PBLE_INDEX";
-	case I40E_ERR_INVALID_SD_INDEX:
-		return "I40E_ERR_INVALID_SD_INDEX";
-	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
-		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
-	case I40E_ERR_INVALID_SD_TYPE:
-		return "I40E_ERR_INVALID_SD_TYPE";
-	case I40E_ERR_MEMCPY_FAILED:
-		return "I40E_ERR_MEMCPY_FAILED";
-	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
-		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
-	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
-		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
-	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
-		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
-	case I40E_ERR_SRQ_ENABLED:
-		return "I40E_ERR_SRQ_ENABLED";
-	case I40E_ERR_ADMIN_QUEUE_ERROR:
-		return "I40E_ERR_ADMIN_QUEUE_ERROR";
-	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
-		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
-	case I40E_ERR_BUF_TOO_SHORT:
-		return "I40E_ERR_BUF_TOO_SHORT";
-	case I40E_ERR_ADMIN_QUEUE_FULL:
-		return "I40E_ERR_ADMIN_QUEUE_FULL";
-	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
-		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
-	case I40E_ERR_BAD_IWARP_CQE:
-		return "I40E_ERR_BAD_IWARP_CQE";
-	case I40E_ERR_NVM_BLANK_MODE:
-		return "I40E_ERR_NVM_BLANK_MODE";
-	case I40E_ERR_NOT_IMPLEMENTED:
-		return "I40E_ERR_NOT_IMPLEMENTED";
-	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
-		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
-	case I40E_ERR_DIAG_TEST_FAILED:
-		return "I40E_ERR_DIAG_TEST_FAILED";
-	case I40E_ERR_NOT_READY:
-		return "I40E_ERR_NOT_READY";
-	case I40E_NOT_SUPPORTED:
-		return "I40E_NOT_SUPPORTED";
-	case I40E_ERR_FIRMWARE_API_VERSION:
-		return "I40E_ERR_FIRMWARE_API_VERSION";
-	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
-		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
-	}
-
-	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
-	return hw->err_str;
-}
-
-/**
- * i40evf_debug_aq
- * @hw: pointer to the hw struct
- * @mask: debug mask
- * @desc: pointer to admin queue descriptor
- * @buffer: pointer to command buffer
- * @buf_len: max length of buffer
- *
- * Dumps debug log about adminq command with descriptor contents.
- **/
-void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
-		   void *buffer, u16 buf_len)
-{
-	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
-	u8 *buf = (u8 *)buffer;
-
-	if ((!(mask & hw->debug_mask)) || (desc == NULL))
-		return;
-
-	i40e_debug(hw, mask,
-		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
-		   le16_to_cpu(aq_desc->opcode),
-		   le16_to_cpu(aq_desc->flags),
-		   le16_to_cpu(aq_desc->datalen),
-		   le16_to_cpu(aq_desc->retval));
-	i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
-		   le32_to_cpu(aq_desc->cookie_high),
-		   le32_to_cpu(aq_desc->cookie_low));
-	i40e_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
-		   le32_to_cpu(aq_desc->params.internal.param0),
-		   le32_to_cpu(aq_desc->params.internal.param1));
-	i40e_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
-		   le32_to_cpu(aq_desc->params.external.addr_high),
-		   le32_to_cpu(aq_desc->params.external.addr_low));
-
-	if ((buffer != NULL) && (aq_desc->datalen != 0)) {
-		u16 len = le16_to_cpu(aq_desc->datalen);
-
-		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
-		if (buf_len < len)
-			len = buf_len;
-		/* write the full 16-byte chunks */
-		if (hw->debug_mask & mask) {
-			char prefix[27];
-
-			snprintf(prefix, sizeof(prefix),
-				 "i40evf %02x:%02x.%x: \t0x",
-				 hw->bus.bus_id,
-				 hw->bus.device,
-				 hw->bus.func);
-
-			print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
-				       16, 1, buf, len, false);
-		}
-	}
-}
-
-/**
- * i40evf_check_asq_alive
- * @hw: pointer to the hw struct
- *
- * Returns true if Queue is enabled else false.
- **/
-bool i40evf_check_asq_alive(struct i40e_hw *hw)
-{
-	if (hw->aq.asq.len)
-		return !!(rd32(hw, hw->aq.asq.len) &
-			  I40E_VF_ATQLEN1_ATQENABLE_MASK);
-	else
-		return false;
-}
-
-/**
- * i40evf_aq_queue_shutdown
- * @hw: pointer to the hw struct
- * @unloading: is the driver unloading itself
- *
- * Tell the Firmware that we're shutting down the AdminQ and whether
- * or not the driver is unloading as well.
- **/
-i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
-					     bool unloading)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_queue_shutdown *cmd =
-		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
-	i40e_status status;
-
-	i40evf_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_queue_shutdown);
-
-	if (unloading)
-		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
-	status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
-
-	return status;
-}
-
-/**
- * i40e_aq_get_set_rss_lut
- * @hw: pointer to the hardware structure
- * @vsi_id: vsi fw index
- * @pf_lut: for PF table set true, for VSI table set false
- * @lut: pointer to the lut buffer provided by the caller
- * @lut_size: size of the lut buffer
- * @set: set true to set the table, false to get the table
- *
- * Internal function to get or set RSS look up table
- **/
-static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
-					   u16 vsi_id, bool pf_lut,
-					   u8 *lut, u16 lut_size,
-					   bool set)
-{
-	i40e_status status;
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_get_set_rss_lut *cmd_resp =
-		   (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
-
-	if (set)
-		i40evf_fill_default_direct_cmd_desc(&desc,
-						    i40e_aqc_opc_set_rss_lut);
-	else
-		i40evf_fill_default_direct_cmd_desc(&desc,
-						    i40e_aqc_opc_get_rss_lut);
-
-	/* Indirect command */
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
-
-	cmd_resp->vsi_id =
-			cpu_to_le16((u16)((vsi_id <<
-					  I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
-					  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
-	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
-
-	if (pf_lut)
-		cmd_resp->flags |= cpu_to_le16((u16)
-					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
-					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
-					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
-	else
-		cmd_resp->flags |= cpu_to_le16((u16)
-					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
-					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
-					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
-
-	status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL);
-
-	return status;
-}
-
-/**
- * i40evf_aq_get_rss_lut
- * @hw: pointer to the hardware structure
- * @vsi_id: vsi fw index
- * @pf_lut: for PF table set true, for VSI table set false
- * @lut: pointer to the lut buffer provided by the caller
- * @lut_size: size of the lut buffer
- *
- * get the RSS lookup table, PF or VSI type
- **/
-i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
-				  bool pf_lut, u8 *lut, u16 lut_size)
-{
-	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
-				       false);
-}
-
-/**
- * i40evf_aq_set_rss_lut
- * @hw: pointer to the hardware structure
- * @vsi_id: vsi fw index
- * @pf_lut: for PF table set true, for VSI table set false
- * @lut: pointer to the lut buffer provided by the caller
- * @lut_size: size of the lut buffer
- *
- * set the RSS lookup table, PF or VSI type
- **/
-i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
-				  bool pf_lut, u8 *lut, u16 lut_size)
-{
-	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
-}
-
-/**
- * i40e_aq_get_set_rss_key
- * @hw: pointer to the hw struct
- * @vsi_id: vsi fw index
- * @key: pointer to key info struct
- * @set: set true to set the key, false to get the key
- *
- * get the RSS key per VSI
- **/
-static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
-				      u16 vsi_id,
-				      struct i40e_aqc_get_set_rss_key_data *key,
-				      bool set)
-{
-	i40e_status status;
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_get_set_rss_key *cmd_resp =
-			(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
-	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
-
-	if (set)
-		i40evf_fill_default_direct_cmd_desc(&desc,
-						    i40e_aqc_opc_set_rss_key);
-	else
-		i40evf_fill_default_direct_cmd_desc(&desc,
-						    i40e_aqc_opc_get_rss_key);
-
-	/* Indirect command */
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
-
-	cmd_resp->vsi_id =
-			cpu_to_le16((u16)((vsi_id <<
-					  I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
-					  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
-	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
-
-	status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL);
-
-	return status;
-}
-
-/**
- * i40evf_aq_get_rss_key
- * @hw: pointer to the hw struct
- * @vsi_id: vsi fw index
- * @key: pointer to key info struct
- *
- **/
-i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
-				  u16 vsi_id,
-				  struct i40e_aqc_get_set_rss_key_data *key)
-{
-	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
-}
-
-/**
- * i40evf_aq_set_rss_key
- * @hw: pointer to the hw struct
- * @vsi_id: vsi fw index
- * @key: pointer to key info struct
- *
- * set the RSS key per VSI
- **/
-i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
-				  u16 vsi_id,
-				  struct i40e_aqc_get_set_rss_key_data *key)
-{
-	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
-}
-
-
-/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the
- * hardware to a bit-field that can be used by SW to more easily determine the
- * packet type.
- *
- * Macros are used to shorten the table lines and make this table human
- * readable.
- *
- * We store the PTYPE in the top byte of the bit field - this is just so that
- * we can check that the table doesn't have a row missing, as the index into
- * the table should be the PTYPE.
- *
- * Typical work flow:
- *
- * IF NOT i40evf_ptype_lookup[ptype].known
- * THEN
- *      Packet is unknown
- * ELSE IF i40evf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
- *      Use the rest of the fields to look at the tunnels, inner protocols, etc
- * ELSE
- *      Use the enum i40e_rx_l2_ptype to decode the packet type
- * ENDIF
- */
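[Editorial note, not part of the patch: a standalone sketch of the decode flow the comment describes, using a local, heavily trimmed stand-in for the decoded-ptype entry; the real i40e_rx_ptype_decoded has more fields and is defined elsewhere in the driver.]

    #include <stdint.h>
    #include <stdio.h>

    /* Trimmed stand-in: only the fields needed for the branch above. */
    struct ptype_decoded_example {
            uint8_t ptype;
            uint8_t known;      /* 0 = row unused, packet type unknown */
            uint8_t outer_ip;   /* nonzero = outer header is IP        */
    };

    static void classify_example(const struct ptype_decoded_example *tbl,
                                 uint8_t ptype)
    {
            const struct ptype_decoded_example *d = &tbl[ptype];

            if (!d->known)
                    printf("ptype %u: unknown packet\n", ptype);
            else if (d->outer_ip)
                    printf("ptype %u: IP packet, inspect tunnel/inner fields\n",
                           ptype);
            else
                    printf("ptype %u: plain L2 packet\n", ptype);
    }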
-
-/* macro to make the table lines short */
-#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
-	{	PTYPE, \
-		1, \
-		I40E_RX_PTYPE_OUTER_##OUTER_IP, \
-		I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
-		I40E_RX_PTYPE_##OUTER_FRAG, \
-		I40E_RX_PTYPE_TUNNEL_##T, \
-		I40E_RX_PTYPE_TUNNEL_END_##TE, \
-		I40E_RX_PTYPE_##TEF, \
-		I40E_RX_PTYPE_INNER_PROT_##I, \
-		I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
-
-#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
-		{ PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-
-/* shorter macros makes the table fit but are terse */
-#define I40E_RX_PTYPE_NOF		I40E_RX_PTYPE_NOT_FRAG
-#define I40E_RX_PTYPE_FRG		I40E_RX_PTYPE_FRAG
-#define I40E_RX_PTYPE_INNER_PROT_TS	I40E_RX_PTYPE_INNER_PROT_TIMESYNC
-
-/* Lookup table mapping the HW PTYPE to the bit field for decoding */
-struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = {
-	/* L2 Packet types */
-	I40E_PTT_UNUSED_ENTRY(0),
-	I40E_PTT(1,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
-	I40E_PTT(2,  L2, NONE, NOF, NONE, NONE, NOF, TS,   PAY2),
-	I40E_PTT(3,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
-	I40E_PTT_UNUSED_ENTRY(4),
-	I40E_PTT_UNUSED_ENTRY(5),
-	I40E_PTT(6,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
-	I40E_PTT(7,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
-	I40E_PTT_UNUSED_ENTRY(8),
-	I40E_PTT_UNUSED_ENTRY(9),
-	I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
-	I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
-	I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
-	I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
-	I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
-	I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
-	I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
-	I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
-	I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
-	I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
-	I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
-	I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
-
-	/* Non Tunneled IPv4 */
-	I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
-	I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
-	I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP,  PAY4),
-	I40E_PTT_UNUSED_ENTRY(25),
-	I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP,  PAY4),
-	I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
-	I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
-	/* IPv4 --> IPv4 */
-	I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
-	I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
-	I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
-	I40E_PTT_UNUSED_ENTRY(32),
-	I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
-	I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
-	I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
-	/* IPv4 --> IPv6 */
-	I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
-	I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
-	I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
-	I40E_PTT_UNUSED_ENTRY(39),
-	I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
-	I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
-	I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
-	/* IPv4 --> GRE/NAT */
-	I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
-	/* IPv4 --> GRE/NAT --> IPv4 */
-	I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
-	I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
-	I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
-	I40E_PTT_UNUSED_ENTRY(47),
-	I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
-	I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
-	I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
-	/* IPv4 --> GRE/NAT --> IPv6 */
-	I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
-	I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
-	I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
-	I40E_PTT_UNUSED_ENTRY(54),
-	I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
-	I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
-	I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
-	/* IPv4 --> GRE/NAT --> MAC */
-	I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
-	/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
-	I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
-	I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
-	I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
-	I40E_PTT_UNUSED_ENTRY(62),
-	I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
-	I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
-	I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
-	/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
-	I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
-	I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
-	I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
-	I40E_PTT_UNUSED_ENTRY(69),
-	I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
-	I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
-	I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
-	/* IPv4 --> GRE/NAT --> MAC/VLAN */
-	I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
-	/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
-	I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
-	I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
-	I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
-	I40E_PTT_UNUSED_ENTRY(77),
-	I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
-	I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
-	I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
-	/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
-	I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
-	I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
-	I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
-	I40E_PTT_UNUSED_ENTRY(84),
-	I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
-	I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
-	I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
-	/* Non Tunneled IPv6 */
-	I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
-	I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
-	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY3),
-	I40E_PTT_UNUSED_ENTRY(91),
-	I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP,  PAY4),
-	I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
-	I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
-	/* IPv6 --> IPv4 */
-	I40E_PTT(95,  IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
-	I40E_PTT(96,  IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
-	I40E_PTT(97,  IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
-	I40E_PTT_UNUSED_ENTRY(98),
-	I40E_PTT(99,  IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
-	I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
-	I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
-	/* IPv6 --> IPv6 */
-	I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
-	I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
-	I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
-	I40E_PTT_UNUSED_ENTRY(105),
-	I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
-	I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
-	I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
-	/* IPv6 --> GRE/NAT */
-	I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
-	/* IPv6 --> GRE/NAT -> IPv4 */
-	I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
-	I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
-	I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
-	I40E_PTT_UNUSED_ENTRY(113),
-	I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
-	I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
-	I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
-	/* IPv6 --> GRE/NAT -> IPv6 */
-	I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
-	I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
-	I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
-	I40E_PTT_UNUSED_ENTRY(120),
-	I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
-	I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
-	I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
-	/* IPv6 --> GRE/NAT -> MAC */
-	I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
-	/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
-	I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
-	I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
-	I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
-	I40E_PTT_UNUSED_ENTRY(128),
-	I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
-	I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
-	I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
-	/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
-	I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
-	I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
-	I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
-	I40E_PTT_UNUSED_ENTRY(135),
-	I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
-	I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
-	I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
-	/* IPv6 --> GRE/NAT -> MAC/VLAN */
-	I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
-	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
-	I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
-	I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
-	I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
-	I40E_PTT_UNUSED_ENTRY(143),
-	I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
-	I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
-	I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
-	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
-	I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
-	I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
-	I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
-	I40E_PTT_UNUSED_ENTRY(150),
-	I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
-	I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
-	I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
-	/* unused entries */
-	I40E_PTT_UNUSED_ENTRY(154),
-	I40E_PTT_UNUSED_ENTRY(155),
-	I40E_PTT_UNUSED_ENTRY(156),
-	I40E_PTT_UNUSED_ENTRY(157),
-	I40E_PTT_UNUSED_ENTRY(158),
-	I40E_PTT_UNUSED_ENTRY(159),
-
-	I40E_PTT_UNUSED_ENTRY(160),
-	I40E_PTT_UNUSED_ENTRY(161),
-	I40E_PTT_UNUSED_ENTRY(162),
-	I40E_PTT_UNUSED_ENTRY(163),
-	I40E_PTT_UNUSED_ENTRY(164),
-	I40E_PTT_UNUSED_ENTRY(165),
-	I40E_PTT_UNUSED_ENTRY(166),
-	I40E_PTT_UNUSED_ENTRY(167),
-	I40E_PTT_UNUSED_ENTRY(168),
-	I40E_PTT_UNUSED_ENTRY(169),
-
-	I40E_PTT_UNUSED_ENTRY(170),
-	I40E_PTT_UNUSED_ENTRY(171),
-	I40E_PTT_UNUSED_ENTRY(172),
-	I40E_PTT_UNUSED_ENTRY(173),
-	I40E_PTT_UNUSED_ENTRY(174),
-	I40E_PTT_UNUSED_ENTRY(175),
-	I40E_PTT_UNUSED_ENTRY(176),
-	I40E_PTT_UNUSED_ENTRY(177),
-	I40E_PTT_UNUSED_ENTRY(178),
-	I40E_PTT_UNUSED_ENTRY(179),
-
-	I40E_PTT_UNUSED_ENTRY(180),
-	I40E_PTT_UNUSED_ENTRY(181),
-	I40E_PTT_UNUSED_ENTRY(182),
-	I40E_PTT_UNUSED_ENTRY(183),
-	I40E_PTT_UNUSED_ENTRY(184),
-	I40E_PTT_UNUSED_ENTRY(185),
-	I40E_PTT_UNUSED_ENTRY(186),
-	I40E_PTT_UNUSED_ENTRY(187),
-	I40E_PTT_UNUSED_ENTRY(188),
-	I40E_PTT_UNUSED_ENTRY(189),
-
-	I40E_PTT_UNUSED_ENTRY(190),
-	I40E_PTT_UNUSED_ENTRY(191),
-	I40E_PTT_UNUSED_ENTRY(192),
-	I40E_PTT_UNUSED_ENTRY(193),
-	I40E_PTT_UNUSED_ENTRY(194),
-	I40E_PTT_UNUSED_ENTRY(195),
-	I40E_PTT_UNUSED_ENTRY(196),
-	I40E_PTT_UNUSED_ENTRY(197),
-	I40E_PTT_UNUSED_ENTRY(198),
-	I40E_PTT_UNUSED_ENTRY(199),
-
-	I40E_PTT_UNUSED_ENTRY(200),
-	I40E_PTT_UNUSED_ENTRY(201),
-	I40E_PTT_UNUSED_ENTRY(202),
-	I40E_PTT_UNUSED_ENTRY(203),
-	I40E_PTT_UNUSED_ENTRY(204),
-	I40E_PTT_UNUSED_ENTRY(205),
-	I40E_PTT_UNUSED_ENTRY(206),
-	I40E_PTT_UNUSED_ENTRY(207),
-	I40E_PTT_UNUSED_ENTRY(208),
-	I40E_PTT_UNUSED_ENTRY(209),
-
-	I40E_PTT_UNUSED_ENTRY(210),
-	I40E_PTT_UNUSED_ENTRY(211),
-	I40E_PTT_UNUSED_ENTRY(212),
-	I40E_PTT_UNUSED_ENTRY(213),
-	I40E_PTT_UNUSED_ENTRY(214),
-	I40E_PTT_UNUSED_ENTRY(215),
-	I40E_PTT_UNUSED_ENTRY(216),
-	I40E_PTT_UNUSED_ENTRY(217),
-	I40E_PTT_UNUSED_ENTRY(218),
-	I40E_PTT_UNUSED_ENTRY(219),
-
-	I40E_PTT_UNUSED_ENTRY(220),
-	I40E_PTT_UNUSED_ENTRY(221),
-	I40E_PTT_UNUSED_ENTRY(222),
-	I40E_PTT_UNUSED_ENTRY(223),
-	I40E_PTT_UNUSED_ENTRY(224),
-	I40E_PTT_UNUSED_ENTRY(225),
-	I40E_PTT_UNUSED_ENTRY(226),
-	I40E_PTT_UNUSED_ENTRY(227),
-	I40E_PTT_UNUSED_ENTRY(228),
-	I40E_PTT_UNUSED_ENTRY(229),
-
-	I40E_PTT_UNUSED_ENTRY(230),
-	I40E_PTT_UNUSED_ENTRY(231),
-	I40E_PTT_UNUSED_ENTRY(232),
-	I40E_PTT_UNUSED_ENTRY(233),
-	I40E_PTT_UNUSED_ENTRY(234),
-	I40E_PTT_UNUSED_ENTRY(235),
-	I40E_PTT_UNUSED_ENTRY(236),
-	I40E_PTT_UNUSED_ENTRY(237),
-	I40E_PTT_UNUSED_ENTRY(238),
-	I40E_PTT_UNUSED_ENTRY(239),
-
-	I40E_PTT_UNUSED_ENTRY(240),
-	I40E_PTT_UNUSED_ENTRY(241),
-	I40E_PTT_UNUSED_ENTRY(242),
-	I40E_PTT_UNUSED_ENTRY(243),
-	I40E_PTT_UNUSED_ENTRY(244),
-	I40E_PTT_UNUSED_ENTRY(245),
-	I40E_PTT_UNUSED_ENTRY(246),
-	I40E_PTT_UNUSED_ENTRY(247),
-	I40E_PTT_UNUSED_ENTRY(248),
-	I40E_PTT_UNUSED_ENTRY(249),
-
-	I40E_PTT_UNUSED_ENTRY(250),
-	I40E_PTT_UNUSED_ENTRY(251),
-	I40E_PTT_UNUSED_ENTRY(252),
-	I40E_PTT_UNUSED_ENTRY(253),
-	I40E_PTT_UNUSED_ENTRY(254),
-	I40E_PTT_UNUSED_ENTRY(255)
-};
-
-/**
- * i40evf_aq_rx_ctl_read_register - use FW to read from an Rx control register
- * @hw: pointer to the hw struct
- * @reg_addr: register address
- * @reg_val: ptr to register value
- * @cmd_details: pointer to command details structure or NULL
- *
- * Use the firmware to read the Rx control register,
- * especially useful if the Rx unit is under heavy pressure
- **/
-i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw,
-				u32 reg_addr, u32 *reg_val,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
-		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
-	i40e_status status;
-
-	if (!reg_val)
-		return I40E_ERR_PARAM;
-
-	i40evf_fill_default_direct_cmd_desc(&desc,
-					    i40e_aqc_opc_rx_ctl_reg_read);
-
-	cmd_resp->address = cpu_to_le32(reg_addr);
-
-	status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	if (status == 0)
-		*reg_val = le32_to_cpu(cmd_resp->value);
-
-	return status;
-}
-
-/**
- * i40evf_read_rx_ctl - read from an Rx control register
- * @hw: pointer to the hw struct
- * @reg_addr: register address
- **/
-u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
-{
-	i40e_status status = 0;
-	bool use_register;
-	int retry = 5;
-	u32 val = 0;
-
-	use_register = (((hw->aq.api_maj_ver == 1) &&
-			(hw->aq.api_min_ver < 5)) ||
-			(hw->mac.type == I40E_MAC_X722));
-	if (!use_register) {
-do_retry:
-		status = i40evf_aq_rx_ctl_read_register(hw, reg_addr,
-							&val, NULL);
-		if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
-			usleep_range(1000, 2000);
-			retry--;
-			goto do_retry;
-		}
-	}
-
-	/* if the AQ access failed, try the old-fashioned way */
-	if (status || use_register)
-		val = rd32(hw, reg_addr);
-
-	return val;
-}
-
-/**
- * i40evf_aq_rx_ctl_write_register
- * @hw: pointer to the hw struct
- * @reg_addr: register address
- * @reg_val: register value
- * @cmd_details: pointer to command details structure or NULL
- *
- * Use the firmware to write to an Rx control register,
- * especially useful if the Rx unit is under heavy pressure
- **/
-i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw,
-				u32 reg_addr, u32 reg_val,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_rx_ctl_reg_read_write *cmd =
-		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
-	i40e_status status;
-
-	i40evf_fill_default_direct_cmd_desc(&desc,
-					    i40e_aqc_opc_rx_ctl_reg_write);
-
-	cmd->address = cpu_to_le32(reg_addr);
-	cmd->value = cpu_to_le32(reg_val);
-
-	status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	return status;
-}
-
-/**
- * i40evf_write_rx_ctl - write to an Rx control register
- * @hw: pointer to the hw struct
- * @reg_addr: register address
- * @reg_val: register value
- **/
-void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
-{
-	i40e_status status = 0;
-	bool use_register;
-	int retry = 5;
-
-	use_register = (((hw->aq.api_maj_ver == 1) &&
-			(hw->aq.api_min_ver < 5)) ||
-			(hw->mac.type == I40E_MAC_X722));
-	if (!use_register) {
-do_retry:
-		status = i40evf_aq_rx_ctl_write_register(hw, reg_addr,
-							 reg_val, NULL);
-		if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
-			usleep_range(1000, 2000);
-			retry--;
-			goto do_retry;
-		}
-	}
-
-	/* if the AQ access failed, try the old-fashioned way */
-	if (status || use_register)
-		wr32(hw, reg_addr, reg_val);
-}
-
-/**
- * i40e_aq_send_msg_to_pf
- * @hw: pointer to the hardware structure
- * @v_opcode: opcodes for VF-PF communication
- * @v_retval: return error code
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- * @cmd_details: pointer to command details
- *
- * Send message to PF driver using admin queue. By default, this message
- * is sent asynchronously, i.e. i40evf_asq_send_command() does not wait for
- * completion before returning.
- **/
-i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
-				enum virtchnl_ops v_opcode,
-				i40e_status v_retval,
-				u8 *msg, u16 msglen,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_asq_cmd_details details;
-	i40e_status status;
-
-	i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
-	desc.cookie_high = cpu_to_le32(v_opcode);
-	desc.cookie_low = cpu_to_le32(v_retval);
-	if (msglen) {
-		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF
-						| I40E_AQ_FLAG_RD));
-		if (msglen > I40E_AQ_LARGE_BUF)
-			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
-		desc.datalen = cpu_to_le16(msglen);
-	}
-	if (!cmd_details) {
-		memset(&details, 0, sizeof(details));
-		details.async = true;
-		cmd_details = &details;
-	}
-	status = i40evf_asq_send_command(hw, &desc, msg, msglen, cmd_details);
-	return status;
-}
-
-/**
- * i40e_vf_parse_hw_config
- * @hw: pointer to the hardware structure
- * @msg: pointer to the virtual channel VF resource structure
- *
- * Given a VF resource message from the PF, populate the hw struct
- * with appropriate information.
- **/
-void i40e_vf_parse_hw_config(struct i40e_hw *hw,
-			     struct virtchnl_vf_resource *msg)
-{
-	struct virtchnl_vsi_resource *vsi_res;
-	int i;
-
-	vsi_res = &msg->vsi_res[0];
-
-	hw->dev_caps.num_vsis = msg->num_vsis;
-	hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
-	hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
-	hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
-	hw->dev_caps.dcb = msg->vf_cap_flags &
-			   VIRTCHNL_VF_OFFLOAD_L2;
-	hw->dev_caps.fcoe = 0;
-	for (i = 0; i < msg->num_vsis; i++) {
-		if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
-			ether_addr_copy(hw->mac.perm_addr,
-					vsi_res->default_mac_addr);
-			ether_addr_copy(hw->mac.addr,
-					vsi_res->default_mac_addr);
-		}
-		vsi_res++;
-	}
-}
-
-/**
- * i40e_vf_reset
- * @hw: pointer to the hardware structure
- *
- * Send a VF_RESET message to the PF. Does not wait for response from PF
- * as none will be forthcoming. Immediately after calling this function,
- * the admin queue should be shut down and (optionally) reinitialized.
- **/
-i40e_status i40e_vf_reset(struct i40e_hw *hw)
-{
-	return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
-				      0, NULL, 0, NULL);
-}
-
-/**
- * i40evf_aq_write_ddp - Write dynamic device personalization (ddp)
- * @hw: pointer to the hw struct
- * @buff: command buffer (size in bytes = buff_size)
- * @buff_size: buffer size in bytes
- * @track_id: package tracking id
- * @error_offset: returns error offset
- * @error_info: returns error information
- * @cmd_details: pointer to command details structure or NULL
- **/
-enum
-i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
-				     u16 buff_size, u32 track_id,
-				     u32 *error_offset, u32 *error_info,
-				     struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_write_personalization_profile *cmd =
-		(struct i40e_aqc_write_personalization_profile *)
-		&desc.params.raw;
-	struct i40e_aqc_write_ddp_resp *resp;
-	i40e_status status;
-
-	i40evf_fill_default_direct_cmd_desc(&desc,
-					    i40e_aqc_opc_write_personalization_profile);
-
-	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
-	if (buff_size > I40E_AQ_LARGE_BUF)
-		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
-
-	desc.datalen = cpu_to_le16(buff_size);
-
-	cmd->profile_track_id = cpu_to_le32(track_id);
-
-	status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
-	if (!status) {
-		resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
-		if (error_offset)
-			*error_offset = le32_to_cpu(resp->error_offset);
-		if (error_info)
-			*error_info = le32_to_cpu(resp->error_info);
-	}
-
-	return status;
-}
-
-/**
- * i40evf_aq_get_ddp_list - Read dynamic device personalization (ddp)
- * @hw: pointer to the hw struct
- * @buff: command buffer (size in bytes = buff_size)
- * @buff_size: buffer size in bytes
- * @flags: AdminQ command flags
- * @cmd_details: pointer to command details structure or NULL
- **/
-enum
-i40e_status_code i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
-					u16 buff_size, u8 flags,
-				       struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_get_applied_profiles *cmd =
-		(struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
-	i40e_status status;
-
-	i40evf_fill_default_direct_cmd_desc(&desc,
-					    i40e_aqc_opc_get_personalization_profile_list);
-
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
-	if (buff_size > I40E_AQ_LARGE_BUF)
-		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
-	desc.datalen = cpu_to_le16(buff_size);
-
-	cmd->flags = flags;
-
-	status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
-
-	return status;
-}
-
-/**
- * i40evf_find_segment_in_package
- * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E)
- * @pkg_hdr: pointer to the package header to be searched
- *
- * This function searches a package file for a particular segment type. On
- * success it returns a pointer to the segment header, otherwise it will
- * return NULL.
- **/
-struct i40e_generic_seg_header *
-i40evf_find_segment_in_package(u32 segment_type,
-			       struct i40e_package_header *pkg_hdr)
-{
-	struct i40e_generic_seg_header *segment;
-	u32 i;
-
-	/* Search all package segments for the requested segment type */
-	for (i = 0; i < pkg_hdr->segment_count; i++) {
-		segment =
-			(struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
-			 pkg_hdr->segment_offset[i]);
-
-		if (segment->type == segment_type)
-			return segment;
-	}
-
-	return NULL;
-}
-
-/**
- * i40evf_write_profile
- * @hw: pointer to the hardware structure
- * @profile: pointer to the profile segment of the package to be downloaded
- * @track_id: package tracking id
- *
- * Handles the download of a complete package.
- */
-enum i40e_status_code
-i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
-		     u32 track_id)
-{
-	i40e_status status = 0;
-	struct i40e_section_table *sec_tbl;
-	struct i40e_profile_section_header *sec = NULL;
-	u32 dev_cnt;
-	u32 vendor_dev_id;
-	u32 *nvm;
-	u32 section_size = 0;
-	u32 offset = 0, info = 0;
-	u32 i;
-
-	dev_cnt = profile->device_table_count;
-
-	for (i = 0; i < dev_cnt; i++) {
-		vendor_dev_id = profile->device_table[i].vendor_dev_id;
-		if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL)
-			if (hw->device_id == (vendor_dev_id & 0xFFFF))
-				break;
-	}
-	if (i == dev_cnt) {
-		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
-		return I40E_ERR_DEVICE_NOT_SUPPORTED;
-	}
-
-	nvm = (u32 *)&profile->device_table[dev_cnt];
-	sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
-
-	for (i = 0; i < sec_tbl->section_count; i++) {
-		sec = (struct i40e_profile_section_header *)((u8 *)profile +
-					     sec_tbl->section_offset[i]);
-
-		/* Skip 'AQ', 'note' and 'name' sections */
-		if (sec->section.type != SECTION_TYPE_MMIO)
-			continue;
-
-		section_size = sec->section.size +
-			sizeof(struct i40e_profile_section_header);
-
-		/* Write profile */
-		status = i40evf_aq_write_ddp(hw, (void *)sec, (u16)section_size,
-					     track_id, &offset, &info, NULL);
-		if (status) {
-			i40e_debug(hw, I40E_DEBUG_PACKAGE,
-				   "Failed to write profile: offset %d, info %d",
-				   offset, info);
-			break;
-		}
-	}
-	return status;
-}
-
-/**
- * i40evf_add_pinfo_to_list
- * @hw: pointer to the hardware structure
- * @profile: pointer to the profile segment of the package
- * @profile_info_sec: buffer for information section
- * @track_id: package tracking id
- *
- * Register a profile to the list of loaded profiles.
- */
-enum i40e_status_code
-i40evf_add_pinfo_to_list(struct i40e_hw *hw,
-			 struct i40e_profile_segment *profile,
-			 u8 *profile_info_sec, u32 track_id)
-{
-	i40e_status status = 0;
-	struct i40e_profile_section_header *sec = NULL;
-	struct i40e_profile_info *pinfo;
-	u32 offset = 0, info = 0;
-
-	sec = (struct i40e_profile_section_header *)profile_info_sec;
-	sec->tbl_size = 1;
-	sec->data_end = sizeof(struct i40e_profile_section_header) +
-			sizeof(struct i40e_profile_info);
-	sec->section.type = SECTION_TYPE_INFO;
-	sec->section.offset = sizeof(struct i40e_profile_section_header);
-	sec->section.size = sizeof(struct i40e_profile_info);
-	pinfo = (struct i40e_profile_info *)(profile_info_sec +
-					     sec->section.offset);
-	pinfo->track_id = track_id;
-	pinfo->version = profile->version;
-	pinfo->op = I40E_DDP_ADD_TRACKID;
-	memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
-
-	status = i40evf_aq_write_ddp(hw, (void *)sec, sec->data_end,
-				     track_id, &offset, &info, NULL);
-	return status;
-}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
deleted file mode 100644
index f300bf271824..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_DEVIDS_H_
-#define _I40E_DEVIDS_H_
-
-/* Device IDs */
-#define I40E_DEV_ID_SFP_XL710		0x1572
-#define I40E_DEV_ID_QEMU		0x1574
-#define I40E_DEV_ID_KX_B		0x1580
-#define I40E_DEV_ID_KX_C		0x1581
-#define I40E_DEV_ID_QSFP_A		0x1583
-#define I40E_DEV_ID_QSFP_B		0x1584
-#define I40E_DEV_ID_QSFP_C		0x1585
-#define I40E_DEV_ID_10G_BASE_T		0x1586
-#define I40E_DEV_ID_20G_KR2		0x1587
-#define I40E_DEV_ID_20G_KR2_A		0x1588
-#define I40E_DEV_ID_10G_BASE_T4		0x1589
-#define I40E_DEV_ID_25G_B		0x158A
-#define I40E_DEV_ID_25G_SFP28		0x158B
-#define I40E_DEV_ID_VF			0x154C
-#define I40E_DEV_ID_VF_HV		0x1571
-#define I40E_DEV_ID_ADAPTIVE_VF		0x1889
-#define I40E_DEV_ID_SFP_X722		0x37D0
-#define I40E_DEV_ID_1G_BASE_T_X722	0x37D1
-#define I40E_DEV_ID_10G_BASE_T_X722	0x37D2
-#define I40E_DEV_ID_SFP_I_X722		0x37D3
-#define I40E_DEV_ID_X722_VF		0x37CD
-
-#define i40e_is_40G_device(d)		((d) == I40E_DEV_ID_QSFP_A  || \
-					 (d) == I40E_DEV_ID_QSFP_B  || \
-					 (d) == I40E_DEV_ID_QSFP_C)
-
-#endif /* _I40E_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
deleted file mode 100644
index 1c78de838857..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
+++ /dev/null
@@ -1,215 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_HMC_H_
-#define _I40E_HMC_H_
-
-#define I40E_HMC_MAX_BP_COUNT 512
-
-/* forward-declare the HW struct for the compiler */
-struct i40e_hw;
-
-#define I40E_HMC_INFO_SIGNATURE		0x484D5347 /* HMSG */
-#define I40E_HMC_PD_CNT_IN_SD		512
-#define I40E_HMC_DIRECT_BP_SIZE		0x200000 /* 2M */
-#define I40E_HMC_PAGED_BP_SIZE		4096
-#define I40E_HMC_PD_BP_BUF_ALIGNMENT	4096
-#define I40E_FIRST_VF_FPM_ID		16
-
-struct i40e_hmc_obj_info {
-	u64 base;	/* base addr in FPM */
-	u32 max_cnt;	/* max count available for this hmc func */
-	u32 cnt;	/* count of objects driver actually wants to create */
-	u64 size;	/* size in bytes of one object */
-};
-
-enum i40e_sd_entry_type {
-	I40E_SD_TYPE_INVALID = 0,
-	I40E_SD_TYPE_PAGED   = 1,
-	I40E_SD_TYPE_DIRECT  = 2
-};
-
-struct i40e_hmc_bp {
-	enum i40e_sd_entry_type entry_type;
-	struct i40e_dma_mem addr; /* populate to be used by hw */
-	u32 sd_pd_index;
-	u32 ref_cnt;
-};
-
-struct i40e_hmc_pd_entry {
-	struct i40e_hmc_bp bp;
-	u32 sd_index;
-	bool rsrc_pg;
-	bool valid;
-};
-
-struct i40e_hmc_pd_table {
-	struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
-	struct i40e_hmc_pd_entry  *pd_entry; /* [512] for sw book keeping */
-	struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
-
-	u32 ref_cnt;
-	u32 sd_index;
-};
-
-struct i40e_hmc_sd_entry {
-	enum i40e_sd_entry_type entry_type;
-	bool valid;
-
-	union {
-		struct i40e_hmc_pd_table pd_table;
-		struct i40e_hmc_bp bp;
-	} u;
-};
-
-struct i40e_hmc_sd_table {
-	struct i40e_virt_mem addr; /* used to track sd_entry allocations */
-	u32 sd_cnt;
-	u32 ref_cnt;
-	struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
-};
-
-struct i40e_hmc_info {
-	u32 signature;
-	/* equals to pci func num for PF and dynamically allocated for VFs */
-	u8 hmc_fn_id;
-	u16 first_sd_index; /* index of the first available SD */
-
-	/* hmc objects */
-	struct i40e_hmc_obj_info *hmc_obj;
-	struct i40e_virt_mem hmc_obj_virt_mem;
-	struct i40e_hmc_sd_table sd_table;
-};
-
-#define I40E_INC_SD_REFCNT(sd_table)	((sd_table)->ref_cnt++)
-#define I40E_INC_PD_REFCNT(pd_table)	((pd_table)->ref_cnt++)
-#define I40E_INC_BP_REFCNT(bp)		((bp)->ref_cnt++)
-
-#define I40E_DEC_SD_REFCNT(sd_table)	((sd_table)->ref_cnt--)
-#define I40E_DEC_PD_REFCNT(pd_table)	((pd_table)->ref_cnt--)
-#define I40E_DEC_BP_REFCNT(bp)		((bp)->ref_cnt--)
-
-/**
- * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
- * @hw: pointer to our hw struct
- * @pa: pointer to physical address
- * @sd_index: segment descriptor index
- * @type: if sd entry is direct or paged
- **/
-#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type)			\
-{									\
-	u32 val1, val2, val3;						\
-	val1 = (u32)(upper_32_bits(pa));				\
-	val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT <<			\
-		 I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
-		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
-		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |			\
-		BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);		\
-	val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
-	wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);			\
-	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
-	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
-}
-
-/**
- * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
- * @hw: pointer to our hw struct
- * @sd_index: segment descriptor index
- * @type: if sd entry is direct or paged
- **/
-#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type)			\
-{									\
-	u32 val2, val3;							\
-	val2 = (I40E_HMC_MAX_BP_COUNT <<				\
-		I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
-		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
-		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);			\
-	val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
-	wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);				\
-	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
-	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
-}
-
-/**
- * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
- * @hw: pointer to our hw struct
- * @sd_idx: segment descriptor index
- * @pd_idx: page descriptor index
- **/
-#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx)			\
-	wr32((hw), I40E_PFHMC_PDINV,					\
-	    (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) |		\
-	     ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-
-/**
- * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
- * @hmc_info: pointer to the HMC configuration information structure
- * @type: type of HMC resources we're searching
- * @index: starting index for the object
- * @cnt: number of objects we're trying to create
- * @sd_idx: pointer to return index of the segment descriptor in question
- * @sd_limit: pointer to return the maximum number of segment descriptors
- *
- * This function calculates the segment descriptor index and index limit
- * for the resource defined by i40e_hmc_rsrc_type.
- **/
-#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
-{									\
-	u64 fpm_addr, fpm_limit;					\
-	fpm_addr = (hmc_info)->hmc_obj[(type)].base +			\
-		   (hmc_info)->hmc_obj[(type)].size * (index);		\
-	fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
-	*(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE);		\
-	*(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE);	\
-	/* add one more to the limit to correct our range */		\
-	*(sd_limit) += 1;						\
-}
-
-/**
- * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
- * @hmc_info: pointer to the HMC configuration information struct
- * @type: HMC resource type we're examining
- * @idx: starting index for the object
- * @cnt: number of objects we're trying to create
- * @pd_index: pointer to return page descriptor index
- * @pd_limit: pointer to return page descriptor index limit
- *
- * Calculates the page descriptor index and index limit for the resource
- * defined by i40e_hmc_rsrc_type.
- **/
-#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
-{									\
-	u64 fpm_adr, fpm_limit;						\
-	fpm_adr = (hmc_info)->hmc_obj[(type)].base +			\
-		  (hmc_info)->hmc_obj[(type)].size * (idx);		\
-	fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt);	\
-	*(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE);		\
-	*(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE);	\
-	/* add one more to the limit to correct our range */		\
-	*(pd_limit) += 1;						\
-}
-i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
-					      struct i40e_hmc_info *hmc_info,
-					      u32 sd_index,
-					      enum i40e_sd_entry_type type,
-					      u64 direct_mode_sz);
-
-i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
-					      struct i40e_hmc_info *hmc_info,
-					      u32 pd_index,
-					      struct i40e_dma_mem *rsrc_pg);
-i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
-					struct i40e_hmc_info *hmc_info,
-					u32 idx);
-i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
-					     u32 idx);
-i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
-					    struct i40e_hmc_info *hmc_info,
-					    u32 idx, bool is_pf);
-i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
-					       u32 idx);
-i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
-					      struct i40e_hmc_info *hmc_info,
-					      u32 idx, bool is_pf);
-
-#endif /* _I40E_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
deleted file mode 100644
index 82b00f70a632..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_LAN_HMC_H_
-#define _I40E_LAN_HMC_H_
-
-/* forward-declare the HW struct for the compiler */
-struct i40e_hw;
-
-/* HMC element context information */
-
-/* Rx queue context data
- *
- * The sizes of the variables may be larger than needed due to crossing byte
- * boundaries. If we do not have the width of the variable set to the correct
- * size then we could end up shifting bits off the top of the variable when the
- * variable is at the top of a byte and crosses over into the next byte.
- */
-struct i40e_hmc_obj_rxq {
-	u16 head;
-	u16 cpuid; /* bigger than needed, see above for reason */
-	u64 base;
-	u16 qlen;
-#define I40E_RXQ_CTX_DBUFF_SHIFT 7
-	u16 dbuff; /* bigger than needed, see above for reason */
-#define I40E_RXQ_CTX_HBUFF_SHIFT 6
-	u16 hbuff; /* bigger than needed, see above for reason */
-	u8  dtype;
-	u8  dsize;
-	u8  crcstrip;
-	u8  fc_ena;
-	u8  l2tsel;
-	u8  hsplit_0;
-	u8  hsplit_1;
-	u8  showiv;
-	u32 rxmax; /* bigger than needed, see above for reason */
-	u8  tphrdesc_ena;
-	u8  tphwdesc_ena;
-	u8  tphdata_ena;
-	u8  tphhead_ena;
-	u16 lrxqthresh; /* bigger than needed, see above for reason */
-	u8  prefena;	/* NOTE: normally must be set to 1 at init */
-};
-
-/* Tx queue context data
-*
-* The sizes of the variables may be larger than needed due to crossing byte
-* boundaries. If we do not have the width of the variable set to the correct
-* size then we could end up shifting bits off the top of the variable when the
-* variable is at the top of a byte and crosses over into the next byte.
-*/
-struct i40e_hmc_obj_txq {
-	u16 head;
-	u8  new_context;
-	u64 base;
-	u8  fc_ena;
-	u8  timesync_ena;
-	u8  fd_ena;
-	u8  alt_vlan_ena;
-	u16 thead_wb;
-	u8  cpuid;
-	u8  head_wb_ena;
-	u16 qlen;
-	u8  tphrdesc_ena;
-	u8  tphrpacket_ena;
-	u8  tphwdesc_ena;
-	u64 head_wb_addr;
-	u32 crc;
-	u16 rdylist;
-	u8  rdylist_act;
-};
-
-/* for hsplit_0 field of Rx HMC context */
-enum i40e_hmc_obj_rx_hsplit_0 {
-	I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT      = 0,
-	I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2      = 1,
-	I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP      = 2,
-	I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
-	I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP    = 8,
-};
-
-/* fcoe_cntx and fcoe_filt are for debugging purpose only */
-struct i40e_hmc_obj_fcoe_cntx {
-	u32 rsv[32];
-};
-
-struct i40e_hmc_obj_fcoe_filt {
-	u32 rsv[8];
-};
-
-/* Context sizes for LAN objects */
-enum i40e_hmc_lan_object_size {
-	I40E_HMC_LAN_OBJ_SZ_8   = 0x3,
-	I40E_HMC_LAN_OBJ_SZ_16  = 0x4,
-	I40E_HMC_LAN_OBJ_SZ_32  = 0x5,
-	I40E_HMC_LAN_OBJ_SZ_64  = 0x6,
-	I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
-	I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
-	I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
-};
-
-#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
-#define I40E_HMC_OBJ_SIZE_TXQ         128
-#define I40E_HMC_OBJ_SIZE_RXQ         32
-#define I40E_HMC_OBJ_SIZE_FCOE_CNTX   128
-#define I40E_HMC_OBJ_SIZE_FCOE_FILT   64
-
-enum i40e_hmc_lan_rsrc_type {
-	I40E_HMC_LAN_FULL  = 0,
-	I40E_HMC_LAN_TX    = 1,
-	I40E_HMC_LAN_RX    = 2,
-	I40E_HMC_FCOE_CTX  = 3,
-	I40E_HMC_FCOE_FILT = 4,
-	I40E_HMC_LAN_MAX   = 5
-};
-
-enum i40e_hmc_model {
-	I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
-	I40E_HMC_MODEL_DIRECT_ONLY      = 1,
-	I40E_HMC_MODEL_PAGED_ONLY       = 2,
-	I40E_HMC_MODEL_UNKNOWN,
-};
-
-struct i40e_hmc_lan_create_obj_info {
-	struct i40e_hmc_info *hmc_info;
-	u32 rsrc_type;
-	u32 start_idx;
-	u32 count;
-	enum i40e_sd_entry_type entry_type;
-	u64 direct_mode_sz;
-};
-
-struct i40e_hmc_lan_delete_obj_info {
-	struct i40e_hmc_info *hmc_info;
-	u32 rsrc_type;
-	u32 start_idx;
-	u32 count;
-};
-
-i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
-					u32 rxq_num, u32 fcoe_cntx_num,
-					u32 fcoe_filt_num);
-i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
-					     enum i40e_hmc_model model);
-i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
-
-i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
-						      u16 queue);
-i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
-						    u16 queue,
-						    struct i40e_hmc_obj_txq *s);
-i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
-						      u16 queue);
-i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
-						    u16 queue,
-						    struct i40e_hmc_obj_rxq *s);
-
-#endif /* _I40E_LAN_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
deleted file mode 100644
index 3ddddb46455b..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_OSDEP_H_
-#define _I40E_OSDEP_H_
-
-#include <linux/types.h>
-#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
-#include <linux/tcp.h>
-#include <linux/pci.h>
-
-/* get readq/writeq support for 32 bit kernels, use the low-first version */
-#include <linux/io-64-nonatomic-lo-hi.h>
-
-/* File to be the magic between shared code and
- * actual OS primitives
- */
-
-#define hw_dbg(hw, S, A...)	do {} while (0)
-
-#define wr32(a, reg, value)	writel((value), ((a)->hw_addr + (reg)))
-#define rd32(a, reg)		readl((a)->hw_addr + (reg))
-
-#define wr64(a, reg, value)	writeq((value), ((a)->hw_addr + (reg)))
-#define rd64(a, reg)		readq((a)->hw_addr + (reg))
-#define i40e_flush(a)		readl((a)->hw_addr + I40E_VFGEN_RSTAT)
-
-/* memory allocation tracking */
-struct i40e_dma_mem {
-	void *va;
-	dma_addr_t pa;
-	u32 size;
-};
-
-#define i40e_allocate_dma_mem(h, m, unused, s, a) \
-	i40evf_allocate_dma_mem_d(h, m, s, a)
-#define i40e_free_dma_mem(h, m) i40evf_free_dma_mem_d(h, m)
-
-struct i40e_virt_mem {
-	void *va;
-	u32 size;
-};
-#define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s)
-#define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m)
-
-#define i40e_debug(h, m, s, ...)  i40evf_debug_d(h, m, s, ##__VA_ARGS__)
-extern void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
-	__attribute__ ((format(gnu_printf, 3, 4)));
-
-typedef enum i40e_status_code i40e_status;
-#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
deleted file mode 100644
index a358f4b9d5aa..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_PROTOTYPE_H_
-#define _I40E_PROTOTYPE_H_
-
-#include "i40e_type.h"
-#include "i40e_alloc.h"
-#include <linux/avf/virtchnl.h>
-
-/* Prototypes for shared code functions that are not in
- * the standard function pointer structures.  These are
- * mostly because they are needed even before the init
- * has happened and will assist in the early SW and FW
- * setup.
- */
-
-/* adminq functions */
-i40e_status i40evf_init_adminq(struct i40e_hw *hw);
-i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw);
-void i40e_adminq_init_ring_data(struct i40e_hw *hw);
-i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
-					     struct i40e_arq_event_info *e,
-					     u16 *events_pending);
-i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
-				struct i40e_aq_desc *desc,
-				void *buff, /* can be NULL */
-				u16  buff_size,
-				struct i40e_asq_cmd_details *cmd_details);
-bool i40evf_asq_done(struct i40e_hw *hw);
-
-/* debug function for adminq */
-void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
-		     void *desc, void *buffer, u16 buf_len);
-
-void i40e_idle_aq(struct i40e_hw *hw);
-void i40evf_resume_aq(struct i40e_hw *hw);
-bool i40evf_check_asq_alive(struct i40e_hw *hw);
-i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
-const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
-const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
-
-i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
-				  bool pf_lut, u8 *lut, u16 lut_size);
-i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
-				  bool pf_lut, u8 *lut, u16 lut_size);
-i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
-				  u16 seid,
-				  struct i40e_aqc_get_set_rss_key_data *key);
-i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
-				  u16 seid,
-				  struct i40e_aqc_get_set_rss_key_data *key);
-
-i40e_status i40e_set_mac_type(struct i40e_hw *hw);
-
-extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[];
-
-static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
-{
-	return i40evf_ptype_lookup[ptype];
-}
-
-/* prototype for functions used for SW locks */
-
-/* i40e_common for VF drivers*/
-void i40e_vf_parse_hw_config(struct i40e_hw *hw,
-			     struct virtchnl_vf_resource *msg);
-i40e_status i40e_vf_reset(struct i40e_hw *hw);
-i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
-				enum virtchnl_ops v_opcode,
-				i40e_status v_retval,
-				u8 *msg, u16 msglen,
-				struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_set_filter_control(struct i40e_hw *hw,
-				struct i40e_filter_control_settings *settings);
-i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
-				u8 *mac_addr, u16 ethtype, u16 flags,
-				u16 vsi_seid, u16 queue, bool is_add,
-				struct i40e_control_filter_stats *stats,
-				struct i40e_asq_cmd_details *cmd_details);
-void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
-						    u16 vsi_seid);
-i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw,
-				u32 reg_addr, u32 *reg_val,
-				struct i40e_asq_cmd_details *cmd_details);
-u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr);
-i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw,
-				u32 reg_addr, u32 reg_val,
-				struct i40e_asq_cmd_details *cmd_details);
-void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
-i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
-				     u8 phy_select, u8 dev_addr,
-				     u32 reg_addr, u32 reg_val,
-				     struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
-				     u8 phy_select, u8 dev_addr,
-				     u32 reg_addr, u32 *reg_val,
-				     struct i40e_asq_cmd_details *cmd_details);
-
-i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page,
-				   u16 reg, u8 phy_addr, u16 *value);
-i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page,
-				    u16 reg, u8 phy_addr, u16 value);
-i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
-				   u8 phy_addr, u16 *value);
-i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
-				    u8 phy_addr, u16 value);
-u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
-i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
-				    u32 time, u32 interval);
-i40e_status i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
-				u16 buff_size, u32 track_id,
-				u32 *error_offset, u32 *error_info,
-				struct i40e_asq_cmd_details *
-				cmd_details);
-i40e_status i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
-				   u16 buff_size, u8 flags,
-				   struct i40e_asq_cmd_details *
-				   cmd_details);
-struct i40e_generic_seg_header *
-i40evf_find_segment_in_package(u32 segment_type,
-			       struct i40e_package_header *pkg_header);
-enum i40e_status_code
-i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
-		     u32 track_id);
-enum i40e_status_code
-i40evf_add_pinfo_to_list(struct i40e_hw *hw,
-			 struct i40e_profile_segment *profile,
-			 u8 *profile_info_sec, u32 track_id);
-#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
deleted file mode 100644
index 49e1f57d99cc..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_register.h
+++ /dev/null
@@ -1,313 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_REGISTER_H_
-#define _I40E_REGISTER_H_
-
-#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
-#define I40E_VFMSIX_PBA1_MAX_INDEX 19
-#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
-#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TADD1_MAX_INDEX 639
-#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
-#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
-#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
-#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
-#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
-#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
-#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
-#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
-#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
-#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
-#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
-#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
-#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
-#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
-#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
-#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT)
-#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
-#define I40E_VF_ARQH1_ARQH_SHIFT 0
-#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT)
-#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
-#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
-#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT)
-#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
-#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT)
-#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
-#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
-#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
-#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
-#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
-#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
-#define I40E_VF_ARQT1_ARQT_SHIFT 0
-#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
-#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
-#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
-#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT)
-#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
-#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
-#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT)
-#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
-#define I40E_VF_ATQH1_ATQH_SHIFT 0
-#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT)
-#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
-#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
-#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT)
-#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
-#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT)
-#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
-#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
-#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
-#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
-#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
-#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
-#define I40E_VF_ATQT1_ATQT_SHIFT 0
-#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
-#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
-#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
-#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
-#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
-#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
-#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
-#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
-#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
-#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
-#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
-#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
-#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
-#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
-#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
-#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
-#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
-#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */
-#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
-#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT)
-#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
-#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT)
-#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
-#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT)
-#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
-#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT)
-#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
-#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT)
-#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
-#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT)
-#define I40E_VFINT_ICR01_SWINT_SHIFT 31
-#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT)
-#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
-#define I40E_VFINT_ITR01_MAX_INDEX 2
-#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT)
-#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
-#define I40E_VFINT_ITRN1_MAX_INDEX 2
-#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
-#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
-#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
-#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_QRX_TAIL1_MAX_INDEX 15
-#define I40E_QRX_TAIL1_TAIL_SHIFT 0
-#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT)
-#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
-#define I40E_QTX_TAIL1_MAX_INDEX 15
-#define I40E_QTX_TAIL1_TAIL_SHIFT 0
-#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT)
-#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */
-#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
-#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TADD_MAX_INDEX 16
-#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
-#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
-#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
-#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TMSG_MAX_INDEX 16
-#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
-#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TUADD_MAX_INDEX 16
-#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
-#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
-#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
-#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT)
-#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */
-#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
-#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */
-#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
-#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
-#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_VFQF_HENA_MAX_INDEX 1
-#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
-#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
-#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
-#define I40E_VFQF_HKEY_MAX_INDEX 12
-#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
-#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT)
-#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
-#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT)
-#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
-#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT)
-#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
-#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT)
-#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_VFQF_HLUT_MAX_INDEX 15
-#define I40E_VFQF_HLUT_LUT0_SHIFT 0
-#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT)
-#define I40E_VFQF_HLUT_LUT1_SHIFT 8
-#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT)
-#define I40E_VFQF_HLUT_LUT2_SHIFT 16
-#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT)
-#define I40E_VFQF_HLUT_LUT3_SHIFT 24
-#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT)
-#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_VFQF_HREGION_MAX_INDEX 7
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
-#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
-#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
-#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
-#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
-#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
-#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
-#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
-#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
-#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
-#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
-#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
-#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
-#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
-#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
-#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
-#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
-#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
-#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
-#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
-#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
-#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
-#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
-#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
-#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
-#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
-#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
-#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
-#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
-#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
-#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
-#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
-#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
-#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
-#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
-#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
-#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
-#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
-#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
-#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
-#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
-#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
-#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
-#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
-#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
-#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
-#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
-#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
-#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
-#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
-#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
-#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
-#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
-#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
-#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
-#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
-#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
-#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
-#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
-#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
-#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
-#endif /* _I40E_REGISTER_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h
deleted file mode 100644
index 77be0702d07c..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_status.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_STATUS_H_
-#define _I40E_STATUS_H_
-
-/* Error Codes */
-enum i40e_status_code {
-	I40E_SUCCESS				= 0,
-	I40E_ERR_NVM				= -1,
-	I40E_ERR_NVM_CHECKSUM			= -2,
-	I40E_ERR_PHY				= -3,
-	I40E_ERR_CONFIG				= -4,
-	I40E_ERR_PARAM				= -5,
-	I40E_ERR_MAC_TYPE			= -6,
-	I40E_ERR_UNKNOWN_PHY			= -7,
-	I40E_ERR_LINK_SETUP			= -8,
-	I40E_ERR_ADAPTER_STOPPED		= -9,
-	I40E_ERR_INVALID_MAC_ADDR		= -10,
-	I40E_ERR_DEVICE_NOT_SUPPORTED		= -11,
-	I40E_ERR_MASTER_REQUESTS_PENDING	= -12,
-	I40E_ERR_INVALID_LINK_SETTINGS		= -13,
-	I40E_ERR_AUTONEG_NOT_COMPLETE		= -14,
-	I40E_ERR_RESET_FAILED			= -15,
-	I40E_ERR_SWFW_SYNC			= -16,
-	I40E_ERR_NO_AVAILABLE_VSI		= -17,
-	I40E_ERR_NO_MEMORY			= -18,
-	I40E_ERR_BAD_PTR			= -19,
-	I40E_ERR_RING_FULL			= -20,
-	I40E_ERR_INVALID_PD_ID			= -21,
-	I40E_ERR_INVALID_QP_ID			= -22,
-	I40E_ERR_INVALID_CQ_ID			= -23,
-	I40E_ERR_INVALID_CEQ_ID			= -24,
-	I40E_ERR_INVALID_AEQ_ID			= -25,
-	I40E_ERR_INVALID_SIZE			= -26,
-	I40E_ERR_INVALID_ARP_INDEX		= -27,
-	I40E_ERR_INVALID_FPM_FUNC_ID		= -28,
-	I40E_ERR_QP_INVALID_MSG_SIZE		= -29,
-	I40E_ERR_QP_TOOMANY_WRS_POSTED		= -30,
-	I40E_ERR_INVALID_FRAG_COUNT		= -31,
-	I40E_ERR_QUEUE_EMPTY			= -32,
-	I40E_ERR_INVALID_ALIGNMENT		= -33,
-	I40E_ERR_FLUSHED_QUEUE			= -34,
-	I40E_ERR_INVALID_PUSH_PAGE_INDEX	= -35,
-	I40E_ERR_INVALID_IMM_DATA_SIZE		= -36,
-	I40E_ERR_TIMEOUT			= -37,
-	I40E_ERR_OPCODE_MISMATCH		= -38,
-	I40E_ERR_CQP_COMPL_ERROR		= -39,
-	I40E_ERR_INVALID_VF_ID			= -40,
-	I40E_ERR_INVALID_HMCFN_ID		= -41,
-	I40E_ERR_BACKING_PAGE_ERROR		= -42,
-	I40E_ERR_NO_PBLCHUNKS_AVAILABLE		= -43,
-	I40E_ERR_INVALID_PBLE_INDEX		= -44,
-	I40E_ERR_INVALID_SD_INDEX		= -45,
-	I40E_ERR_INVALID_PAGE_DESC_INDEX	= -46,
-	I40E_ERR_INVALID_SD_TYPE		= -47,
-	I40E_ERR_MEMCPY_FAILED			= -48,
-	I40E_ERR_INVALID_HMC_OBJ_INDEX		= -49,
-	I40E_ERR_INVALID_HMC_OBJ_COUNT		= -50,
-	I40E_ERR_INVALID_SRQ_ARM_LIMIT		= -51,
-	I40E_ERR_SRQ_ENABLED			= -52,
-	I40E_ERR_ADMIN_QUEUE_ERROR		= -53,
-	I40E_ERR_ADMIN_QUEUE_TIMEOUT		= -54,
-	I40E_ERR_BUF_TOO_SHORT			= -55,
-	I40E_ERR_ADMIN_QUEUE_FULL		= -56,
-	I40E_ERR_ADMIN_QUEUE_NO_WORK		= -57,
-	I40E_ERR_BAD_IWARP_CQE			= -58,
-	I40E_ERR_NVM_BLANK_MODE			= -59,
-	I40E_ERR_NOT_IMPLEMENTED		= -60,
-	I40E_ERR_PE_DOORBELL_NOT_ENABLED	= -61,
-	I40E_ERR_DIAG_TEST_FAILED		= -62,
-	I40E_ERR_NOT_READY			= -63,
-	I40E_NOT_SUPPORTED			= -64,
-	I40E_ERR_FIRMWARE_API_VERSION		= -65,
-	I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR	= -66,
-};
-
-#endif /* _I40E_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_trace.h b/drivers/net/ethernet/intel/i40evf/i40e_trace.h
deleted file mode 100644
index d7a4e68820a8..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_trace.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-/* Modeled on trace-events-sample.h */
-
-/* The trace subsystem name for i40evf will be "i40evf".
- *
- * This file is named i40e_trace.h.
- *
- * Since this include file's name is different from the trace
- * subsystem name, we'll have to define TRACE_INCLUDE_FILE at the end
- * of this file.
- */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM i40evf
-
-/* See trace-events-sample.h for a detailed description of why this
- * guard clause is different from most normal include files.
- */
-#if !defined(_I40E_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
-#define _I40E_TRACE_H_
-
-#include <linux/tracepoint.h>
-
-/**
- * i40e_trace() macro enables shared code to refer to trace points
- * like:
- *
- * trace_i40e{,vf}_example(args...)
- *
- * ... as:
- *
- * i40e_trace(example, args...)
- *
- * ... to resolve to the PF or VF version of the tracepoint without
- * ifdefs, and to allow tracepoints to be disabled entirely at build
- * time.
- *
- * Trace point should always be referred to in the driver via this
- * macro.
- *
- * Similarly, i40e_trace_enabled(trace_name) wraps references to
- * trace_i40e{,vf}_<trace_name>_enabled() functions.
- */
-#define _I40E_TRACE_NAME(trace_name) (trace_ ## i40evf ## _ ## trace_name)
-#define I40E_TRACE_NAME(trace_name) _I40E_TRACE_NAME(trace_name)
-
-#define i40e_trace(trace_name, args...) I40E_TRACE_NAME(trace_name)(args)
-
-#define i40e_trace_enabled(trace_name) I40E_TRACE_NAME(trace_name##_enabled)()
-
-/* Events common to PF and VF. Corresponding versions will be defined
- * for both, named trace_i40e_* and trace_i40evf_*. The i40e_trace()
- * macro above will select the right trace point name for the driver
- * being built from shared code.
- */
-
-/* Events related to a vsi & ring */
-DECLARE_EVENT_CLASS(
-	i40evf_tx_template,
-
-	TP_PROTO(struct i40e_ring *ring,
-		 struct i40e_tx_desc *desc,
-		 struct i40e_tx_buffer *buf),
-
-	TP_ARGS(ring, desc, buf),
-
-	/* The convention here is to make the first fields in the
-	 * TP_STRUCT match the TP_PROTO exactly. This enables the use
-	 * of the args struct generated by the tplist tool (from the
-	 * bcc-tools package) to be used for those fields. To access
-	 * fields other than the tracepoint args will require the
-	 * tplist output to be adjusted.
-	 */
-	TP_STRUCT__entry(
-		__field(void*, ring)
-		__field(void*, desc)
-		__field(void*, buf)
-		__string(devname, ring->netdev->name)
-	),
-
-	TP_fast_assign(
-		__entry->ring = ring;
-		__entry->desc = desc;
-		__entry->buf = buf;
-		__assign_str(devname, ring->netdev->name);
-	),
-
-	TP_printk(
-		"netdev: %s ring: %p desc: %p buf %p",
-		__get_str(devname), __entry->ring,
-		__entry->desc, __entry->buf)
-);
-
-DEFINE_EVENT(
-	i40evf_tx_template, i40evf_clean_tx_irq,
-	TP_PROTO(struct i40e_ring *ring,
-		 struct i40e_tx_desc *desc,
-		 struct i40e_tx_buffer *buf),
-
-	TP_ARGS(ring, desc, buf));
-
-DEFINE_EVENT(
-	i40evf_tx_template, i40evf_clean_tx_irq_unmap,
-	TP_PROTO(struct i40e_ring *ring,
-		 struct i40e_tx_desc *desc,
-		 struct i40e_tx_buffer *buf),
-
-	TP_ARGS(ring, desc, buf));
-
-DECLARE_EVENT_CLASS(
-	i40evf_rx_template,
-
-	TP_PROTO(struct i40e_ring *ring,
-		 union i40e_32byte_rx_desc *desc,
-		 struct sk_buff *skb),
-
-	TP_ARGS(ring, desc, skb),
-
-	TP_STRUCT__entry(
-		__field(void*, ring)
-		__field(void*, desc)
-		__field(void*, skb)
-		__string(devname, ring->netdev->name)
-	),
-
-	TP_fast_assign(
-		__entry->ring = ring;
-		__entry->desc = desc;
-		__entry->skb = skb;
-		__assign_str(devname, ring->netdev->name);
-	),
-
-	TP_printk(
-		"netdev: %s ring: %p desc: %p skb %p",
-		__get_str(devname), __entry->ring,
-		__entry->desc, __entry->skb)
-);
-
-DEFINE_EVENT(
-	i40evf_rx_template, i40evf_clean_rx_irq,
-	TP_PROTO(struct i40e_ring *ring,
-		 union i40e_32byte_rx_desc *desc,
-		 struct sk_buff *skb),
-
-	TP_ARGS(ring, desc, skb));
-
-DEFINE_EVENT(
-	i40evf_rx_template, i40evf_clean_rx_irq_rx,
-	TP_PROTO(struct i40e_ring *ring,
-		 union i40e_32byte_rx_desc *desc,
-		 struct sk_buff *skb),
-
-	TP_ARGS(ring, desc, skb));
-
-DECLARE_EVENT_CLASS(
-	i40evf_xmit_template,
-
-	TP_PROTO(struct sk_buff *skb,
-		 struct i40e_ring *ring),
-
-	TP_ARGS(skb, ring),
-
-	TP_STRUCT__entry(
-		__field(void*, skb)
-		__field(void*, ring)
-		__string(devname, ring->netdev->name)
-	),
-
-	TP_fast_assign(
-		__entry->skb = skb;
-		__entry->ring = ring;
-		__assign_str(devname, ring->netdev->name);
-	),
-
-	TP_printk(
-		"netdev: %s skb: %p ring: %p",
-		__get_str(devname), __entry->skb,
-		__entry->ring)
-);
-
-DEFINE_EVENT(
-	i40evf_xmit_template, i40evf_xmit_frame_ring,
-	TP_PROTO(struct sk_buff *skb,
-		 struct i40e_ring *ring),
-
-	TP_ARGS(skb, ring));
-
-DEFINE_EVENT(
-	i40evf_xmit_template, i40evf_xmit_frame_ring_drop,
-	TP_PROTO(struct sk_buff *skb,
-		 struct i40e_ring *ring),
-
-	TP_ARGS(skb, ring));
-
-/* Events unique to the VF. */
-
-#endif /* _I40E_TRACE_H_ */
-/* This must be outside ifdef _I40E_TRACE_H */
-
-/* This trace include file is not located in the .../include/trace
- * with the kernel tracepoint definitions, because we're a loadable
- * module.
- */
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE i40e_trace
-#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
deleted file mode 100644
index 1bf9734ae9cf..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ /dev/null
@@ -1,2513 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#include <linux/prefetch.h>
-#include <net/busy_poll.h>
-
-#include "i40evf.h"
-#include "i40e_trace.h"
-#include "i40e_prototype.h"
-
-static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
-				u32 td_tag)
-{
-	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
-			   ((u64)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
-			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
-			   ((u64)size  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
-			   ((u64)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
-}
-
-#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
-
-/**
- * i40e_unmap_and_free_tx_resource - Release a Tx buffer
- * @ring:      the ring that owns the buffer
- * @tx_buffer: the buffer to free
- **/
-static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
-					    struct i40e_tx_buffer *tx_buffer)
-{
-	if (tx_buffer->skb) {
-		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
-			kfree(tx_buffer->raw_buf);
-		else
-			dev_kfree_skb_any(tx_buffer->skb);
-		if (dma_unmap_len(tx_buffer, len))
-			dma_unmap_single(ring->dev,
-					 dma_unmap_addr(tx_buffer, dma),
-					 dma_unmap_len(tx_buffer, len),
-					 DMA_TO_DEVICE);
-	} else if (dma_unmap_len(tx_buffer, len)) {
-		dma_unmap_page(ring->dev,
-			       dma_unmap_addr(tx_buffer, dma),
-			       dma_unmap_len(tx_buffer, len),
-			       DMA_TO_DEVICE);
-	}
-
-	tx_buffer->next_to_watch = NULL;
-	tx_buffer->skb = NULL;
-	dma_unmap_len_set(tx_buffer, len, 0);
-	/* tx_buffer must be completely set up in the transmit path */
-}
-
-/**
- * i40evf_clean_tx_ring - Free any empty Tx buffers
- * @tx_ring: ring to be cleaned
- **/
-void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
-{
-	unsigned long bi_size;
-	u16 i;
-
-	/* ring already cleared, nothing to do */
-	if (!tx_ring->tx_bi)
-		return;
-
-	/* Free all the Tx ring sk_buffs */
-	for (i = 0; i < tx_ring->count; i++)
-		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
-
-	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
-	memset(tx_ring->tx_bi, 0, bi_size);
-
-	/* Zero out the descriptor ring */
-	memset(tx_ring->desc, 0, tx_ring->size);
-
-	tx_ring->next_to_use = 0;
-	tx_ring->next_to_clean = 0;
-
-	if (!tx_ring->netdev)
-		return;
-
-	/* cleanup Tx queue statistics */
-	netdev_tx_reset_queue(txring_txq(tx_ring));
-}
-
-/**
- * i40evf_free_tx_resources - Free Tx resources per queue
- * @tx_ring: Tx descriptor ring for a specific queue
- *
- * Free all transmit software resources
- **/
-void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
-{
-	i40evf_clean_tx_ring(tx_ring);
-	kfree(tx_ring->tx_bi);
-	tx_ring->tx_bi = NULL;
-
-	if (tx_ring->desc) {
-		dma_free_coherent(tx_ring->dev, tx_ring->size,
-				  tx_ring->desc, tx_ring->dma);
-		tx_ring->desc = NULL;
-	}
-}
-
-/**
- * i40evf_get_tx_pending - how many Tx descriptors not processed
- * @ring: the ring of descriptors
- * @in_sw: is tx_pending being checked in SW or HW
- *
- * Since there is no access to the ring head register
- * in XL710, we need to use our local copies
- **/
-u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
-{
-	u32 head, tail;
-
-	/* underlying hardware might not allow access and/or always return
-	 * 0 for the head/tail registers so just use the cached values
-	 */
-	head = ring->next_to_clean;
-	tail = ring->next_to_use;
-
-	if (head != tail)
-		return (head < tail) ?
-			tail - head : (tail + ring->count - head);
-
-	return 0;
-}
-
-/**
- * i40evf_detect_recover_hung - Function to detect and recover hung_queues
- * @vsi:  pointer to vsi struct with tx queues
- *
- * VSI has netdev and netdev has TX queues. This function is to check each of
- * those TX queues if they are hung, trigger recovery by issuing SW interrupt.
- **/
-void i40evf_detect_recover_hung(struct i40e_vsi *vsi)
-{
-	struct i40e_ring *tx_ring = NULL;
-	struct net_device *netdev;
-	unsigned int i;
-	int packets;
-
-	if (!vsi)
-		return;
-
-	if (test_bit(__I40E_VSI_DOWN, vsi->state))
-		return;
-
-	netdev = vsi->netdev;
-	if (!netdev)
-		return;
-
-	if (!netif_carrier_ok(netdev))
-		return;
-
-	for (i = 0; i < vsi->back->num_active_queues; i++) {
-		tx_ring = &vsi->back->tx_rings[i];
-		if (tx_ring && tx_ring->desc) {
-			/* If packet counter has not changed the queue is
-			 * likely stalled, so force an interrupt for this
-			 * queue.
-			 *
-			 * prev_pkt_ctr would be negative if there was no
-			 * pending work.
-			 */
-			packets = tx_ring->stats.packets & INT_MAX;
-			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
-				i40evf_force_wb(vsi, tx_ring->q_vector);
-				continue;
-			}
-
-			/* Memory barrier between read of packet count and call
-			 * to i40evf_get_tx_pending()
-			 */
-			smp_rmb();
-			tx_ring->tx_stats.prev_pkt_ctr =
-			  i40evf_get_tx_pending(tx_ring, true) ? packets : -1;
-		}
-	}
-}
-
-#define WB_STRIDE 4
-
-/**
- * i40e_clean_tx_irq - Reclaim resources after transmit completes
- * @vsi: the VSI we care about
- * @tx_ring: Tx ring to clean
- * @napi_budget: Used to determine if we are in netpoll
- *
- * Returns true if there's any budget left (e.g. the clean is finished)
- **/
-static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
-			      struct i40e_ring *tx_ring, int napi_budget)
-{
-	u16 i = tx_ring->next_to_clean;
-	struct i40e_tx_buffer *tx_buf;
-	struct i40e_tx_desc *tx_desc;
-	unsigned int total_bytes = 0, total_packets = 0;
-	unsigned int budget = vsi->work_limit;
-
-	tx_buf = &tx_ring->tx_bi[i];
-	tx_desc = I40E_TX_DESC(tx_ring, i);
-	i -= tx_ring->count;
-
-	do {
-		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
-
-		/* if next_to_watch is not set then there is no work pending */
-		if (!eop_desc)
-			break;
-
-		/* prevent any other reads prior to eop_desc */
-		smp_rmb();
-
-		i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
-		/* if the descriptor isn't done, no work yet to do */
-		if (!(eop_desc->cmd_type_offset_bsz &
-		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
-			break;
-
-		/* clear next_to_watch to prevent false hangs */
-		tx_buf->next_to_watch = NULL;
-
-		/* update the statistics for this packet */
-		total_bytes += tx_buf->bytecount;
-		total_packets += tx_buf->gso_segs;
-
-		/* free the skb */
-		napi_consume_skb(tx_buf->skb, napi_budget);
-
-		/* unmap skb header data */
-		dma_unmap_single(tx_ring->dev,
-				 dma_unmap_addr(tx_buf, dma),
-				 dma_unmap_len(tx_buf, len),
-				 DMA_TO_DEVICE);
-
-		/* clear tx_buffer data */
-		tx_buf->skb = NULL;
-		dma_unmap_len_set(tx_buf, len, 0);
-
-		/* unmap remaining buffers */
-		while (tx_desc != eop_desc) {
-			i40e_trace(clean_tx_irq_unmap,
-				   tx_ring, tx_desc, tx_buf);
-
-			tx_buf++;
-			tx_desc++;
-			i++;
-			if (unlikely(!i)) {
-				i -= tx_ring->count;
-				tx_buf = tx_ring->tx_bi;
-				tx_desc = I40E_TX_DESC(tx_ring, 0);
-			}
-
-			/* unmap any remaining paged data */
-			if (dma_unmap_len(tx_buf, len)) {
-				dma_unmap_page(tx_ring->dev,
-					       dma_unmap_addr(tx_buf, dma),
-					       dma_unmap_len(tx_buf, len),
-					       DMA_TO_DEVICE);
-				dma_unmap_len_set(tx_buf, len, 0);
-			}
-		}
-
-		/* move us one more past the eop_desc for start of next pkt */
-		tx_buf++;
-		tx_desc++;
-		i++;
-		if (unlikely(!i)) {
-			i -= tx_ring->count;
-			tx_buf = tx_ring->tx_bi;
-			tx_desc = I40E_TX_DESC(tx_ring, 0);
-		}
-
-		prefetch(tx_desc);
-
-		/* update budget accounting */
-		budget--;
-	} while (likely(budget));
-
-	i += tx_ring->count;
-	tx_ring->next_to_clean = i;
-	u64_stats_update_begin(&tx_ring->syncp);
-	tx_ring->stats.bytes += total_bytes;
-	tx_ring->stats.packets += total_packets;
-	u64_stats_update_end(&tx_ring->syncp);
-	tx_ring->q_vector->tx.total_bytes += total_bytes;
-	tx_ring->q_vector->tx.total_packets += total_packets;
-
-	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
-		/* check to see if there are < 4 descriptors
-		 * waiting to be written back, then kick the hardware to force
-		 * them to be written back in case we stay in NAPI.
-		 * In this mode on X722 we do not enable Interrupt.
-		 */
-		unsigned int j = i40evf_get_tx_pending(tx_ring, false);
-
-		if (budget &&
-		    ((j / WB_STRIDE) == 0) && (j > 0) &&
-		    !test_bit(__I40E_VSI_DOWN, vsi->state) &&
-		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
-			tx_ring->arm_wb = true;
-	}
-
-	/* notify netdev of completed buffers */
-	netdev_tx_completed_queue(txring_txq(tx_ring),
-				  total_packets, total_bytes);
-
-#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
-	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
-		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
-		/* Make sure that anybody stopping the queue after this
-		 * sees the new next_to_clean.
-		 */
-		smp_mb();
-		if (__netif_subqueue_stopped(tx_ring->netdev,
-					     tx_ring->queue_index) &&
-		   !test_bit(__I40E_VSI_DOWN, vsi->state)) {
-			netif_wake_subqueue(tx_ring->netdev,
-					    tx_ring->queue_index);
-			++tx_ring->tx_stats.restart_queue;
-		}
-	}
-
-	return !!budget;
-}
-
-/**
- * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
- * @vsi: the VSI we care about
- * @q_vector: the vector on which to enable writeback
- *
- **/
-static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
-				  struct i40e_q_vector *q_vector)
-{
-	u16 flags = q_vector->tx.ring[0].flags;
-	u32 val;
-
-	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
-		return;
-
-	if (q_vector->arm_wb_state)
-		return;
-
-	val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
-	      I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */
-
-	wr32(&vsi->back->hw,
-	     I40E_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
-	q_vector->arm_wb_state = true;
-}
-
-/**
- * i40evf_force_wb - Issue SW Interrupt so HW does a wb
- * @vsi: the VSI we care about
- * @q_vector: the vector  on which to force writeback
- *
- **/
-void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
-{
-	u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
-		  I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
-		  I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
-		  I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
-		  /* allow 00 to be written to the index */;
-
-	wr32(&vsi->back->hw,
-	     I40E_VFINT_DYN_CTLN1(q_vector->reg_idx),
-	     val);
-}
-
-static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
-					struct i40e_ring_container *rc)
-{
-	return &q_vector->rx == rc;
-}
-
-static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
-{
-	unsigned int divisor;
-
-	switch (q_vector->adapter->link_speed) {
-	case I40E_LINK_SPEED_40GB:
-		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
-		break;
-	case I40E_LINK_SPEED_25GB:
-	case I40E_LINK_SPEED_20GB:
-		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
-		break;
-	default:
-	case I40E_LINK_SPEED_10GB:
-		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
-		break;
-	case I40E_LINK_SPEED_1GB:
-	case I40E_LINK_SPEED_100MB:
-		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
-		break;
-	}
-
-	return divisor;
-}
-
-/**
- * i40e_update_itr - update the dynamic ITR value based on statistics
- * @q_vector: structure containing interrupt and ring information
- * @rc: structure containing ring performance data
- *
- * Stores a new ITR value based on packets and byte
- * counts during the last interrupt.  The advantage of per interrupt
- * computation is faster updates and more accurate ITR for the current
- * traffic pattern.  Constants in this function were computed
- * based on theoretical maximum wire speed and thresholds were set based
- * on testing data as well as attempting to minimize response time
- * while increasing bulk throughput.
- **/
-static void i40e_update_itr(struct i40e_q_vector *q_vector,
-			    struct i40e_ring_container *rc)
-{
-	unsigned int avg_wire_size, packets, bytes, itr;
-	unsigned long next_update = jiffies;
-
-	/* If we don't have any rings just leave ourselves set for maximum
-	 * possible latency so we take ourselves out of the equation.
-	 */
-	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
-		return;
-
-	/* For Rx we want to push the delay up and default to low latency.
-	 * for Tx we want to pull the delay down and default to high latency.
-	 */
-	itr = i40e_container_is_rx(q_vector, rc) ?
-	      I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
-	      I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
-
-	/* If we didn't update within up to 1 - 2 jiffies we can assume
-	 * that either packets are coming in so slow there hasn't been
-	 * any work, or that there is so much work that NAPI is dealing
-	 * with interrupt moderation and we don't need to do anything.
-	 */
-	if (time_after(next_update, rc->next_update))
-		goto clear_counts;
-
-	/* If itr_countdown is set it means we programmed an ITR within
-	 * the last 4 interrupt cycles. This has a side effect of us
-	 * potentially firing an early interrupt. In order to work around
-	 * this we need to throw out any data received for a few
-	 * interrupts following the update.
-	 */
-	if (q_vector->itr_countdown) {
-		itr = rc->target_itr;
-		goto clear_counts;
-	}
-
-	packets = rc->total_packets;
-	bytes = rc->total_bytes;
-
-	if (i40e_container_is_rx(q_vector, rc)) {
-		/* If Rx there are 1 to 4 packets and bytes are less than
-		 * 9000 assume insufficient data to use bulk rate limiting
-		 * approach unless Tx is already in bulk rate limiting. We
-		 * are likely latency driven.
-		 */
-		if (packets && packets < 4 && bytes < 9000 &&
-		    (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
-			itr = I40E_ITR_ADAPTIVE_LATENCY;
-			goto adjust_by_size;
-		}
-	} else if (packets < 4) {
-		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
-		 * bulk mode and we are receiving 4 or fewer packets just
-		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
-		 * that the Rx can relax.
-		 */
-		if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
-		    (q_vector->rx.target_itr & I40E_ITR_MASK) ==
-		     I40E_ITR_ADAPTIVE_MAX_USECS)
-			goto clear_counts;
-	} else if (packets > 32) {
-		/* If we have processed over 32 packets in a single interrupt
-		 * for Tx assume we need to switch over to "bulk" mode.
-		 */
-		rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
-	}
-
-	/* We have no packets to actually measure against. This means
-	 * either one of the other queues on this vector is active or
-	 * we are a Tx queue doing TSO with too high of an interrupt rate.
-	 *
-	 * Between 4 and 56 we can assume that our current interrupt delay
-	 * is only slightly too low. As such we should increase it by a small
-	 * fixed amount.
-	 */
-	if (packets < 56) {
-		itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
-		if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
-			itr &= I40E_ITR_ADAPTIVE_LATENCY;
-			itr += I40E_ITR_ADAPTIVE_MAX_USECS;
-		}
-		goto clear_counts;
-	}
-
-	if (packets <= 256) {
-		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
-		itr &= I40E_ITR_MASK;
-
-		/* Between 56 and 112 is our "goldilocks" zone where we are
-		 * working out "just right". Just report that our current
-		 * ITR is good for us.
-		 */
-		if (packets <= 112)
-			goto clear_counts;
-
-		/* If packet count is 128 or greater we are likely looking
-		 * at a slight overrun of the delay we want. Try halving
-		 * our delay to see if that will cut the number of packets
-		 * in half per interrupt.
-		 */
-		itr /= 2;
-		itr &= I40E_ITR_MASK;
-		if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
-			itr = I40E_ITR_ADAPTIVE_MIN_USECS;
-
-		goto clear_counts;
-	}
-
-	/* The paths below assume we are dealing with a bulk ITR since
-	 * number of packets is greater than 256. We are just going to have
-	 * to compute a value and try to bring the count under control,
-	 * though for smaller packet sizes there isn't much we can do as
-	 * NAPI polling will likely be kicking in sooner rather than later.
-	 */
-	itr = I40E_ITR_ADAPTIVE_BULK;
-
-adjust_by_size:
-	/* If packet counts are 256 or greater we can assume we have a gross
-	 * overestimation of what the rate should be. Instead of trying to fine
-	 * tune it just use the formula below to try and dial in an exact value
-	 * given the current packet size of the frame.
-	 */
-	avg_wire_size = bytes / packets;
-
-	/* The following is a crude approximation of:
-	 *  wmem_default / (size + overhead) = desired_pkts_per_int
-	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
-	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
-	 *
-	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
-	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
-	 * formula down to
-	 *
-	 *  (170 * (size + 24)) / (size + 640) = ITR
-	 *
-	 * We first do some math on the packet size and then finally bitshift
-	 * by 8 after rounding up. We also have to account for PCIe link speed
-	 * difference as ITR scales based on this.
-	 */
-	if (avg_wire_size <= 60) {
-		/* Start at 250k ints/sec */
-		avg_wire_size = 4096;
-	} else if (avg_wire_size <= 380) {
-		/* 250K ints/sec to 60K ints/sec */
-		avg_wire_size *= 40;
-		avg_wire_size += 1696;
-	} else if (avg_wire_size <= 1084) {
-		/* 60K ints/sec to 36K ints/sec */
-		avg_wire_size *= 15;
-		avg_wire_size += 11452;
-	} else if (avg_wire_size <= 1980) {
-		/* 36K ints/sec to 30K ints/sec */
-		avg_wire_size *= 5;
-		avg_wire_size += 22420;
-	} else {
-		/* plateau at a limit of 30K ints/sec */
-		avg_wire_size = 32256;
-	}
-
-	/* If we are in low latency mode halve our delay which doubles the
-	 * rate to somewhere between 100K to 16K ints/sec
-	 */
-	if (itr & I40E_ITR_ADAPTIVE_LATENCY)
-		avg_wire_size /= 2;
-
-	/* Resultant value is 256 times larger than it needs to be. This
-	 * gives us room to adjust the value as needed to either increase
-	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
-	 *
-	 * Use addition as we have already recorded the new latency flag
-	 * for the ITR value.
-	 */
-	itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
-	       I40E_ITR_ADAPTIVE_MIN_INC;
-
-	if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
-		itr &= I40E_ITR_ADAPTIVE_LATENCY;
-		itr += I40E_ITR_ADAPTIVE_MAX_USECS;
-	}
-
-clear_counts:
-	/* write back value */
-	rc->target_itr = itr;
-
-	/* next update should occur within next jiffy */
-	rc->next_update = next_update + 1;
-
-	rc->total_bytes = 0;
-	rc->total_packets = 0;
-}
-
-/**
- * i40evf_setup_tx_descriptors - Allocate the Tx descriptors
- * @tx_ring: the tx ring to set up
- *
- * Return 0 on success, negative on error
- **/
-int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
-{
-	struct device *dev = tx_ring->dev;
-	int bi_size;
-
-	if (!dev)
-		return -ENOMEM;
-
-	/* warn if we are about to overwrite the pointer */
-	WARN_ON(tx_ring->tx_bi);
-	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
-	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
-	if (!tx_ring->tx_bi)
-		goto err;
-
-	/* round up to nearest 4K */
-	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
-	tx_ring->size = ALIGN(tx_ring->size, 4096);
-	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
-					   &tx_ring->dma, GFP_KERNEL);
-	if (!tx_ring->desc) {
-		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
-			 tx_ring->size);
-		goto err;
-	}
-
-	tx_ring->next_to_use = 0;
-	tx_ring->next_to_clean = 0;
-	tx_ring->tx_stats.prev_pkt_ctr = -1;
-	return 0;
-
-err:
-	kfree(tx_ring->tx_bi);
-	tx_ring->tx_bi = NULL;
-	return -ENOMEM;
-}
-
-/**
- * i40evf_clean_rx_ring - Free Rx buffers
- * @rx_ring: ring to be cleaned
- **/
-void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
-{
-	unsigned long bi_size;
-	u16 i;
-
-	/* ring already cleared, nothing to do */
-	if (!rx_ring->rx_bi)
-		return;
-
-	if (rx_ring->skb) {
-		dev_kfree_skb(rx_ring->skb);
-		rx_ring->skb = NULL;
-	}
-
-	/* Free all the Rx ring sk_buffs */
-	for (i = 0; i < rx_ring->count; i++) {
-		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
-
-		if (!rx_bi->page)
-			continue;
-
-		/* Invalidate cache lines that may have been written to by
-		 * device so that we avoid corrupting memory.
-		 */
-		dma_sync_single_range_for_cpu(rx_ring->dev,
-					      rx_bi->dma,
-					      rx_bi->page_offset,
-					      rx_ring->rx_buf_len,
-					      DMA_FROM_DEVICE);
-
-		/* free resources associated with mapping */
-		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
-				     i40e_rx_pg_size(rx_ring),
-				     DMA_FROM_DEVICE,
-				     I40E_RX_DMA_ATTR);
-
-		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
-
-		rx_bi->page = NULL;
-		rx_bi->page_offset = 0;
-	}
-
-	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
-	memset(rx_ring->rx_bi, 0, bi_size);
-
-	/* Zero out the descriptor ring */
-	memset(rx_ring->desc, 0, rx_ring->size);
-
-	rx_ring->next_to_alloc = 0;
-	rx_ring->next_to_clean = 0;
-	rx_ring->next_to_use = 0;
-}
-
-/**
- * i40evf_free_rx_resources - Free Rx resources
- * @rx_ring: ring to clean the resources from
- *
- * Free all receive software resources
- **/
-void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
-{
-	i40evf_clean_rx_ring(rx_ring);
-	kfree(rx_ring->rx_bi);
-	rx_ring->rx_bi = NULL;
-
-	if (rx_ring->desc) {
-		dma_free_coherent(rx_ring->dev, rx_ring->size,
-				  rx_ring->desc, rx_ring->dma);
-		rx_ring->desc = NULL;
-	}
-}
-
-/**
- * i40evf_setup_rx_descriptors - Allocate Rx descriptors
- * @rx_ring: Rx descriptor ring (for a specific queue) to setup
- *
- * Returns 0 on success, negative on failure
- **/
-int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
-{
-	struct device *dev = rx_ring->dev;
-	int bi_size;
-
-	/* warn if we are about to overwrite the pointer */
-	WARN_ON(rx_ring->rx_bi);
-	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
-	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
-	if (!rx_ring->rx_bi)
-		goto err;
-
-	u64_stats_init(&rx_ring->syncp);
-
-	/* Round up to nearest 4K */
-	rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
-	rx_ring->size = ALIGN(rx_ring->size, 4096);
-	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
-					   &rx_ring->dma, GFP_KERNEL);
-
-	if (!rx_ring->desc) {
-		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
-			 rx_ring->size);
-		goto err;
-	}
-
-	rx_ring->next_to_alloc = 0;
-	rx_ring->next_to_clean = 0;
-	rx_ring->next_to_use = 0;
-
-	return 0;
-err:
-	kfree(rx_ring->rx_bi);
-	rx_ring->rx_bi = NULL;
-	return -ENOMEM;
-}
-
-/**
- * i40e_release_rx_desc - Store the new tail and head values
- * @rx_ring: ring to bump
- * @val: new head index
- **/
-static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
-{
-	rx_ring->next_to_use = val;
-
-	/* update next to alloc since we have filled the ring */
-	rx_ring->next_to_alloc = val;
-
-	/* Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch.  (Only
-	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64).
-	 */
-	wmb();
-	writel(val, rx_ring->tail);
-}
-
-/**
- * i40e_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
-{
-	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
-}
-
-/**
- * i40e_alloc_mapped_page - recycle or make a new page
- * @rx_ring: ring to use
- * @bi: rx_buffer struct to modify
- *
- * Returns true if the page was successfully allocated or
- * reused.
- **/
-static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
-				   struct i40e_rx_buffer *bi)
-{
-	struct page *page = bi->page;
-	dma_addr_t dma;
-
-	/* since we are recycling buffers we should seldom need to alloc */
-	if (likely(page)) {
-		rx_ring->rx_stats.page_reuse_count++;
-		return true;
-	}
-
-	/* alloc new page for storage */
-	page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
-	if (unlikely(!page)) {
-		rx_ring->rx_stats.alloc_page_failed++;
-		return false;
-	}
-
-	/* map page for use */
-	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
-				 i40e_rx_pg_size(rx_ring),
-				 DMA_FROM_DEVICE,
-				 I40E_RX_DMA_ATTR);
-
-	/* if mapping failed free memory back to system since
-	 * there isn't much point in holding memory we can't use
-	 */
-	if (dma_mapping_error(rx_ring->dev, dma)) {
-		__free_pages(page, i40e_rx_pg_order(rx_ring));
-		rx_ring->rx_stats.alloc_page_failed++;
-		return false;
-	}
-
-	bi->dma = dma;
-	bi->page = page;
-	bi->page_offset = i40e_rx_offset(rx_ring);
-
-	/* initialize pagecnt_bias to 1 representing we fully own page */
-	bi->pagecnt_bias = 1;
-
-	return true;
-}
-
-/**
- * i40e_receive_skb - Send a completed packet up the stack
- * @rx_ring:  rx ring in play
- * @skb: packet to send up
- * @vlan_tag: vlan tag for packet
- **/
-static void i40e_receive_skb(struct i40e_ring *rx_ring,
-			     struct sk_buff *skb, u16 vlan_tag)
-{
-	struct i40e_q_vector *q_vector = rx_ring->q_vector;
-
-	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
-	    (vlan_tag & VLAN_VID_MASK))
-		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-
-	napi_gro_receive(&q_vector->napi, skb);
-}
-
-/**
- * i40evf_alloc_rx_buffers - Replace used receive buffers
- * @rx_ring: ring to place buffers on
- * @cleaned_count: number of buffers to replace
- *
- * Returns false if all allocations were successful, true if any fail
- **/
-bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
-{
-	u16 ntu = rx_ring->next_to_use;
-	union i40e_rx_desc *rx_desc;
-	struct i40e_rx_buffer *bi;
-
-	/* do nothing if no valid netdev defined */
-	if (!rx_ring->netdev || !cleaned_count)
-		return false;
-
-	rx_desc = I40E_RX_DESC(rx_ring, ntu);
-	bi = &rx_ring->rx_bi[ntu];
-
-	do {
-		if (!i40e_alloc_mapped_page(rx_ring, bi))
-			goto no_buffers;
-
-		/* sync the buffer for use by the device */
-		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
-						 bi->page_offset,
-						 rx_ring->rx_buf_len,
-						 DMA_FROM_DEVICE);
-
-		/* Refresh the desc even if buffer_addrs didn't change
-		 * because each write-back erases this info.
-		 */
-		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
-
-		rx_desc++;
-		bi++;
-		ntu++;
-		if (unlikely(ntu == rx_ring->count)) {
-			rx_desc = I40E_RX_DESC(rx_ring, 0);
-			bi = rx_ring->rx_bi;
-			ntu = 0;
-		}
-
-		/* clear the status bits for the next_to_use descriptor */
-		rx_desc->wb.qword1.status_error_len = 0;
-
-		cleaned_count--;
-	} while (cleaned_count);
-
-	if (rx_ring->next_to_use != ntu)
-		i40e_release_rx_desc(rx_ring, ntu);
-
-	return false;
-
-no_buffers:
-	if (rx_ring->next_to_use != ntu)
-		i40e_release_rx_desc(rx_ring, ntu);
-
-	/* make sure to come back via polling to try again after
-	 * allocation failure
-	 */
-	return true;
-}
-
-/**
- * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
- * @vsi: the VSI we care about
- * @skb: skb currently being received and modified
- * @rx_desc: the receive descriptor
- **/
-static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
-				    struct sk_buff *skb,
-				    union i40e_rx_desc *rx_desc)
-{
-	struct i40e_rx_ptype_decoded decoded;
-	u32 rx_error, rx_status;
-	bool ipv4, ipv6;
-	u8 ptype;
-	u64 qword;
-
-	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-	ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
-	rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
-		   I40E_RXD_QW1_ERROR_SHIFT;
-	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
-		    I40E_RXD_QW1_STATUS_SHIFT;
-	decoded = decode_rx_desc_ptype(ptype);
-
-	skb->ip_summed = CHECKSUM_NONE;
-
-	skb_checksum_none_assert(skb);
-
-	/* Rx csum enabled and ip headers found? */
-	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
-		return;
-
-	/* did the hardware decode the packet and checksum? */
-	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
-		return;
-
-	/* both known and outer_ip must be set for the below code to work */
-	if (!(decoded.known && decoded.outer_ip))
-		return;
-
-	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
-	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
-	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
-	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
-
-	if (ipv4 &&
-	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
-			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
-		goto checksum_fail;
-
-	/* likely incorrect csum if alternate IP extension headers found */
-	if (ipv6 &&
-	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
-		/* don't increment checksum err here, non-fatal err */
-		return;
-
-	/* there was some L4 error, count error and punt packet to the stack */
-	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
-		goto checksum_fail;
-
-	/* handle packets that were not able to be checksummed due
-	 * to arrival speed, in this case the stack can compute
-	 * the csum.
-	 */
-	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
-		return;
-
-	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
-	switch (decoded.inner_prot) {
-	case I40E_RX_PTYPE_INNER_PROT_TCP:
-	case I40E_RX_PTYPE_INNER_PROT_UDP:
-	case I40E_RX_PTYPE_INNER_PROT_SCTP:
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		/* fall through */
-	default:
-		break;
-	}
-
-	return;
-
-checksum_fail:
-	vsi->back->hw_csum_rx_error++;
-}
-
-/**
- * i40e_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
- *
- * Returns a hash type to be used by skb_set_hash
- **/
-static inline int i40e_ptype_to_htype(u8 ptype)
-{
-	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
-
-	if (!decoded.known)
-		return PKT_HASH_TYPE_NONE;
-
-	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
-	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
-		return PKT_HASH_TYPE_L4;
-	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
-		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
-		return PKT_HASH_TYPE_L3;
-	else
-		return PKT_HASH_TYPE_L2;
-}
-
-/**
- * i40e_rx_hash - set the hash value in the skb
- * @ring: descriptor ring
- * @rx_desc: specific descriptor
- * @skb: skb currently being received and modified
- * @rx_ptype: Rx packet type
- **/
-static inline void i40e_rx_hash(struct i40e_ring *ring,
-				union i40e_rx_desc *rx_desc,
-				struct sk_buff *skb,
-				u8 rx_ptype)
-{
-	u32 hash;
-	const __le64 rss_mask =
-		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
-			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
-
-	if (ring->netdev->features & NETIF_F_RXHASH)
-		return;
-
-	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
-		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
-		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
-	}
-}
-
-/**
- * i40evf_process_skb_fields - Populate skb header fields from Rx descriptor
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being populated
- * @rx_ptype: the packet type decoded by hardware
- *
- * This function checks the ring, descriptor, and packet information in
- * order to populate the hash, checksum, VLAN, protocol, and
- * other fields within the skb.
- **/
-static inline
-void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
-			       union i40e_rx_desc *rx_desc, struct sk_buff *skb,
-			       u8 rx_ptype)
-{
-	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
-
-	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
-
-	skb_record_rx_queue(skb, rx_ring->queue_index);
-
-	/* modifies the skb - consumes the enet header */
-	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
-}
-
-/**
- * i40e_cleanup_headers - Correct empty headers
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @skb: pointer to current skb being fixed
- *
- * Also address the case where we are pulling data in on pages only
- * and as such no data is present in the skb header.
- *
- * In addition if skb is not at least 60 bytes we need to pad it so that
- * it is large enough to qualify as a valid Ethernet frame.
- *
- * Returns true if an error was encountered and skb was freed.
- **/
-static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
-{
-	/* if eth_skb_pad returns an error the skb was freed */
-	if (eth_skb_pad(skb))
-		return true;
-
-	return false;
-}
-
-/**
- * i40e_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
- * @old_buff: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- **/
-static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
-			       struct i40e_rx_buffer *old_buff)
-{
-	struct i40e_rx_buffer *new_buff;
-	u16 nta = rx_ring->next_to_alloc;
-
-	new_buff = &rx_ring->rx_bi[nta];
-
-	/* update, and store next to alloc */
-	nta++;
-	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
-	/* transfer page from old buffer to new buffer */
-	new_buff->dma		= old_buff->dma;
-	new_buff->page		= old_buff->page;
-	new_buff->page_offset	= old_buff->page_offset;
-	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
-}
-
-/**
- * i40e_page_is_reusable - check if any reuse is possible
- * @page: page struct to check
- *
- * A page is not reusable if it was allocated under low memory
- * conditions, or it's not in the same NUMA node as this CPU.
- */
-static inline bool i40e_page_is_reusable(struct page *page)
-{
-	return (page_to_nid(page) == numa_mem_id()) &&
-		!page_is_pfmemalloc(page);
-}
-
-/**
- * i40e_can_reuse_rx_page - Determine if this page can be reused by
- * the adapter for another receive
- *
- * @rx_buffer: buffer containing the page
- *
- * If page is reusable, rx_buffer->page_offset is adjusted to point to
- * an unused region in the page.
- *
- * For small pages, @truesize will be a constant value, half the size
- * of the memory at page.  We'll attempt to alternate between high and
- * low halves of the page, with one half ready for use by the hardware
- * and the other half being consumed by the stack.  We use the page
- * ref count to determine whether the stack has finished consuming the
- * portion of this page that was passed up with a previous packet.  If
- * the page ref count is >1, we'll assume the "other" half page is
- * still busy, and this page cannot be reused.
- *
- * For larger pages, @truesize will be the actual space used by the
- * received packet (adjusted upward to an even multiple of the cache
- * line size).  This will advance through the page by the amount
- * actually consumed by the received packets while there is still
- * space for a buffer.  Each region of larger pages will be used at
- * most once, after which the page will not be reused.
- *
- * In either case, if the page is reusable its refcount is increased.
- **/
-static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
-{
-	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
-	struct page *page = rx_buffer->page;
-
-	/* Is any reuse possible? */
-	if (unlikely(!i40e_page_is_reusable(page)))
-		return false;
-
-#if (PAGE_SIZE < 8192)
-	/* if we are only owner of page we can reuse it */
-	if (unlikely((page_count(page) - pagecnt_bias) > 1))
-		return false;
-#else
-#define I40E_LAST_OFFSET \
-	(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
-	if (rx_buffer->page_offset > I40E_LAST_OFFSET)
-		return false;
-#endif
-
-	/* If we have drained the page fragment pool we need to update
-	 * the pagecnt_bias and page count so that we fully restock the
-	 * number of references the driver holds.
-	 */
-	if (unlikely(!pagecnt_bias)) {
-		page_ref_add(page, USHRT_MAX);
-		rx_buffer->pagecnt_bias = USHRT_MAX;
-	}
-
-	return true;
-}
-
-/**
- * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: buffer containing page to add
- * @skb: sk_buff to place the data into
- * @size: packet length from rx_desc
- *
- * This function will add the data contained in rx_buffer->page to the skb.
- * It will just attach the page as a frag to the skb.
- *
- * The function will then update the page offset.
- **/
-static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
-			     struct i40e_rx_buffer *rx_buffer,
-			     struct sk_buff *skb,
-			     unsigned int size)
-{
-#if (PAGE_SIZE < 8192)
-	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
-#else
-	unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
-#endif
-
-	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
-			rx_buffer->page_offset, size, truesize);
-
-	/* page is being used so we must update the page offset */
-#if (PAGE_SIZE < 8192)
-	rx_buffer->page_offset ^= truesize;
-#else
-	rx_buffer->page_offset += truesize;
-#endif
-}
-
-/**
- * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
- * @rx_ring: rx descriptor ring to transact packets on
- * @size: size of buffer to add to skb
- *
- * This function will pull an Rx buffer from the ring and synchronize it
- * for use by the CPU.
- */
-static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
-						 const unsigned int size)
-{
-	struct i40e_rx_buffer *rx_buffer;
-
-	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
-	prefetchw(rx_buffer->page);
-
-	/* we are reusing so sync this buffer for CPU use */
-	dma_sync_single_range_for_cpu(rx_ring->dev,
-				      rx_buffer->dma,
-				      rx_buffer->page_offset,
-				      size,
-				      DMA_FROM_DEVICE);
-
-	/* We have pulled a buffer for use, so decrement pagecnt_bias */
-	rx_buffer->pagecnt_bias--;
-
-	return rx_buffer;
-}
-
-/**
- * i40e_construct_skb - Allocate skb and populate it
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: rx buffer to pull data from
- * @size: size of buffer to add to skb
- *
- * This function allocates an skb.  It then populates it with the page
- * data from the current receive descriptor, taking care to set up the
- * skb correctly.
- */
-static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
-					  struct i40e_rx_buffer *rx_buffer,
-					  unsigned int size)
-{
-	void *va;
-#if (PAGE_SIZE < 8192)
-	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
-#else
-	unsigned int truesize = SKB_DATA_ALIGN(size);
-#endif
-	unsigned int headlen;
-	struct sk_buff *skb;
-
-	/* prefetch first cache line of first page */
-	va = page_address(rx_buffer->page) + rx_buffer->page_offset;
-	prefetch(va);
-#if L1_CACHE_BYTES < 128
-	prefetch(va + L1_CACHE_BYTES);
-#endif
-
-	/* allocate a skb to store the frags */
-	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
-			       I40E_RX_HDR_SIZE,
-			       GFP_ATOMIC | __GFP_NOWARN);
-	if (unlikely(!skb))
-		return NULL;
-
-	/* Determine available headroom for copy */
-	headlen = size;
-	if (headlen > I40E_RX_HDR_SIZE)
-		headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);
-
-	/* align pull length to size of long to optimize memcpy performance */
-	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
-
-	/* update all of the pointers */
-	size -= headlen;
-	if (size) {
-		skb_add_rx_frag(skb, 0, rx_buffer->page,
-				rx_buffer->page_offset + headlen,
-				size, truesize);
-
-		/* buffer is used by skb, update page_offset */
-#if (PAGE_SIZE < 8192)
-		rx_buffer->page_offset ^= truesize;
-#else
-		rx_buffer->page_offset += truesize;
-#endif
-	} else {
-		/* buffer is unused, reset bias back to rx_buffer */
-		rx_buffer->pagecnt_bias++;
-	}
-
-	return skb;
-}
-
-/**
- * i40e_build_skb - Build skb around an existing buffer
- * @rx_ring: Rx descriptor ring to transact packets on
- * @rx_buffer: Rx buffer to pull data from
- * @size: size of buffer to add to skb
- *
- * This function builds an skb around an existing Rx buffer, taking care
- * to set up the skb correctly and avoid any memcpy overhead.
- */
-static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
-				      struct i40e_rx_buffer *rx_buffer,
-				      unsigned int size)
-{
-	void *va;
-#if (PAGE_SIZE < 8192)
-	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
-#else
-	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
-				SKB_DATA_ALIGN(I40E_SKB_PAD + size);
-#endif
-	struct sk_buff *skb;
-
-	/* prefetch first cache line of first page */
-	va = page_address(rx_buffer->page) + rx_buffer->page_offset;
-	prefetch(va);
-#if L1_CACHE_BYTES < 128
-	prefetch(va + L1_CACHE_BYTES);
-#endif
-	/* build an skb around the page buffer */
-	skb = build_skb(va - I40E_SKB_PAD, truesize);
-	if (unlikely(!skb))
-		return NULL;
-
-	/* update pointers within the skb to store the data */
-	skb_reserve(skb, I40E_SKB_PAD);
-	__skb_put(skb, size);
-
-	/* buffer is used by skb, update page_offset */
-#if (PAGE_SIZE < 8192)
-	rx_buffer->page_offset ^= truesize;
-#else
-	rx_buffer->page_offset += truesize;
-#endif
-
-	return skb;
-}
-
-/**
- * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: rx buffer to pull data from
- *
- * This function will clean up the contents of the rx_buffer.  It will
- * either recycle the buffer or unmap it and free the associated resources.
- */
-static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
-			       struct i40e_rx_buffer *rx_buffer)
-{
-	if (i40e_can_reuse_rx_page(rx_buffer)) {
-		/* hand second half of page back to the ring */
-		i40e_reuse_rx_page(rx_ring, rx_buffer);
-		rx_ring->rx_stats.page_reuse_count++;
-	} else {
-		/* we are not reusing the buffer so unmap it */
-		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
-				     i40e_rx_pg_size(rx_ring),
-				     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
-		__page_frag_cache_drain(rx_buffer->page,
-					rx_buffer->pagecnt_bias);
-	}
-
-	/* clear contents of buffer_info */
-	rx_buffer->page = NULL;
-}
-
-/**
- * i40e_is_non_eop - process handling of non-EOP buffers
- * @rx_ring: Rx ring being processed
- * @rx_desc: Rx descriptor for current buffer
- * @skb: Current socket buffer containing buffer in progress
- *
- * This function updates next to clean.  If the buffer is an EOP buffer
- * this function exits returning false, otherwise it will place the
- * sk_buff in the next buffer to be chained and return true indicating
- * that this is in fact a non-EOP buffer.
- **/
-static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
-			    union i40e_rx_desc *rx_desc,
-			    struct sk_buff *skb)
-{
-	u32 ntc = rx_ring->next_to_clean + 1;
-
-	/* fetch, update, and store next to clean */
-	ntc = (ntc < rx_ring->count) ? ntc : 0;
-	rx_ring->next_to_clean = ntc;
-
-	prefetch(I40E_RX_DESC(rx_ring, ntc));
-
-	/* if we are the last buffer then there is nothing else to do */
-#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
-	if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
-		return false;
-
-	rx_ring->rx_stats.non_eop_descs++;
-
-	return true;
-}
-
-/**
- * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
- * @rx_ring: rx descriptor ring to transact packets on
- * @budget: Total limit on number of packets to process
- *
- * This function provides a "bounce buffer" approach to Rx interrupt
- * processing.  The advantage to this is that on systems that have
- * expensive overhead for IOMMU access this provides a means of avoiding
- * it by maintaining the mapping of the page to the system.
- *
- * Returns amount of work completed
- **/
-static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
-{
-	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-	struct sk_buff *skb = rx_ring->skb;
-	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-	bool failure = false;
-
-	while (likely(total_rx_packets < (unsigned int)budget)) {
-		struct i40e_rx_buffer *rx_buffer;
-		union i40e_rx_desc *rx_desc;
-		unsigned int size;
-		u16 vlan_tag;
-		u8 rx_ptype;
-		u64 qword;
-
-		/* return some buffers to hardware, one at a time is too slow */
-		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
-			failure = failure ||
-				  i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
-			cleaned_count = 0;
-		}
-
-		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
-
-		/* status_error_len will always be zero for unused descriptors
-		 * because it's cleared in cleanup, and overlaps with hdr_addr
-		 * which is always zero because packet split isn't used.  If the
-		 * hardware wrote DD then the length will be non-zero.
-		 */
-		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-
-		/* This memory barrier is needed to keep us from reading
-		 * any other fields out of the rx_desc until we have
-		 * verified the descriptor has been written back.
-		 */
-		dma_rmb();
-
-		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-		if (!size)
-			break;
-
-		i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
-		rx_buffer = i40e_get_rx_buffer(rx_ring, size);
-
-		/* retrieve a buffer from the ring */
-		if (skb)
-			i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
-		else if (ring_uses_build_skb(rx_ring))
-			skb = i40e_build_skb(rx_ring, rx_buffer, size);
-		else
-			skb = i40e_construct_skb(rx_ring, rx_buffer, size);
-
-		/* exit if we failed to retrieve a buffer */
-		if (!skb) {
-			rx_ring->rx_stats.alloc_buff_failed++;
-			rx_buffer->pagecnt_bias++;
-			break;
-		}
-
-		i40e_put_rx_buffer(rx_ring, rx_buffer);
-		cleaned_count++;
-
-		if (i40e_is_non_eop(rx_ring, rx_desc, skb))
-			continue;
-
-		/* ERR_MASK will only have valid bits if EOP set, and
-		 * what we are doing here is actually checking
-		 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
-		 * the error field
-		 */
-		if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
-			dev_kfree_skb_any(skb);
-			skb = NULL;
-			continue;
-		}
-
-		if (i40e_cleanup_headers(rx_ring, skb)) {
-			skb = NULL;
-			continue;
-		}
-
-		/* probably a little skewed due to removing CRC */
-		total_rx_bytes += skb->len;
-
-		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
-			   I40E_RXD_QW1_PTYPE_SHIFT;
-
-		/* populate checksum, VLAN, and protocol */
-		i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
-		vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
-			   le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
-
-		i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
-		i40e_receive_skb(rx_ring, skb, vlan_tag);
-		skb = NULL;
-
-		/* update budget accounting */
-		total_rx_packets++;
-	}
-
-	rx_ring->skb = skb;
-
-	u64_stats_update_begin(&rx_ring->syncp);
-	rx_ring->stats.packets += total_rx_packets;
-	rx_ring->stats.bytes += total_rx_bytes;
-	u64_stats_update_end(&rx_ring->syncp);
-	rx_ring->q_vector->rx.total_packets += total_rx_packets;
-	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
-
-	/* guarantee a trip back through this routine if there was a failure */
-	return failure ? budget : (int)total_rx_packets;
-}
-
-static inline u32 i40e_buildreg_itr(const int type, u16 itr)
-{
-	u32 val;
-
-	/* We don't bother with setting the CLEARPBA bit as the data sheet
-	 * points out doing so is "meaningless since it was already
-	 * auto-cleared". The auto-clearing happens when the interrupt is
-	 * asserted.
-	 *
-	 * Hardware errata 28 also indicates that writing to a
-	 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
-	 * an event in the PBA anyway so we need to rely on the automask
-	 * to hold pending events for us until the interrupt is re-enabled
-	 *
-	 * The itr value is reported in microseconds, and the register
-	 * value is recorded in 2 microsecond units. For this reason we
-	 * only need to shift by the interval shift - 1 instead of the
-	 * full value.
-	 */
-	itr &= I40E_ITR_MASK;
-
-	val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
-	      (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
-	      (itr << (I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1));
-
-	return val;
-}
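/* Editor's sketch of the usec-to-register conversion performed above.  The
 * field positions below are illustrative placeholders, not the real
 * I40E_VFINT_DYN_CTLN1_* shift values; only the arithmetic is the point.
 */
#define EX_ITR_INDX_SHIFT	3	/* placeholder field position */
#define EX_INTERVAL_SHIFT	5	/* placeholder field position */

static inline unsigned int ex_buildreg_itr(unsigned int type, unsigned int itr_usecs)
{
	/* The caller guarantees itr_usecs is even (bit 0 is masked off), so
	 * shifting by (EX_INTERVAL_SHIFT - 1) divides by 2 and positions the
	 * field in one step: (itr / 2) << SHIFT == itr << (SHIFT - 1).
	 */
	return (type << EX_ITR_INDX_SHIFT) |
	       (itr_usecs << (EX_INTERVAL_SHIFT - 1));
}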
-
-/* a small macro to shorten up some long lines */
-#define INTREG I40E_VFINT_DYN_CTLN1
-
-/* The act of updating the ITR will cause it to immediately trigger. In order
- * to prevent this from throwing off adaptive update statistics we defer the
- * update so that it can only happen so often. So after either Tx or Rx are
- * updated we make the adaptive scheme wait until either the ITR completely
- * expires via the next_update expiration or we have been through at least
- * 3 interrupts.
- */
-#define ITR_COUNTDOWN_START 3
-
-/**
- * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
- * @vsi: the VSI we care about
- * @q_vector: q_vector for which itr is being updated and interrupt enabled
- *
- **/
-static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
-					  struct i40e_q_vector *q_vector)
-{
-	struct i40e_hw *hw = &vsi->back->hw;
-	u32 intval;
-
-	/* These will do nothing if dynamic updates are not enabled */
-	i40e_update_itr(q_vector, &q_vector->tx);
-	i40e_update_itr(q_vector, &q_vector->rx);
-
-	/* This block of logic allows us to get away with only updating
-	 * one ITR value with each interrupt. The idea is to perform a
-	 * pseudo-lazy update with the following criteria.
-	 *
-	 * 1. Rx is given higher priority than Tx if both are in same state
-	 * 2. If we must reduce an ITR, that is given highest priority.
-	 * 3. We then give priority to increasing ITR based on amount.
-	 */
-	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
-		/* Rx ITR needs to be reduced, this is highest priority */
-		intval = i40e_buildreg_itr(I40E_RX_ITR,
-					   q_vector->rx.target_itr);
-		q_vector->rx.current_itr = q_vector->rx.target_itr;
-		q_vector->itr_countdown = ITR_COUNTDOWN_START;
-	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
-		   ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
-		    (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
-		/* Tx ITR needs to be reduced, this is second priority
-		 * Tx ITR needs to be increased more than Rx, fourth priority
-		 */
-		intval = i40e_buildreg_itr(I40E_TX_ITR,
-					   q_vector->tx.target_itr);
-		q_vector->tx.current_itr = q_vector->tx.target_itr;
-		q_vector->itr_countdown = ITR_COUNTDOWN_START;
-	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
-		/* Rx ITR needs to be increased, third priority */
-		intval = i40e_buildreg_itr(I40E_RX_ITR,
-					   q_vector->rx.target_itr);
-		q_vector->rx.current_itr = q_vector->rx.target_itr;
-		q_vector->itr_countdown = ITR_COUNTDOWN_START;
-	} else {
-		/* No ITR update, lowest priority */
-		intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
-		if (q_vector->itr_countdown)
-			q_vector->itr_countdown--;
-	}
-
-	if (!test_bit(__I40E_VSI_DOWN, vsi->state))
-		wr32(hw, INTREG(q_vector->reg_idx), intval);
-}
-
-/**
- * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
- * @napi: napi struct with our devices info in it
- * @budget: amount of work driver is allowed to do this pass, in packets
- *
- * This function will clean all queues associated with a q_vector.
- *
- * Returns the amount of work done
- **/
-int i40evf_napi_poll(struct napi_struct *napi, int budget)
-{
-	struct i40e_q_vector *q_vector =
-			       container_of(napi, struct i40e_q_vector, napi);
-	struct i40e_vsi *vsi = q_vector->vsi;
-	struct i40e_ring *ring;
-	bool clean_complete = true;
-	bool arm_wb = false;
-	int budget_per_ring;
-	int work_done = 0;
-
-	if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
-		napi_complete(napi);
-		return 0;
-	}
-
-	/* Since the actual Tx work is minimal, we can give the Tx a larger
-	 * budget and be more aggressive about cleaning up the Tx descriptors.
-	 */
-	i40e_for_each_ring(ring, q_vector->tx) {
-		if (!i40e_clean_tx_irq(vsi, ring, budget)) {
-			clean_complete = false;
-			continue;
-		}
-		arm_wb |= ring->arm_wb;
-		ring->arm_wb = false;
-	}
-
-	/* Handle case where we are called by netpoll with a budget of 0 */
-	if (budget <= 0)
-		goto tx_only;
-
-	/* We attempt to distribute budget to each Rx queue fairly, but don't
-	 * allow the budget to go below 1 because that would exit polling early.
-	 */
-	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
-
-	i40e_for_each_ring(ring, q_vector->rx) {
-		int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
-
-		work_done += cleaned;
-		/* if we clean as many as budgeted, we must not be done */
-		if (cleaned >= budget_per_ring)
-			clean_complete = false;
-	}
-
-	/* If work not completed, return budget and polling will return */
-	if (!clean_complete) {
-		int cpu_id = smp_processor_id();
-
-		/* It is possible that the interrupt affinity has changed but,
-		 * if the cpu is pegged at 100%, polling will never exit while
-		 * traffic continues and the interrupt will be stuck on this
-		 * cpu.  We check to make sure affinity is correct before we
-		 * continue to poll, otherwise we must stop polling so the
-		 * interrupt can move to the correct cpu.
-		 */
-		if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
-			/* Tell napi that we are done polling */
-			napi_complete_done(napi, work_done);
-
-			/* Force an interrupt */
-			i40evf_force_wb(vsi, q_vector);
-
-			/* Return budget-1 so that polling stops */
-			return budget - 1;
-		}
-tx_only:
-		if (arm_wb) {
-			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
-			i40e_enable_wb_on_itr(vsi, q_vector);
-		}
-		return budget;
-	}
-
-	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
-		q_vector->arm_wb_state = false;
-
-	/* Work is done so exit the polling mode and re-enable the interrupt */
-	napi_complete_done(napi, work_done);
-
-	i40e_update_enable_itr(vsi, q_vector);
-
-	return min(work_done, budget - 1);
-}
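/* Editor's note on the Rx budget split above: budget_per_ring is clamped to
 * at least 1 so a vector with many ring pairs cannot hand a ring a budget of
 * zero, which would make that ring look "done" and end polling early.  A
 * stand-alone illustration (the helper name is made up):
 */
static inline int ex_budget_per_ring(int budget, int num_ringpairs)
{
	int per_ring = budget / num_ringpairs;

	return per_ring ? per_ring : 1;
}
/* ex_budget_per_ring(64, 4)  == 16   (default NAPI budget, 4 ring pairs)
 * ex_budget_per_ring(64, 96) == 1    (clamped; polling keeps making progress)
 */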
-
-/**
- * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
- * @skb:     send buffer
- * @tx_ring: ring to send buffer on
- * @flags:   the tx flags to be set
- *
- * Checks the skb and set up correspondingly several generic transmit flags
- * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
- *
- * Returns an error code to indicate the frame should be dropped upon error,
- * otherwise returns 0 to indicate the flags have been set properly.
- **/
-static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
-					       struct i40e_ring *tx_ring,
-					       u32 *flags)
-{
-	__be16 protocol = skb->protocol;
-	u32  tx_flags = 0;
-
-	if (protocol == htons(ETH_P_8021Q) &&
-	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
-		/* When HW VLAN acceleration is turned off by the user the
-		 * stack sets the protocol to 8021q so that the driver
-		 * can take any steps required to support the SW only
-		 * VLAN handling.  In our case the driver doesn't need
-		 * to take any further steps so just set the protocol
-		 * to the encapsulated ethertype.
-		 */
-		skb->protocol = vlan_get_protocol(skb);
-		goto out;
-	}
-
-	/* if we have a HW VLAN tag being added, default to the HW one */
-	if (skb_vlan_tag_present(skb)) {
-		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
-		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
-	/* else if it is a SW VLAN, check the next protocol and store the tag */
-	} else if (protocol == htons(ETH_P_8021Q)) {
-		struct vlan_hdr *vhdr, _vhdr;
-
-		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
-		if (!vhdr)
-			return -EINVAL;
-
-		protocol = vhdr->h_vlan_encapsulated_proto;
-		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
-		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
-	}
-
-out:
-	*flags = tx_flags;
-	return 0;
-}
-
-/**
- * i40e_tso - set up the tso context descriptor
- * @first:    pointer to first Tx buffer for xmit
- * @hdr_len:  ptr to the size of the packet header
- * @cd_type_cmd_tso_mss: Quad Word 1
- *
- * Returns 0 if no TSO can happen, 1 if tso is going, or error
- **/
-static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
-		    u64 *cd_type_cmd_tso_mss)
-{
-	struct sk_buff *skb = first->skb;
-	u64 cd_cmd, cd_tso_len, cd_mss;
-	union {
-		struct iphdr *v4;
-		struct ipv6hdr *v6;
-		unsigned char *hdr;
-	} ip;
-	union {
-		struct tcphdr *tcp;
-		struct udphdr *udp;
-		unsigned char *hdr;
-	} l4;
-	u32 paylen, l4_offset;
-	u16 gso_segs, gso_size;
-	int err;
-
-	if (skb->ip_summed != CHECKSUM_PARTIAL)
-		return 0;
-
-	if (!skb_is_gso(skb))
-		return 0;
-
-	err = skb_cow_head(skb, 0);
-	if (err < 0)
-		return err;
-
-	ip.hdr = skb_network_header(skb);
-	l4.hdr = skb_transport_header(skb);
-
-	/* initialize outer IP header fields */
-	if (ip.v4->version == 4) {
-		ip.v4->tot_len = 0;
-		ip.v4->check = 0;
-	} else {
-		ip.v6->payload_len = 0;
-	}
-
-	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
-					 SKB_GSO_GRE_CSUM |
-					 SKB_GSO_IPXIP4 |
-					 SKB_GSO_IPXIP6 |
-					 SKB_GSO_UDP_TUNNEL |
-					 SKB_GSO_UDP_TUNNEL_CSUM)) {
-		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
-		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
-			l4.udp->len = 0;
-
-			/* determine offset of outer transport header */
-			l4_offset = l4.hdr - skb->data;
-
-			/* remove payload length from outer checksum */
-			paylen = skb->len - l4_offset;
-			csum_replace_by_diff(&l4.udp->check,
-					     (__force __wsum)htonl(paylen));
-		}
-
-		/* reset pointers to inner headers */
-		ip.hdr = skb_inner_network_header(skb);
-		l4.hdr = skb_inner_transport_header(skb);
-
-		/* initialize inner IP header fields */
-		if (ip.v4->version == 4) {
-			ip.v4->tot_len = 0;
-			ip.v4->check = 0;
-		} else {
-			ip.v6->payload_len = 0;
-		}
-	}
-
-	/* determine offset of inner transport header */
-	l4_offset = l4.hdr - skb->data;
-
-	/* remove payload length from inner checksum */
-	paylen = skb->len - l4_offset;
-	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
-
-	/* compute length of segmentation header */
-	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
-
-	/* pull values out of skb_shinfo */
-	gso_size = skb_shinfo(skb)->gso_size;
-	gso_segs = skb_shinfo(skb)->gso_segs;
-
-	/* update GSO size and bytecount with header size */
-	first->gso_segs = gso_segs;
-	first->bytecount += (first->gso_segs - 1) * *hdr_len;
-
-	/* find the field values */
-	cd_cmd = I40E_TX_CTX_DESC_TSO;
-	cd_tso_len = skb->len - *hdr_len;
-	cd_mss = gso_size;
-	*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
-				(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
-				(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
-	return 1;
-}
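/* Editor's worked example of the TSO accounting above, with made-up but
 * plausible numbers: a 4398 byte skb carrying 54 bytes of Ethernet/IP/TCP
 * headers and 4344 bytes of payload, gso_size 1448 (so gso_segs == 3).
 *
 *   cd_tso_len = skb->len - hdr_len            = 4398 - 54     = 4344
 *   bytecount  = skb->len + (segs - 1) * hdr   = 4398 + 2 * 54 = 4506
 *
 * The extra 2 * 54 bytes account for the headers the hardware replicates on
 * the wire for the second and third segments.
 */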
-
-/**
- * i40e_tx_enable_csum - Enable Tx checksum offloads
- * @skb: send buffer
- * @tx_flags: pointer to Tx flags currently set
- * @td_cmd: Tx descriptor command bits to set
- * @td_offset: Tx descriptor header offsets to set
- * @tx_ring: Tx descriptor ring
- * @cd_tunneling: ptr to context desc bits
- **/
-static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
-			       u32 *td_cmd, u32 *td_offset,
-			       struct i40e_ring *tx_ring,
-			       u32 *cd_tunneling)
-{
-	union {
-		struct iphdr *v4;
-		struct ipv6hdr *v6;
-		unsigned char *hdr;
-	} ip;
-	union {
-		struct tcphdr *tcp;
-		struct udphdr *udp;
-		unsigned char *hdr;
-	} l4;
-	unsigned char *exthdr;
-	u32 offset, cmd = 0;
-	__be16 frag_off;
-	u8 l4_proto = 0;
-
-	if (skb->ip_summed != CHECKSUM_PARTIAL)
-		return 0;
-
-	ip.hdr = skb_network_header(skb);
-	l4.hdr = skb_transport_header(skb);
-
-	/* compute outer L2 header size */
-	offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
-
-	if (skb->encapsulation) {
-		u32 tunnel = 0;
-		/* define outer network header type */
-		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
-			tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
-				  I40E_TX_CTX_EXT_IP_IPV4 :
-				  I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
-
-			l4_proto = ip.v4->protocol;
-		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
-			tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
-
-			exthdr = ip.hdr + sizeof(*ip.v6);
-			l4_proto = ip.v6->nexthdr;
-			if (l4.hdr != exthdr)
-				ipv6_skip_exthdr(skb, exthdr - skb->data,
-						 &l4_proto, &frag_off);
-		}
-
-		/* define outer transport */
-		switch (l4_proto) {
-		case IPPROTO_UDP:
-			tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
-			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
-			break;
-		case IPPROTO_GRE:
-			tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
-			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
-			break;
-		case IPPROTO_IPIP:
-		case IPPROTO_IPV6:
-			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
-			l4.hdr = skb_inner_network_header(skb);
-			break;
-		default:
-			if (*tx_flags & I40E_TX_FLAGS_TSO)
-				return -1;
-
-			skb_checksum_help(skb);
-			return 0;
-		}
-
-		/* compute outer L3 header size */
-		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
-			  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
-
-		/* switch IP header pointer from outer to inner header */
-		ip.hdr = skb_inner_network_header(skb);
-
-		/* compute tunnel header size */
-		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
-			  I40E_TXD_CTX_QW0_NATLEN_SHIFT;
-
-		/* indicate if we need to offload outer UDP header */
-		if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
-		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
-		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
-			tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
-
-		/* record tunnel offload values */
-		*cd_tunneling |= tunnel;
-
-		/* switch L4 header pointer from outer to inner */
-		l4.hdr = skb_inner_transport_header(skb);
-		l4_proto = 0;
-
-		/* reset type as we transition from outer to inner headers */
-		*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
-		if (ip.v4->version == 4)
-			*tx_flags |= I40E_TX_FLAGS_IPV4;
-		if (ip.v6->version == 6)
-			*tx_flags |= I40E_TX_FLAGS_IPV6;
-	}
-
-	/* Enable IP checksum offloads */
-	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
-		l4_proto = ip.v4->protocol;
-		/* the stack computes the IP header already, the only time we
-		 * need the hardware to recompute it is in the case of TSO.
-		 */
-		cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
-		       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
-		       I40E_TX_DESC_CMD_IIPT_IPV4;
-	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
-		cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
-
-		exthdr = ip.hdr + sizeof(*ip.v6);
-		l4_proto = ip.v6->nexthdr;
-		if (l4.hdr != exthdr)
-			ipv6_skip_exthdr(skb, exthdr - skb->data,
-					 &l4_proto, &frag_off);
-	}
-
-	/* compute inner L3 header size */
-	offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
-
-	/* Enable L4 checksum offloads */
-	switch (l4_proto) {
-	case IPPROTO_TCP:
-		/* enable checksum offloads */
-		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
-		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		break;
-	case IPPROTO_SCTP:
-		/* enable SCTP checksum offload */
-		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
-		offset |= (sizeof(struct sctphdr) >> 2) <<
-			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		break;
-	case IPPROTO_UDP:
-		/* enable UDP checksum offload */
-		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
-		offset |= (sizeof(struct udphdr) >> 2) <<
-			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		break;
-	default:
-		if (*tx_flags & I40E_TX_FLAGS_TSO)
-			return -1;
-		skb_checksum_help(skb);
-		return 0;
-	}
-
-	*td_cmd |= cmd;
-	*td_offset |= offset;
-
-	return 1;
-}
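/* Editor's note on the header-length encodings built above.  MACLEN is
 * stored in 2-byte units, IPLEN in 4-byte units, and the TCP data offset
 * (doff) is already in 4-byte units.  With a plain untagged IPv4/TCP frame
 * and no TCP options:
 *
 *   MACLEN = 14 / 2 = 7    (Ethernet header)
 *   IPLEN  = 20 / 4 = 5    (IPv4 header, no options)
 *   L4LEN  = doff   = 5    (20-byte TCP header)
 *
 * which is why the code divides (ip.hdr - skb->data) by 2 and
 * (l4.hdr - ip.hdr) by 4 before shifting into the descriptor offset field.
 */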
-
-/**
- * i40e_create_tx_ctx - Build the Tx context descriptor
- * @tx_ring:  ring to create the descriptor on
- * @cd_type_cmd_tso_mss: Quad Word 1
- * @cd_tunneling: Quad Word 0 - bits 0-31
- * @cd_l2tag2: Quad Word 0 - bits 32-63
- **/
-static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
-			       const u64 cd_type_cmd_tso_mss,
-			       const u32 cd_tunneling, const u32 cd_l2tag2)
-{
-	struct i40e_tx_context_desc *context_desc;
-	int i = tx_ring->next_to_use;
-
-	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
-	    !cd_tunneling && !cd_l2tag2)
-		return;
-
-	/* grab the next descriptor */
-	context_desc = I40E_TX_CTXTDESC(tx_ring, i);
-
-	i++;
-	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
-
-	/* cpu_to_le32 and assign to struct fields */
-	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
-	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
-	context_desc->rsvd = cpu_to_le16(0);
-	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
-}
-
-/**
- * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
- * @skb:      send buffer
- *
- * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
- * and so we need to figure out the cases where we need to linearize the skb.
- *
- * For TSO we need to count the TSO header and segment payload separately.
- * As such we need to check cases where we have 7 fragments or more as we
- * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
- * the segment payload in the first descriptor, and another 7 for the
- * fragments.
- **/
-bool __i40evf_chk_linearize(struct sk_buff *skb)
-{
-	const struct skb_frag_struct *frag, *stale;
-	int nr_frags, sum;
-
-	/* no need to check if number of frags is less than 7 */
-	nr_frags = skb_shinfo(skb)->nr_frags;
-	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
-		return false;
-
-	/* We need to walk through the list and validate that each group
-	 * of 6 fragments totals at least gso_size.
-	 */
-	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
-	frag = &skb_shinfo(skb)->frags[0];
-
-	/* Initialize size to the negative value of gso_size minus 1.  We
-	 * use this as the worst case scenario in which the frag ahead
-	 * of us only provides one byte which is why we are limited to 6
-	 * descriptors for a single transmit as the header and previous
-	 * fragment are already consuming 2 descriptors.
-	 */
-	sum = 1 - skb_shinfo(skb)->gso_size;
-
-	/* Add size of frags 0 through 4 to create our initial sum */
-	sum += skb_frag_size(frag++);
-	sum += skb_frag_size(frag++);
-	sum += skb_frag_size(frag++);
-	sum += skb_frag_size(frag++);
-	sum += skb_frag_size(frag++);
-
-	/* Walk through fragments adding latest fragment, testing it, and
-	 * then removing stale fragments from the sum.
-	 */
-	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
-		int stale_size = skb_frag_size(stale);
-
-		sum += skb_frag_size(frag++);
-
-		/* The stale fragment may present us with a smaller
-		 * descriptor than the actual fragment size. To account
-		 * for that we need to remove all the data on the front and
-		 * figure out what the remainder would be in the last
-		 * descriptor associated with the fragment.
-		 */
-		if (stale_size > I40E_MAX_DATA_PER_TXD) {
-			int align_pad = -(stale->page_offset) &
-					(I40E_MAX_READ_REQ_SIZE - 1);
-
-			sum -= align_pad;
-			stale_size -= align_pad;
-
-			do {
-				sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
-				stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
-			} while (stale_size > I40E_MAX_DATA_PER_TXD);
-		}
-
-		/* if sum is negative we failed to make sufficient progress */
-		if (sum < 0)
-			return true;
-
-		if (!nr_frags--)
-			break;
-
-		sum -= stale_size;
-	}
-
-	return false;
-}
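/* Editor's sketch of the sliding-window test above on a plain array of
 * fragment sizes, written as standalone C.  It is simplified: it ignores the
 * oversized-fragment correction (stale_size > I40E_MAX_DATA_PER_TXD) and
 * walks every window, but it shows the core rule that any 6 consecutive
 * fragments must cover at least gso_size bytes.
 */
#include <stdbool.h>

static bool ex_needs_linearize(const int *frag_size, int nr_frags, int gso_size)
{
	int sum, i, stale = 0;

	if (nr_frags < 7)			/* I40E_MAX_BUFFER_TXD - 1 */
		return false;

	sum = 1 - gso_size;			/* window must reach gso_size */
	for (i = 0; i < 5; i++)			/* fragments 0..4 seed the window */
		sum += frag_size[i];

	for (i = 5; i < nr_frags; i++, stale++) {
		sum += frag_size[i];		/* newest fragment enters */
		if (sum < 0)			/* 6-fragment window too small */
			return true;
		sum -= frag_size[stale];	/* oldest fragment leaves */
	}
	return false;
}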
-
-/**
- * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size of the buffer we want to ensure is available
- *
- * Returns -EBUSY if a stop is needed, else 0
- **/
-int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-	/* Memory barrier before checking head and tail */
-	smp_mb();
-
-	/* Check again in a case another CPU has just made room available. */
-	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
-		return -EBUSY;
-
-	/* A reprieve! - use start_queue because it doesn't call schedule */
-	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
-	++tx_ring->tx_stats.restart_queue;
-	return 0;
-}
-
-/**
- * i40evf_tx_map - Build the Tx descriptor
- * @tx_ring:  ring to send buffer on
- * @skb:      send buffer
- * @first:    first buffer info buffer to use
- * @tx_flags: collected send information
- * @hdr_len:  size of the packet header
- * @td_cmd:   the command field in the descriptor
- * @td_offset: offset for checksum or crc
- **/
-static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-				 struct i40e_tx_buffer *first, u32 tx_flags,
-				 const u8 hdr_len, u32 td_cmd, u32 td_offset)
-{
-	unsigned int data_len = skb->data_len;
-	unsigned int size = skb_headlen(skb);
-	struct skb_frag_struct *frag;
-	struct i40e_tx_buffer *tx_bi;
-	struct i40e_tx_desc *tx_desc;
-	u16 i = tx_ring->next_to_use;
-	u32 td_tag = 0;
-	dma_addr_t dma;
-
-	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
-		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
-		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
-			 I40E_TX_FLAGS_VLAN_SHIFT;
-	}
-
-	first->tx_flags = tx_flags;
-
-	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
-
-	tx_desc = I40E_TX_DESC(tx_ring, i);
-	tx_bi = first;
-
-	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
-		unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
-
-		if (dma_mapping_error(tx_ring->dev, dma))
-			goto dma_error;
-
-		/* record length, and DMA address */
-		dma_unmap_len_set(tx_bi, len, size);
-		dma_unmap_addr_set(tx_bi, dma, dma);
-
-		/* align size to end of page */
-		max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
-		tx_desc->buffer_addr = cpu_to_le64(dma);
-
-		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
-			tx_desc->cmd_type_offset_bsz =
-				build_ctob(td_cmd, td_offset,
-					   max_data, td_tag);
-
-			tx_desc++;
-			i++;
-
-			if (i == tx_ring->count) {
-				tx_desc = I40E_TX_DESC(tx_ring, 0);
-				i = 0;
-			}
-
-			dma += max_data;
-			size -= max_data;
-
-			max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
-			tx_desc->buffer_addr = cpu_to_le64(dma);
-		}
-
-		if (likely(!data_len))
-			break;
-
-		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
-							  size, td_tag);
-
-		tx_desc++;
-		i++;
-
-		if (i == tx_ring->count) {
-			tx_desc = I40E_TX_DESC(tx_ring, 0);
-			i = 0;
-		}
-
-		size = skb_frag_size(frag);
-		data_len -= size;
-
-		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
-				       DMA_TO_DEVICE);
-
-		tx_bi = &tx_ring->tx_bi[i];
-	}
-
-	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
-
-	i++;
-	if (i == tx_ring->count)
-		i = 0;
-
-	tx_ring->next_to_use = i;
-
-	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
-
-	/* write last descriptor with RS and EOP bits */
-	td_cmd |= I40E_TXD_CMD;
-	tx_desc->cmd_type_offset_bsz =
-			build_ctob(td_cmd, td_offset, size, td_tag);
-
-	/* Force memory writes to complete before letting h/w know there
-	 * are new descriptors to fetch.
-	 *
-	 * We also use this memory barrier to make certain all of the
-	 * status bits have been updated before next_to_watch is written.
-	 */
-	wmb();
-
-	/* set next_to_watch value indicating a packet is present */
-	first->next_to_watch = tx_desc;
-
-	/* notify HW of packet */
-	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
-		writel(i, tx_ring->tail);
-
-		/* we need this if more than one processor can write to our tail
-		 * at a time, it synchronizes IO on IA64/Altix systems
-		 */
-		mmiowb();
-	}
-
-	return;
-
-dma_error:
-	dev_info(tx_ring->dev, "TX DMA map failed\n");
-
-	/* clear dma mappings for failed tx_bi map */
-	for (;;) {
-		tx_bi = &tx_ring->tx_bi[i];
-		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
-		if (tx_bi == first)
-			break;
-		if (i == 0)
-			i = tx_ring->count;
-		i--;
-	}
-
-	tx_ring->next_to_use = i;
-}
-
-/**
- * i40e_xmit_frame_ring - Sends buffer on Tx ring
- * @skb:     send buffer
- * @tx_ring: ring to send buffer on
- *
- * Returns NETDEV_TX_OK if sent, else an error code
- **/
-static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
-					struct i40e_ring *tx_ring)
-{
-	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
-	u32 cd_tunneling = 0, cd_l2tag2 = 0;
-	struct i40e_tx_buffer *first;
-	u32 td_offset = 0;
-	u32 tx_flags = 0;
-	__be16 protocol;
-	u32 td_cmd = 0;
-	u8 hdr_len = 0;
-	int tso, count;
-
-	/* prefetch the data, we'll need it later */
-	prefetch(skb->data);
-
-	i40e_trace(xmit_frame_ring, skb, tx_ring);
-
-	count = i40e_xmit_descriptor_count(skb);
-	if (i40e_chk_linearize(skb, count)) {
-		if (__skb_linearize(skb)) {
-			dev_kfree_skb_any(skb);
-			return NETDEV_TX_OK;
-		}
-		count = i40e_txd_use_count(skb->len);
-		tx_ring->tx_stats.tx_linearize++;
-	}
-
-	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
-	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
-	 *       + 4 desc gap to avoid the cache line where head is,
-	 *       + 1 desc for context descriptor,
-	 * otherwise try next time
-	 */
-	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
-		tx_ring->tx_stats.tx_busy++;
-		return NETDEV_TX_BUSY;
-	}
-
-	/* record the location of the first descriptor for this packet */
-	first = &tx_ring->tx_bi[tx_ring->next_to_use];
-	first->skb = skb;
-	first->bytecount = skb->len;
-	first->gso_segs = 1;
-
-	/* prepare the xmit flags */
-	if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
-		goto out_drop;
-
-	/* obtain protocol of skb */
-	protocol = vlan_get_protocol(skb);
-
-	/* setup IPv4/IPv6 offloads */
-	if (protocol == htons(ETH_P_IP))
-		tx_flags |= I40E_TX_FLAGS_IPV4;
-	else if (protocol == htons(ETH_P_IPV6))
-		tx_flags |= I40E_TX_FLAGS_IPV6;
-
-	tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
-
-	if (tso < 0)
-		goto out_drop;
-	else if (tso)
-		tx_flags |= I40E_TX_FLAGS_TSO;
-
-	/* Always offload the checksum, since it's in the data descriptor */
-	tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
-				  tx_ring, &cd_tunneling);
-	if (tso < 0)
-		goto out_drop;
-
-	skb_tx_timestamp(skb);
-
-	/* always enable CRC insertion offload */
-	td_cmd |= I40E_TX_DESC_CMD_ICRC;
-
-	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
-			   cd_tunneling, cd_l2tag2);
-
-	i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
-		      td_cmd, td_offset);
-
-	return NETDEV_TX_OK;
-
-out_drop:
-	i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
-	dev_kfree_skb_any(first->skb);
-	first->skb = NULL;
-	return NETDEV_TX_OK;
-}
-
-/**
- * i40evf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
- * @skb:    send buffer
- * @netdev: network interface device structure
- *
- * Returns NETDEV_TX_OK if sent, else an error code
- **/
-netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	struct i40e_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
-
-	/* hardware can't handle really short frames, hardware padding works
-	 * beyond this point
-	 */
-	if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
-		if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
-			return NETDEV_TX_OK;
-		skb->len = I40E_MIN_TX_LEN;
-		skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
-	}
-
-	return i40e_xmit_frame_ring(skb, tx_ring);
-}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
deleted file mode 100644
index 3b5a63b3236e..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ /dev/null
@@ -1,524 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_TXRX_H_
-#define _I40E_TXRX_H_
-
-/* Interrupt Throttling and Rate Limiting Goodies */
-#define I40E_DEFAULT_IRQ_WORK      256
-
-/* The datasheet for the X710 and XL710 indicates that the maximum value for
- * the ITR is 8160usec which is then called out as 0xFF0 with a 2usec
- * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
- * the register value, which is divided by 2, let's use the actual values and
- * avoid an excessive amount of translation.
- */
-#define I40E_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
-#define I40E_ITR_MASK		0x1FFE	/* mask for ITR register value */
-#define I40E_MIN_ITR		     2	/* reg uses 2 usec resolution */
-#define I40E_ITR_100K		    10	/* all values below must be even */
-#define I40E_ITR_50K		    20
-#define I40E_ITR_20K		    50
-#define I40E_ITR_18K		    60
-#define I40E_ITR_8K		   122
-#define I40E_MAX_ITR		  8160	/* maximum value as per datasheet */
-#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)
-#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK)
-#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))
-
-#define I40E_ITR_RX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
-#define I40E_ITR_TX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
-
-/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
- * the value of the rate limit is non-zero
- */
-#define INTRL_ENA                  BIT(6)
-#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
-#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
-#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
-#define I40E_INTRL_8K              125     /* 8000 ints/sec */
-#define I40E_INTRL_62K             16      /* 62500 ints/sec */
-#define I40E_INTRL_83K             12      /* 83333 ints/sec */
-
-#define I40E_QUEUE_END_OF_LIST 0x7FF
-
-/* this enum matches hardware bits and is meant to be used by DYN_CTLN
- * registers and QINT registers, or more generally anywhere in the manual
- * mentioning ITR_INDX.  ITR_NONE cannot be used as an index 'n' into any
- * register but instead is a special value meaning "don't update" ITR0/1/2.
- */
-enum i40e_dyn_idx_t {
-	I40E_IDX_ITR0 = 0,
-	I40E_IDX_ITR1 = 1,
-	I40E_IDX_ITR2 = 2,
-	I40E_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
-};
-
-/* these are indexes into ITRN registers */
-#define I40E_RX_ITR    I40E_IDX_ITR0
-#define I40E_TX_ITR    I40E_IDX_ITR1
-#define I40E_PE_ITR    I40E_IDX_ITR2
-
-/* Supported RSS offloads */
-#define I40E_DEFAULT_RSS_HENA ( \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
-
-#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
-
-/* Supported Rx Buffer Sizes (a multiple of 128) */
-#define I40E_RXBUFFER_256   256
-#define I40E_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
-#define I40E_RXBUFFER_2048  2048
-#define I40E_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
-#define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */
-
-/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
- * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
- * this adds up to 512 bytes of extra data, meaning the smallest allocation
- * we could have is 1K.
- * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
- * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
- */
-#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
-#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
-#define i40e_rx_desc i40e_32byte_rx_desc
-
-#define I40E_RX_DMA_ATTR \
-	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
-
-/* Attempt to maximize the headroom available for incoming frames.  We
- * use a 2K buffer for receives and need 1536/1534 to store the data for
- * the frame.  This leaves us with 512 bytes of room.  From that we need
- * to deduct the space needed for the shared info and the padding needed
- * to IP align the frame.
- *
- * Note: For cache line sizes 256 or larger this value is going to end
- *	 up negative.  In these cases we should fall back to the legacy
- *	 receive path.
- */
-#if (PAGE_SIZE < 8192)
-#define I40E_2K_TOO_SMALL_WITH_PADDING \
-((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))
-
-static inline int i40e_compute_pad(int rx_buf_len)
-{
-	int page_size, pad_size;
-
-	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
-	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
-
-	return pad_size;
-}
-
-static inline int i40e_skb_pad(void)
-{
-	int rx_buf_len;
-
-	/* If a 2K buffer cannot handle a standard Ethernet frame then
-	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
-	 *
-	 * For a 3K buffer we need to add enough padding to allow for
-	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
-	 * cache-line alignment.
-	 */
-	if (I40E_2K_TOO_SMALL_WITH_PADDING)
-		rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
-	else
-		rx_buf_len = I40E_RXBUFFER_1536;
-
-	/* if needed make room for NET_IP_ALIGN */
-	rx_buf_len -= NET_IP_ALIGN;
-
-	return i40e_compute_pad(rx_buf_len);
-}
-
-#define I40E_SKB_PAD i40e_skb_pad()
-#else
-#define I40E_2K_TOO_SMALL_WITH_PADDING false
-#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
-#endif
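/* Editor's worked example of the padding computation above, assuming 4K
 * pages, NET_IP_ALIGN == 2 and a shared-info footprint of roughly 320 bytes
 * (the exact value depends on kernel config, so treat the numbers as
 * illustrative only):
 *
 *   rx_buf_len = 1536 - 2              = 1534
 *   page_size  = ALIGN(1534, 2048)     = 2048
 *   pad        = (2048 - 320) - 1534   = 194 bytes of headroom
 *
 * If I40E_2K_TOO_SMALL_WITH_PADDING were true, the same arithmetic would be
 * run against a 3K buffer instead.
 */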
-
-/**
- * i40e_test_staterr - tests bits in Rx descriptor status and error fields
- * @rx_desc: pointer to receive descriptor (in le64 format)
- * @stat_err_bits: value to mask
- *
- * This function does some fast chicanery in order to return the
- * value of the mask which is really only used for boolean tests.
- * The status_error_len doesn't need to be shifted because it begins
- * at offset zero.
- */
-static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
-				     const u64 stat_err_bits)
-{
-	return !!(rx_desc->wb.qword1.status_error_len &
-		  cpu_to_le64(stat_err_bits));
-}
-
-/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define I40E_RX_BUFFER_WRITE	32	/* Must be power of 2 */
-#define I40E_RX_INCREMENT(r, i) \
-	do {					\
-		(i)++;				\
-		if ((i) == (r)->count)		\
-			i = 0;			\
-		r->next_to_clean = i;		\
-	} while (0)
-
-#define I40E_RX_NEXT_DESC(r, i, n)		\
-	do {					\
-		(i)++;				\
-		if ((i) == (r)->count)		\
-			i = 0;			\
-		(n) = I40E_RX_DESC((r), (i));	\
-	} while (0)
-
-#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)		\
-	do {						\
-		I40E_RX_NEXT_DESC((r), (i), (n));	\
-		prefetch((n));				\
-	} while (0)
-
-#define I40E_MAX_BUFFER_TXD	8
-#define I40E_MIN_TX_LEN		17
-
-/* The size limit for a transmit buffer in a descriptor is (16K - 1).
- * In order to align with the read requests we will align the value to
- * the nearest 4K which represents our maximum read request size.
- */
-#define I40E_MAX_READ_REQ_SIZE		4096
-#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
-#define I40E_MAX_DATA_PER_TXD_ALIGNED \
-	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
-
-/**
- * i40e_txd_use_count  - estimate the number of descriptors needed for Tx
- * @size: transmit request size in bytes
- *
- * Due to hardware alignment restrictions (4K alignment), we need to
- * assume that we can have no more than 12K of data per descriptor, even
- * though each descriptor can take up to 16K - 1 bytes of aligned memory.
- * Thus, we need to divide by 12K. But division is slow! Instead,
- * we decompose the operation into shifts and one relatively cheap
- * multiply operation.
- *
- * To divide by 12K, we first divide by 4K, then divide by 3:
- *     To divide by 4K, shift right by 12 bits
- *     To divide by 3, multiply by 85, then divide by 256
- *     (Divide by 256 is done by shifting right by 8 bits)
- * Finally, we add one to round up. Because 256 isn't an exact multiple of
- * 3, we'll underestimate near each multiple of 12K. This is actually more
- * accurate as we have 4K - 1 of wiggle room that we can fit into the last
- * segment.  For our purposes this is accurate out to 1M which is orders of
- * magnitude greater than our largest possible GSO size.
- *
- * This would then be implemented as:
- *     return (((size >> 12) * 85) >> 8) + 1;
- *
- * Since multiplication and division are commutative, we can reorder
- * operations into:
- *     return ((size * 85) >> 20) + 1;
- */
-static inline unsigned int i40e_txd_use_count(unsigned int size)
-{
-	return ((size * 85) >> 20) + 1;
-}
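/* Editor's sanity check of the multiply-by-85 trick above, written as
 * standalone C.  The constants mirror the comment; the function names are
 * made up for illustration.
 */
#include <assert.h>

static inline unsigned int ex_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;		/* same arithmetic as above */
}

static void ex_txd_use_count_check(void)
{
	assert(ex_txd_use_count(1500) == 1);	/* one MTU-sized buffer */
	assert(ex_txd_use_count(16383) == 2);	/* 16K - 1, the per-descriptor max */
	assert(ex_txd_use_count(65536) == 6);	/* 64K GSO payload */
}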
-
-/* Tx Descriptors needed, worst case */
-#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
-#define I40E_MIN_DESC_PENDING	4
-
-#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
-#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
-#define I40E_TX_FLAGS_TSO		BIT(3)
-#define I40E_TX_FLAGS_IPV4		BIT(4)
-#define I40E_TX_FLAGS_IPV6		BIT(5)
-#define I40E_TX_FLAGS_FCCRC		BIT(6)
-#define I40E_TX_FLAGS_FSO		BIT(7)
-#define I40E_TX_FLAGS_FD_SB		BIT(9)
-#define I40E_TX_FLAGS_VXLAN_TUNNEL	BIT(10)
-#define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
-#define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
-#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
-#define I40E_TX_FLAGS_VLAN_SHIFT	16
-
-struct i40e_tx_buffer {
-	struct i40e_tx_desc *next_to_watch;
-	union {
-		struct sk_buff *skb;
-		void *raw_buf;
-	};
-	unsigned int bytecount;
-	unsigned short gso_segs;
-
-	DEFINE_DMA_UNMAP_ADDR(dma);
-	DEFINE_DMA_UNMAP_LEN(len);
-	u32 tx_flags;
-};
-
-struct i40e_rx_buffer {
-	dma_addr_t dma;
-	struct page *page;
-#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
-	__u32 page_offset;
-#else
-	__u16 page_offset;
-#endif
-	__u16 pagecnt_bias;
-};
-
-struct i40e_queue_stats {
-	u64 packets;
-	u64 bytes;
-};
-
-struct i40e_tx_queue_stats {
-	u64 restart_queue;
-	u64 tx_busy;
-	u64 tx_done_old;
-	u64 tx_linearize;
-	u64 tx_force_wb;
-	int prev_pkt_ctr;
-	u64 tx_lost_interrupt;
-};
-
-struct i40e_rx_queue_stats {
-	u64 non_eop_descs;
-	u64 alloc_page_failed;
-	u64 alloc_buff_failed;
-	u64 page_reuse_count;
-	u64 realloc_count;
-};
-
-enum i40e_ring_state_t {
-	__I40E_TX_FDIR_INIT_DONE,
-	__I40E_TX_XPS_INIT_DONE,
-	__I40E_RING_STATE_NBITS /* must be last */
-};
-
-/* some useful defines for virtchannel interface, which
- * is the only remaining user of header split
- */
-#define I40E_RX_DTYPE_NO_SPLIT      0
-#define I40E_RX_DTYPE_HEADER_SPLIT  1
-#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
-#define I40E_RX_SPLIT_L2      0x1
-#define I40E_RX_SPLIT_IP      0x2
-#define I40E_RX_SPLIT_TCP_UDP 0x4
-#define I40E_RX_SPLIT_SCTP    0x8
-
-/* struct that defines a descriptor ring, associated with a VSI */
-struct i40e_ring {
-	struct i40e_ring *next;		/* pointer to next ring in q_vector */
-	void *desc;			/* Descriptor ring memory */
-	struct device *dev;		/* Used for DMA mapping */
-	struct net_device *netdev;	/* netdev ring maps to */
-	union {
-		struct i40e_tx_buffer *tx_bi;
-		struct i40e_rx_buffer *rx_bi;
-	};
-	DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
-	u16 queue_index;		/* Queue number of ring */
-	u8 dcb_tc;			/* Traffic class of ring */
-	u8 __iomem *tail;
-
-	/* high bit set means dynamic, use accessors routines to read/write.
-	 * hardware only supports 2us resolution for the ITR registers.
-	 * these values always store the USER setting, and must be converted
-	 * before programming to a register.
-	 */
-	u16 itr_setting;
-
-	u16 count;			/* Number of descriptors */
-	u16 reg_idx;			/* HW register index of the ring */
-	u16 rx_buf_len;
-
-	/* used in interrupt processing */
-	u16 next_to_use;
-	u16 next_to_clean;
-
-	u8 atr_sample_rate;
-	u8 atr_count;
-
-	bool ring_active;		/* is ring online or not */
-	bool arm_wb;		/* do something to arm write back */
-	u8 packet_stride;
-
-	u16 flags;
-#define I40E_TXR_FLAGS_WB_ON_ITR		BIT(0)
-#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)
-
-	/* stats structs */
-	struct i40e_queue_stats	stats;
-	struct u64_stats_sync syncp;
-	union {
-		struct i40e_tx_queue_stats tx_stats;
-		struct i40e_rx_queue_stats rx_stats;
-	};
-
-	unsigned int size;		/* length of descriptor ring in bytes */
-	dma_addr_t dma;			/* physical address of ring */
-
-	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
-	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */
-
-	struct rcu_head rcu;		/* to avoid race on free */
-	u16 next_to_alloc;
-	struct sk_buff *skb;		/* When i40evf_clean_rx_ring_irq() must
-					 * return before it sees the EOP for
-					 * the current packet, we save that skb
-					 * here and resume receiving this
-					 * packet the next time
-					 * i40evf_clean_rx_ring_irq() is called
-					 * for this ring.
-					 */
-} ____cacheline_internodealigned_in_smp;
-
-static inline bool ring_uses_build_skb(struct i40e_ring *ring)
-{
-	return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
-}
-
-static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
-{
-	ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
-}
-
-static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
-{
-	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
-}
-
-#define I40E_ITR_ADAPTIVE_MIN_INC	0x0002
-#define I40E_ITR_ADAPTIVE_MIN_USECS	0x0002
-#define I40E_ITR_ADAPTIVE_MAX_USECS	0x007e
-#define I40E_ITR_ADAPTIVE_LATENCY	0x8000
-#define I40E_ITR_ADAPTIVE_BULK		0x0000
-#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY))
-
-struct i40e_ring_container {
-	struct i40e_ring *ring;		/* pointer to linked list of ring(s) */
-	unsigned long next_update;	/* jiffies value of next update */
-	unsigned int total_bytes;	/* total bytes processed this int */
-	unsigned int total_packets;	/* total packets processed this int */
-	u16 count;
-	u16 target_itr;			/* target ITR setting for ring(s) */
-	u16 current_itr;		/* current ITR setting for ring(s) */
-};
-
-/* iterator for handling rings in ring container */
-#define i40e_for_each_ring(pos, head) \
-	for (pos = (head).ring; pos != NULL; pos = pos->next)
-
-static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
-{
-#if (PAGE_SIZE < 8192)
-	if (ring->rx_buf_len > (PAGE_SIZE / 2))
-		return 1;
-#endif
-	return 0;
-}
-
-#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
-
-bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
-netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
-void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
-int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring);
-int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring);
-void i40evf_free_tx_resources(struct i40e_ring *tx_ring);
-void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
-int i40evf_napi_poll(struct napi_struct *napi, int budget);
-void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
-u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
-void i40evf_detect_recover_hung(struct i40e_vsi *vsi);
-int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
-bool __i40evf_chk_linearize(struct sk_buff *skb);
-
-/**
- * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
- * @skb:     send buffer
- *
- * Returns the number of data descriptors needed for this skb.  Returns 0 to
- * indicate there are not enough descriptors available in this ring since we
- * need at least one descriptor.
- **/
-static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
-{
-	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
-	int count = 0, size = skb_headlen(skb);
-
-	for (;;) {
-		count += i40e_txd_use_count(size);
-
-		if (!nr_frags--)
-			break;
-
-		size = skb_frag_size(frag++);
-	}
-
-	return count;
-}
-
-/**
- * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size of the buffer we want to ensure is available
- *
- * Returns 0 if stop is not needed
- **/
-static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
-		return 0;
-	return __i40evf_maybe_stop_tx(tx_ring, size);
-}
-
-/**
- * i40e_chk_linearize - Check if there are more than 8 fragments per packet
- * @skb:      send buffer
- * @count:    number of buffers used
- *
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
- **/
-static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
-{
-	/* Both TSO and single send will work if count is less than 8 */
-	if (likely(count < I40E_MAX_BUFFER_TXD))
-		return false;
-
-	if (skb_is_gso(skb))
-		return __i40evf_chk_linearize(skb);
-
-	/* we can support up to 8 data buffers for a single send */
-	return count != I40E_MAX_BUFFER_TXD;
-}
-
-/**
- * txring_txq - Find the netdev queue that corresponds to a Tx ring
- * @ring: Tx ring to find the netdev equivalent of
- **/
-static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
-{
-	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
-}
-#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
deleted file mode 100644
index 094387db3c11..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ /dev/null
@@ -1,1496 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_TYPE_H_
-#define _I40E_TYPE_H_
-
-#include "i40e_status.h"
-#include "i40e_osdep.h"
-#include "i40e_register.h"
-#include "i40e_adminq.h"
-#include "i40e_hmc.h"
-#include "i40e_lan_hmc.h"
-#include "i40e_devids.h"
-
-/* I40E_MASK is a macro used on 32 bit registers */
-#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
-
-#define I40E_MAX_VSI_QP			16
-#define I40E_MAX_VF_VSI			3
-#define I40E_MAX_CHAINED_RX_BUFFERS	5
-#define I40E_MAX_PF_UDP_OFFLOAD_PORTS	16
-
-/* Max default timeout in ms, */
-#define I40E_MAX_NVM_TIMEOUT		18000
-
-/* Max timeout in ms for the phy to respond */
-#define I40E_MAX_PHY_TIMEOUT		500
-
-/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
-#define I40E_MS_TO_GTIME(time)		((time) * 1000)
-
-/* forward declaration */
-struct i40e_hw;
-typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
-
-/* Data type manipulation macros. */
-
-#define I40E_DESC_UNUSED(R)	\
-	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
-	(R)->next_to_clean - (R)->next_to_use - 1)
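/* Editor's worked example of I40E_DESC_UNUSED above, with a 512-entry ring
 * (values chosen purely for illustration):
 *
 *   next_to_clean = 500, next_to_use = 10  ->  0 + 500 - 10 - 1   = 489 unused
 *   next_to_clean = 10,  next_to_use = 500 ->  512 + 10 - 500 - 1 = 21 unused
 *
 * The "- 1" keeps one descriptor permanently unused so that a full ring can
 * be distinguished from an empty one.
 */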
-
-/* bitfields for Tx queue mapping in QTX_CTL */
-#define I40E_QTX_CTL_VF_QUEUE	0x0
-#define I40E_QTX_CTL_VM_QUEUE	0x1
-#define I40E_QTX_CTL_PF_QUEUE	0x2
-
-/* debug masks - set these bits in hw->debug_mask to control output */
-enum i40e_debug_mask {
-	I40E_DEBUG_INIT			= 0x00000001,
-	I40E_DEBUG_RELEASE		= 0x00000002,
-
-	I40E_DEBUG_LINK			= 0x00000010,
-	I40E_DEBUG_PHY			= 0x00000020,
-	I40E_DEBUG_HMC			= 0x00000040,
-	I40E_DEBUG_NVM			= 0x00000080,
-	I40E_DEBUG_LAN			= 0x00000100,
-	I40E_DEBUG_FLOW			= 0x00000200,
-	I40E_DEBUG_DCB			= 0x00000400,
-	I40E_DEBUG_DIAG			= 0x00000800,
-	I40E_DEBUG_FD			= 0x00001000,
-	I40E_DEBUG_PACKAGE		= 0x00002000,
-
-	I40E_DEBUG_AQ_MESSAGE		= 0x01000000,
-	I40E_DEBUG_AQ_DESCRIPTOR	= 0x02000000,
-	I40E_DEBUG_AQ_DESC_BUFFER	= 0x04000000,
-	I40E_DEBUG_AQ_COMMAND		= 0x06000000,
-	I40E_DEBUG_AQ			= 0x0F000000,
-
-	I40E_DEBUG_USER			= 0xF0000000,
-
-	I40E_DEBUG_ALL			= 0xFFFFFFFF
-};
-
-/* These are structs for managing the hardware information and the operations.
- * The structures of function pointers are filled out at init time when we
- * know for sure exactly which hardware we're working with.  This gives us the
- * flexibility of using the same main driver code but adapting to slightly
- * different hardware needs as new parts are developed.  For this architecture,
- * the Firmware and AdminQ are intended to insulate the driver from most of the
- * future changes, but these structures will also do part of the job.
- */
-enum i40e_mac_type {
-	I40E_MAC_UNKNOWN = 0,
-	I40E_MAC_XL710,
-	I40E_MAC_VF,
-	I40E_MAC_X722,
-	I40E_MAC_X722_VF,
-	I40E_MAC_GENERIC,
-};
-
-enum i40e_media_type {
-	I40E_MEDIA_TYPE_UNKNOWN = 0,
-	I40E_MEDIA_TYPE_FIBER,
-	I40E_MEDIA_TYPE_BASET,
-	I40E_MEDIA_TYPE_BACKPLANE,
-	I40E_MEDIA_TYPE_CX4,
-	I40E_MEDIA_TYPE_DA,
-	I40E_MEDIA_TYPE_VIRTUAL
-};
-
-enum i40e_fc_mode {
-	I40E_FC_NONE = 0,
-	I40E_FC_RX_PAUSE,
-	I40E_FC_TX_PAUSE,
-	I40E_FC_FULL,
-	I40E_FC_PFC,
-	I40E_FC_DEFAULT
-};
-
-enum i40e_set_fc_aq_failures {
-	I40E_SET_FC_AQ_FAIL_NONE = 0,
-	I40E_SET_FC_AQ_FAIL_GET = 1,
-	I40E_SET_FC_AQ_FAIL_SET = 2,
-	I40E_SET_FC_AQ_FAIL_UPDATE = 4,
-	I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6
-};
-
-enum i40e_vsi_type {
-	I40E_VSI_MAIN	= 0,
-	I40E_VSI_VMDQ1	= 1,
-	I40E_VSI_VMDQ2	= 2,
-	I40E_VSI_CTRL	= 3,
-	I40E_VSI_FCOE	= 4,
-	I40E_VSI_MIRROR	= 5,
-	I40E_VSI_SRIOV	= 6,
-	I40E_VSI_FDIR	= 7,
-	I40E_VSI_TYPE_UNKNOWN
-};
-
-enum i40e_queue_type {
-	I40E_QUEUE_TYPE_RX = 0,
-	I40E_QUEUE_TYPE_TX,
-	I40E_QUEUE_TYPE_PE_CEQ,
-	I40E_QUEUE_TYPE_UNKNOWN
-};
-
-struct i40e_link_status {
-	enum i40e_aq_phy_type phy_type;
-	enum i40e_aq_link_speed link_speed;
-	u8 link_info;
-	u8 an_info;
-	u8 req_fec_info;
-	u8 fec_info;
-	u8 ext_info;
-	u8 loopback;
-	/* is Link Status Event notification to SW enabled */
-	bool lse_enable;
-	u16 max_frame_size;
-	bool crc_enable;
-	u8 pacing;
-	u8 requested_speeds;
-	u8 module_type[3];
-	/* 1st byte: module identifier */
-#define I40E_MODULE_TYPE_SFP		0x03
-#define I40E_MODULE_TYPE_QSFP		0x0D
-	/* 2nd byte: ethernet compliance codes for 10/40G */
-#define I40E_MODULE_TYPE_40G_ACTIVE	0x01
-#define I40E_MODULE_TYPE_40G_LR4	0x02
-#define I40E_MODULE_TYPE_40G_SR4	0x04
-#define I40E_MODULE_TYPE_40G_CR4	0x08
-#define I40E_MODULE_TYPE_10G_BASE_SR	0x10
-#define I40E_MODULE_TYPE_10G_BASE_LR	0x20
-#define I40E_MODULE_TYPE_10G_BASE_LRM	0x40
-#define I40E_MODULE_TYPE_10G_BASE_ER	0x80
-	/* 3rd byte: ethernet compliance codes for 1G */
-#define I40E_MODULE_TYPE_1000BASE_SX	0x01
-#define I40E_MODULE_TYPE_1000BASE_LX	0x02
-#define I40E_MODULE_TYPE_1000BASE_CX	0x04
-#define I40E_MODULE_TYPE_1000BASE_T	0x08
-};
-
-struct i40e_phy_info {
-	struct i40e_link_status link_info;
-	struct i40e_link_status link_info_old;
-	bool get_link_info;
-	enum i40e_media_type media_type;
-	/* all the phy types the NVM is capable of */
-	u64 phy_types;
-};
-
-#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII)
-#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX)
-#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4)
-#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR)
-#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4)
-#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI)
-#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI)
-#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI)
-#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI)
-#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI)
-#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU)
-#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU)
-#define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC)
-#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC)
-#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX)
-#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T)
-#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T)
-#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR)
-#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR)
-#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU)
-#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1)
-#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4)
-#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4)
-#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4)
-#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX)
-#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX)
-#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
-				BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
-#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
-/* Defining the macro I40E_PHY_TYPE_OFFSET to implement a bit shift for some
- * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit
- * fields but no corresponding gap in the i40e_aq_phy_type enumeration. So,
- * a shift is needed to adjust for this with values larger than 31. The
- * only affected values are I40E_PHY_TYPE_25GBASE_*.
- */
-#define I40E_PHY_TYPE_OFFSET 1
-#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \
-					     I40E_PHY_TYPE_OFFSET)
-#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \
-					     I40E_PHY_TYPE_OFFSET)
-#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \
-					     I40E_PHY_TYPE_OFFSET)
-#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
-					     I40E_PHY_TYPE_OFFSET)
-#define I40E_HW_CAP_MAX_GPIO			30
-/* Capabilities of a PF or a VF or the whole device */
-struct i40e_hw_capabilities {
-	u32  switch_mode;
-#define I40E_NVM_IMAGE_TYPE_EVB		0x0
-#define I40E_NVM_IMAGE_TYPE_CLOUD	0x2
-#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD	0x3
-
-	u32  management_mode;
-	u32  mng_protocols_over_mctp;
-#define I40E_MNG_PROTOCOL_PLDM		0x2
-#define I40E_MNG_PROTOCOL_OEM_COMMANDS	0x4
-#define I40E_MNG_PROTOCOL_NCSI		0x8
-	u32  npar_enable;
-	u32  os2bmc;
-	u32  valid_functions;
-	bool sr_iov_1_1;
-	bool vmdq;
-	bool evb_802_1_qbg; /* Edge Virtual Bridging */
-	bool evb_802_1_qbh; /* Bridge Port Extension */
-	bool dcb;
-	bool fcoe;
-	bool iscsi; /* Indicates iSCSI enabled */
-	bool flex10_enable;
-	bool flex10_capable;
-	u32  flex10_mode;
-#define I40E_FLEX10_MODE_UNKNOWN	0x0
-#define I40E_FLEX10_MODE_DCC		0x1
-#define I40E_FLEX10_MODE_DCI		0x2
-
-	u32 flex10_status;
-#define I40E_FLEX10_STATUS_DCC_ERROR	0x1
-#define I40E_FLEX10_STATUS_VC_MODE	0x2
-
-	bool sec_rev_disabled;
-	bool update_disabled;
-#define I40E_NVM_MGMT_SEC_REV_DISABLED	0x1
-#define I40E_NVM_MGMT_UPDATE_DISABLED	0x2
-
-	bool mgmt_cem;
-	bool ieee_1588;
-	bool iwarp;
-	bool fd;
-	u32 fd_filters_guaranteed;
-	u32 fd_filters_best_effort;
-	bool rss;
-	u32 rss_table_size;
-	u32 rss_table_entry_width;
-	bool led[I40E_HW_CAP_MAX_GPIO];
-	bool sdp[I40E_HW_CAP_MAX_GPIO];
-	u32 nvm_image_type;
-	u32 num_flow_director_filters;
-	u32 num_vfs;
-	u32 vf_base_id;
-	u32 num_vsis;
-	u32 num_rx_qp;
-	u32 num_tx_qp;
-	u32 base_queue;
-	u32 num_msix_vectors;
-	u32 num_msix_vectors_vf;
-	u32 led_pin_num;
-	u32 sdp_pin_num;
-	u32 mdio_port_num;
-	u32 mdio_port_mode;
-	u8 rx_buf_chain_len;
-	u32 enabled_tcmap;
-	u32 maxtc;
-	u64 wr_csr_prot;
-};
-
-struct i40e_mac_info {
-	enum i40e_mac_type type;
-	u8 addr[ETH_ALEN];
-	u8 perm_addr[ETH_ALEN];
-	u8 san_addr[ETH_ALEN];
-	u16 max_fcoeq;
-};
-
-enum i40e_aq_resources_ids {
-	I40E_NVM_RESOURCE_ID = 1
-};
-
-enum i40e_aq_resource_access_type {
-	I40E_RESOURCE_READ = 1,
-	I40E_RESOURCE_WRITE
-};
-
-struct i40e_nvm_info {
-	u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */
-	u32 timeout;              /* [ms] */
-	u16 sr_size;              /* Shadow RAM size in words */
-	bool blank_nvm_mode;      /* is NVM empty (no FW present)*/
-	u16 version;              /* NVM package version */
-	u32 eetrack;              /* NVM data version */
-	u32 oem_ver;              /* OEM version info */
-};
-
-/* definitions used in NVM update support */
-
-enum i40e_nvmupd_cmd {
-	I40E_NVMUPD_INVALID,
-	I40E_NVMUPD_READ_CON,
-	I40E_NVMUPD_READ_SNT,
-	I40E_NVMUPD_READ_LCB,
-	I40E_NVMUPD_READ_SA,
-	I40E_NVMUPD_WRITE_ERA,
-	I40E_NVMUPD_WRITE_CON,
-	I40E_NVMUPD_WRITE_SNT,
-	I40E_NVMUPD_WRITE_LCB,
-	I40E_NVMUPD_WRITE_SA,
-	I40E_NVMUPD_CSUM_CON,
-	I40E_NVMUPD_CSUM_SA,
-	I40E_NVMUPD_CSUM_LCB,
-	I40E_NVMUPD_STATUS,
-	I40E_NVMUPD_EXEC_AQ,
-	I40E_NVMUPD_GET_AQ_RESULT,
-	I40E_NVMUPD_GET_AQ_EVENT,
-};
-
-enum i40e_nvmupd_state {
-	I40E_NVMUPD_STATE_INIT,
-	I40E_NVMUPD_STATE_READING,
-	I40E_NVMUPD_STATE_WRITING,
-	I40E_NVMUPD_STATE_INIT_WAIT,
-	I40E_NVMUPD_STATE_WRITE_WAIT,
-	I40E_NVMUPD_STATE_ERROR
-};
-
-/* nvm_access definition and its masks/shifts need to be accessible to
- * application, core driver, and shared code.  Where is the right file?
- */
-#define I40E_NVM_READ	0xB
-#define I40E_NVM_WRITE	0xC
-
-#define I40E_NVM_MOD_PNT_MASK 0xFF
-
-#define I40E_NVM_TRANS_SHIFT			8
-#define I40E_NVM_TRANS_MASK			(0xf << I40E_NVM_TRANS_SHIFT)
-#define I40E_NVM_PRESERVATION_FLAGS_SHIFT	12
-#define I40E_NVM_PRESERVATION_FLAGS_MASK \
-				(0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
-#define I40E_NVM_PRESERVATION_FLAGS_SELECTED	0x01
-#define I40E_NVM_PRESERVATION_FLAGS_ALL		0x02
-#define I40E_NVM_CON				0x0
-#define I40E_NVM_SNT				0x1
-#define I40E_NVM_LCB				0x2
-#define I40E_NVM_SA				(I40E_NVM_SNT | I40E_NVM_LCB)
-#define I40E_NVM_ERA				0x4
-#define I40E_NVM_CSUM				0x8
-#define I40E_NVM_AQE				0xe
-#define I40E_NVM_EXEC				0xf
-
-#define I40E_NVM_ADAPT_SHIFT	16
-#define I40E_NVM_ADAPT_MASK	(0xffff << I40E_NVM_ADAPT_SHIFT)
-
-#define I40E_NVMUPD_MAX_DATA	4096
-#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */
-
-struct i40e_nvm_access {
-	u32 command;
-	u32 config;
-	u32 offset;	/* in bytes */
-	u32 data_size;	/* in bytes */
-	u8 data[1];
-};
-
-/* (Q)SFP module access definitions */
-#define I40E_I2C_EEPROM_DEV_ADDR	0xA0
-#define I40E_I2C_EEPROM_DEV_ADDR2	0xA2
-#define I40E_MODULE_TYPE_ADDR		0x00
-#define I40E_MODULE_REVISION_ADDR	0x01
-#define I40E_MODULE_SFF_8472_COMP	0x5E
-#define I40E_MODULE_SFF_8472_SWAP	0x5C
-#define I40E_MODULE_SFF_ADDR_MODE	0x04
-#define I40E_MODULE_TYPE_QSFP_PLUS	0x0D
-#define I40E_MODULE_TYPE_QSFP28		0x11
-#define I40E_MODULE_QSFP_MAX_LEN	640
-
-/* PCI bus types */
-enum i40e_bus_type {
-	i40e_bus_type_unknown = 0,
-	i40e_bus_type_pci,
-	i40e_bus_type_pcix,
-	i40e_bus_type_pci_express,
-	i40e_bus_type_reserved
-};
-
-/* PCI bus speeds */
-enum i40e_bus_speed {
-	i40e_bus_speed_unknown	= 0,
-	i40e_bus_speed_33	= 33,
-	i40e_bus_speed_66	= 66,
-	i40e_bus_speed_100	= 100,
-	i40e_bus_speed_120	= 120,
-	i40e_bus_speed_133	= 133,
-	i40e_bus_speed_2500	= 2500,
-	i40e_bus_speed_5000	= 5000,
-	i40e_bus_speed_8000	= 8000,
-	i40e_bus_speed_reserved
-};
-
-/* PCI bus widths */
-enum i40e_bus_width {
-	i40e_bus_width_unknown	= 0,
-	i40e_bus_width_pcie_x1	= 1,
-	i40e_bus_width_pcie_x2	= 2,
-	i40e_bus_width_pcie_x4	= 4,
-	i40e_bus_width_pcie_x8	= 8,
-	i40e_bus_width_32	= 32,
-	i40e_bus_width_64	= 64,
-	i40e_bus_width_reserved
-};
-
-/* Bus parameters */
-struct i40e_bus_info {
-	enum i40e_bus_speed speed;
-	enum i40e_bus_width width;
-	enum i40e_bus_type type;
-
-	u16 func;
-	u16 device;
-	u16 lan_id;
-	u16 bus_id;
-};
-
-/* Flow control (FC) parameters */
-struct i40e_fc_info {
-	enum i40e_fc_mode current_mode; /* FC mode in effect */
-	enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
-};
-
-#define I40E_MAX_TRAFFIC_CLASS		8
-#define I40E_MAX_USER_PRIORITY		8
-#define I40E_DCBX_MAX_APPS		32
-#define I40E_LLDPDU_SIZE		1500
-
-/* IEEE 802.1Qaz ETS Configuration data */
-struct i40e_ieee_ets_config {
-	u8 willing;
-	u8 cbs;
-	u8 maxtcs;
-	u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
-	u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
-	u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
-};
-
-/* IEEE 802.1Qaz ETS Recommendation data */
-struct i40e_ieee_ets_recommend {
-	u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
-	u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
-	u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
-};
-
-/* IEEE 802.1Qaz PFC Configuration data */
-struct i40e_ieee_pfc_config {
-	u8 willing;
-	u8 mbc;
-	u8 pfccap;
-	u8 pfcenable;
-};
-
-/* IEEE 802.1Qaz Application Priority data */
-struct i40e_ieee_app_priority_table {
-	u8  priority;
-	u8  selector;
-	u16 protocolid;
-};
-
-struct i40e_dcbx_config {
-	u32 numapps;
-	u32 tlv_status; /* CEE mode TLV status */
-	struct i40e_ieee_ets_config etscfg;
-	struct i40e_ieee_ets_recommend etsrec;
-	struct i40e_ieee_pfc_config pfc;
-	struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS];
-};
-
-/* Port hardware description */
-struct i40e_hw {
-	u8 __iomem *hw_addr;
-	void *back;
-
-	/* subsystem structs */
-	struct i40e_phy_info phy;
-	struct i40e_mac_info mac;
-	struct i40e_bus_info bus;
-	struct i40e_nvm_info nvm;
-	struct i40e_fc_info fc;
-
-	/* pci info */
-	u16 device_id;
-	u16 vendor_id;
-	u16 subsystem_device_id;
-	u16 subsystem_vendor_id;
-	u8 revision_id;
-	u8 port;
-	bool adapter_stopped;
-
-	/* capabilities for entire device and PCI func */
-	struct i40e_hw_capabilities dev_caps;
-	struct i40e_hw_capabilities func_caps;
-
-	/* Flow Director shared filter space */
-	u16 fdir_shared_filter_count;
-
-	/* device profile info */
-	u8  pf_id;
-	u16 main_vsi_seid;
-
-	/* for multi-function MACs */
-	u16 partition_id;
-	u16 num_partitions;
-	u16 num_ports;
-
-	/* Closest numa node to the device */
-	u16 numa_node;
-
-	/* Admin Queue info */
-	struct i40e_adminq_info aq;
-
-	/* state of nvm update process */
-	enum i40e_nvmupd_state nvmupd_state;
-	struct i40e_aq_desc nvm_wb_desc;
-	struct i40e_aq_desc nvm_aq_event_desc;
-	struct i40e_virt_mem nvm_buff;
-	bool nvm_release_on_done;
-	u16 nvm_wait_opcode;
-
-	/* HMC info */
-	struct i40e_hmc_info hmc; /* HMC info struct */
-
-	/* LLDP/DCBX Status */
-	u16 dcbx_status;
-
-#define I40E_HW_FLAG_802_1AD_CAPABLE        BIT_ULL(1)
-#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE  BIT_ULL(2)
-
-	/* DCBX info */
-	struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
-	struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
-	struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
-
-	/* Used in set switch config AQ command */
-	u16 switch_tag;
-	u16 first_tag;
-	u16 second_tag;
-
-	/* debug mask */
-	u32 debug_mask;
-	char err_str[16];
-};
-
-static inline bool i40e_is_vf(struct i40e_hw *hw)
-{
-	return (hw->mac.type == I40E_MAC_VF ||
-		hw->mac.type == I40E_MAC_X722_VF);
-}
-
-struct i40e_driver_version {
-	u8 major_version;
-	u8 minor_version;
-	u8 build_version;
-	u8 subbuild_version;
-	u8 driver_string[32];
-};
-
-/* RX Descriptors */
-union i40e_16byte_rx_desc {
-	struct {
-		__le64 pkt_addr; /* Packet buffer address */
-		__le64 hdr_addr; /* Header buffer address */
-	} read;
-	struct {
-		struct {
-			struct {
-				union {
-					__le16 mirroring_status;
-					__le16 fcoe_ctx_id;
-				} mirr_fcoe;
-				__le16 l2tag1;
-			} lo_dword;
-			union {
-				__le32 rss; /* RSS Hash */
-				__le32 fd_id; /* Flow director filter id */
-				__le32 fcoe_param; /* FCoE DDP Context id */
-			} hi_dword;
-		} qword0;
-		struct {
-			/* ext status/error/pktype/length */
-			__le64 status_error_len;
-		} qword1;
-	} wb;  /* writeback */
-};
-
-union i40e_32byte_rx_desc {
-	struct {
-		__le64  pkt_addr; /* Packet buffer address */
-		__le64  hdr_addr; /* Header buffer address */
-			/* bit 0 of hdr_buffer_addr is DD bit */
-		__le64  rsvd1;
-		__le64  rsvd2;
-	} read;
-	struct {
-		struct {
-			struct {
-				union {
-					__le16 mirroring_status;
-					__le16 fcoe_ctx_id;
-				} mirr_fcoe;
-				__le16 l2tag1;
-			} lo_dword;
-			union {
-				__le32 rss; /* RSS Hash */
-				__le32 fcoe_param; /* FCoE DDP Context id */
-				/* Flow director filter id in case of
-				 * Programming status desc WB
-				 */
-				__le32 fd_id;
-			} hi_dword;
-		} qword0;
-		struct {
-			/* status/error/pktype/length */
-			__le64 status_error_len;
-		} qword1;
-		struct {
-			__le16 ext_status; /* extended status */
-			__le16 rsvd;
-			__le16 l2tag2_1;
-			__le16 l2tag2_2;
-		} qword2;
-		struct {
-			union {
-				__le32 flex_bytes_lo;
-				__le32 pe_status;
-			} lo_dword;
-			union {
-				__le32 flex_bytes_hi;
-				__le32 fd_id;
-			} hi_dword;
-		} qword3;
-	} wb;  /* writeback */
-};
-
-enum i40e_rx_desc_status_bits {
-	/* Note: These are predefined bit offsets */
-	I40E_RX_DESC_STATUS_DD_SHIFT		= 0,
-	I40E_RX_DESC_STATUS_EOF_SHIFT		= 1,
-	I40E_RX_DESC_STATUS_L2TAG1P_SHIFT	= 2,
-	I40E_RX_DESC_STATUS_L3L4P_SHIFT		= 3,
-	I40E_RX_DESC_STATUS_CRCP_SHIFT		= 4,
-	I40E_RX_DESC_STATUS_TSYNINDX_SHIFT	= 5, /* 2 BITS */
-	I40E_RX_DESC_STATUS_TSYNVALID_SHIFT	= 7,
-	/* Note: Bit 8 is reserved in X710 and XL710 */
-	I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT	= 8,
-	I40E_RX_DESC_STATUS_UMBCAST_SHIFT	= 9, /* 2 BITS */
-	I40E_RX_DESC_STATUS_FLM_SHIFT		= 11,
-	I40E_RX_DESC_STATUS_FLTSTAT_SHIFT	= 12, /* 2 BITS */
-	I40E_RX_DESC_STATUS_LPBK_SHIFT		= 14,
-	I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT	= 15,
-	I40E_RX_DESC_STATUS_RESERVED_SHIFT	= 16, /* 2 BITS */
-	/* Note: For non-tunnel packets INT_UDP_0 is the right status for
-	 * UDP header
-	 */
-	I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT	= 18,
-	I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
-};
-
-#define I40E_RXD_QW1_STATUS_SHIFT	0
-#define I40E_RXD_QW1_STATUS_MASK	((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
-					 << I40E_RXD_QW1_STATUS_SHIFT)
-
-#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT   I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK	(0x3UL << \
-					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
-
-#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT  I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
-				    BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
-
-enum i40e_rx_desc_fltstat_values {
-	I40E_RX_DESC_FLTSTAT_NO_DATA	= 0,
-	I40E_RX_DESC_FLTSTAT_RSV_FD_ID	= 1, /* 16byte desc? FD_ID : RSV */
-	I40E_RX_DESC_FLTSTAT_RSV	= 2,
-	I40E_RX_DESC_FLTSTAT_RSS_HASH	= 3,
-};
-
-#define I40E_RXD_QW1_ERROR_SHIFT	19
-#define I40E_RXD_QW1_ERROR_MASK		(0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
-
-enum i40e_rx_desc_error_bits {
-	/* Note: These are predefined bit offsets */
-	I40E_RX_DESC_ERROR_RXE_SHIFT		= 0,
-	I40E_RX_DESC_ERROR_RECIPE_SHIFT		= 1,
-	I40E_RX_DESC_ERROR_HBO_SHIFT		= 2,
-	I40E_RX_DESC_ERROR_L3L4E_SHIFT		= 3, /* 3 BITS */
-	I40E_RX_DESC_ERROR_IPE_SHIFT		= 3,
-	I40E_RX_DESC_ERROR_L4E_SHIFT		= 4,
-	I40E_RX_DESC_ERROR_EIPE_SHIFT		= 5,
-	I40E_RX_DESC_ERROR_OVERSIZE_SHIFT	= 6,
-	I40E_RX_DESC_ERROR_PPRS_SHIFT		= 7
-};
-
-enum i40e_rx_desc_error_l3l4e_fcoe_masks {
-	I40E_RX_DESC_ERROR_L3L4E_NONE		= 0,
-	I40E_RX_DESC_ERROR_L3L4E_PROT		= 1,
-	I40E_RX_DESC_ERROR_L3L4E_FC		= 2,
-	I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR	= 3,
-	I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN	= 4
-};
-
-#define I40E_RXD_QW1_PTYPE_SHIFT	30
-#define I40E_RXD_QW1_PTYPE_MASK		(0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
-
-/* Packet type non-ip values */
-enum i40e_rx_l2_ptype {
-	I40E_RX_PTYPE_L2_RESERVED			= 0,
-	I40E_RX_PTYPE_L2_MAC_PAY2			= 1,
-	I40E_RX_PTYPE_L2_TIMESYNC_PAY2			= 2,
-	I40E_RX_PTYPE_L2_FIP_PAY2			= 3,
-	I40E_RX_PTYPE_L2_OUI_PAY2			= 4,
-	I40E_RX_PTYPE_L2_MACCNTRL_PAY2			= 5,
-	I40E_RX_PTYPE_L2_LLDP_PAY2			= 6,
-	I40E_RX_PTYPE_L2_ECP_PAY2			= 7,
-	I40E_RX_PTYPE_L2_EVB_PAY2			= 8,
-	I40E_RX_PTYPE_L2_QCN_PAY2			= 9,
-	I40E_RX_PTYPE_L2_EAPOL_PAY2			= 10,
-	I40E_RX_PTYPE_L2_ARP				= 11,
-	I40E_RX_PTYPE_L2_FCOE_PAY3			= 12,
-	I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3		= 13,
-	I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3		= 14,
-	I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3		= 15,
-	I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA		= 16,
-	I40E_RX_PTYPE_L2_FCOE_VFT_PAY3			= 17,
-	I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA		= 18,
-	I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY			= 19,
-	I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP			= 20,
-	I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER		= 21,
-	I40E_RX_PTYPE_GRENAT4_MAC_PAY3			= 58,
-	I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4	= 87,
-	I40E_RX_PTYPE_GRENAT6_MAC_PAY3			= 124,
-	I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4	= 153
-};
-
-struct i40e_rx_ptype_decoded {
-	u32 ptype:8;
-	u32 known:1;
-	u32 outer_ip:1;
-	u32 outer_ip_ver:1;
-	u32 outer_frag:1;
-	u32 tunnel_type:3;
-	u32 tunnel_end_prot:2;
-	u32 tunnel_end_frag:1;
-	u32 inner_prot:4;
-	u32 payload_layer:3;
-};
-
-enum i40e_rx_ptype_outer_ip {
-	I40E_RX_PTYPE_OUTER_L2	= 0,
-	I40E_RX_PTYPE_OUTER_IP	= 1
-};
-
-enum i40e_rx_ptype_outer_ip_ver {
-	I40E_RX_PTYPE_OUTER_NONE	= 0,
-	I40E_RX_PTYPE_OUTER_IPV4	= 0,
-	I40E_RX_PTYPE_OUTER_IPV6	= 1
-};
-
-enum i40e_rx_ptype_outer_fragmented {
-	I40E_RX_PTYPE_NOT_FRAG	= 0,
-	I40E_RX_PTYPE_FRAG	= 1
-};
-
-enum i40e_rx_ptype_tunnel_type {
-	I40E_RX_PTYPE_TUNNEL_NONE		= 0,
-	I40E_RX_PTYPE_TUNNEL_IP_IP		= 1,
-	I40E_RX_PTYPE_TUNNEL_IP_GRENAT		= 2,
-	I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC	= 3,
-	I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN	= 4,
-};
-
-enum i40e_rx_ptype_tunnel_end_prot {
-	I40E_RX_PTYPE_TUNNEL_END_NONE	= 0,
-	I40E_RX_PTYPE_TUNNEL_END_IPV4	= 1,
-	I40E_RX_PTYPE_TUNNEL_END_IPV6	= 2,
-};
-
-enum i40e_rx_ptype_inner_prot {
-	I40E_RX_PTYPE_INNER_PROT_NONE		= 0,
-	I40E_RX_PTYPE_INNER_PROT_UDP		= 1,
-	I40E_RX_PTYPE_INNER_PROT_TCP		= 2,
-	I40E_RX_PTYPE_INNER_PROT_SCTP		= 3,
-	I40E_RX_PTYPE_INNER_PROT_ICMP		= 4,
-	I40E_RX_PTYPE_INNER_PROT_TIMESYNC	= 5
-};
-
-enum i40e_rx_ptype_payload_layer {
-	I40E_RX_PTYPE_PAYLOAD_LAYER_NONE	= 0,
-	I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2	= 1,
-	I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3	= 2,
-	I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4	= 3,
-};
-
-#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT	38
-#define I40E_RXD_QW1_LENGTH_PBUF_MASK	(0x3FFFULL << \
-					 I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
-
-#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT	52
-#define I40E_RXD_QW1_LENGTH_HBUF_MASK	(0x7FFULL << \
-					 I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
-
-#define I40E_RXD_QW1_LENGTH_SPH_SHIFT	63
-#define I40E_RXD_QW1_LENGTH_SPH_MASK	BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
-
-enum i40e_rx_desc_ext_status_bits {
-	/* Note: These are predefined bit offsets */
-	I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT	= 0,
-	I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT	= 1,
-	I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT	= 2, /* 2 BITS */
-	I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT	= 4, /* 2 BITS */
-	I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT	= 9,
-	I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT	= 10,
-	I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT	= 11,
-};
-
-enum i40e_rx_desc_pe_status_bits {
-	/* Note: These are predefined bit offsets */
-	I40E_RX_DESC_PE_STATUS_QPID_SHIFT	= 0, /* 18 BITS */
-	I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT	= 0, /* 16 BITS */
-	I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT	= 16, /* 8 BITS */
-	I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT	= 24,
-	I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT	= 25,
-	I40E_RX_DESC_PE_STATUS_PORTV_SHIFT	= 26,
-	I40E_RX_DESC_PE_STATUS_URG_SHIFT	= 27,
-	I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT	= 28,
-	I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT	= 29
-};
-
-#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT		38
-#define I40E_RX_PROG_STATUS_DESC_LENGTH			0x2000000
-
-#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT	2
-#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK	(0x7UL << \
-				I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
-
-#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT	19
-#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK		(0x3FUL << \
-				I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
-
-enum i40e_rx_prog_status_desc_status_bits {
-	/* Note: These are predefined bit offsets */
-	I40E_RX_PROG_STATUS_DESC_DD_SHIFT	= 0,
-	I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT	= 2 /* 3 BITS */
-};
-
-enum i40e_rx_prog_status_desc_prog_id_masks {
-	I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS	= 1,
-	I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS	= 2,
-	I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS	= 4,
-};
-
-enum i40e_rx_prog_status_desc_error_bits {
-	/* Note: These are predefined bit offsets */
-	I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT	= 0,
-	I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT	= 1,
-	I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT	= 2,
-	I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT	= 3
-};
-
-/* TX Descriptor */
-struct i40e_tx_desc {
-	__le64 buffer_addr; /* Address of descriptor's data buf */
-	__le64 cmd_type_offset_bsz;
-};
-
-#define I40E_TXD_QW1_DTYPE_SHIFT	0
-#define I40E_TXD_QW1_DTYPE_MASK		(0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
-
-enum i40e_tx_desc_dtype_value {
-	I40E_TX_DESC_DTYPE_DATA		= 0x0,
-	I40E_TX_DESC_DTYPE_NOP		= 0x1, /* same as Context desc */
-	I40E_TX_DESC_DTYPE_CONTEXT	= 0x1,
-	I40E_TX_DESC_DTYPE_FCOE_CTX	= 0x2,
-	I40E_TX_DESC_DTYPE_FILTER_PROG	= 0x8,
-	I40E_TX_DESC_DTYPE_DDP_CTX	= 0x9,
-	I40E_TX_DESC_DTYPE_FLEX_DATA	= 0xB,
-	I40E_TX_DESC_DTYPE_FLEX_CTX_1	= 0xC,
-	I40E_TX_DESC_DTYPE_FLEX_CTX_2	= 0xD,
-	I40E_TX_DESC_DTYPE_DESC_DONE	= 0xF
-};
-
-#define I40E_TXD_QW1_CMD_SHIFT	4
-#define I40E_TXD_QW1_CMD_MASK	(0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
-
-enum i40e_tx_desc_cmd_bits {
-	I40E_TX_DESC_CMD_EOP			= 0x0001,
-	I40E_TX_DESC_CMD_RS			= 0x0002,
-	I40E_TX_DESC_CMD_ICRC			= 0x0004,
-	I40E_TX_DESC_CMD_IL2TAG1		= 0x0008,
-	I40E_TX_DESC_CMD_DUMMY			= 0x0010,
-	I40E_TX_DESC_CMD_IIPT_NONIP		= 0x0000, /* 2 BITS */
-	I40E_TX_DESC_CMD_IIPT_IPV6		= 0x0020, /* 2 BITS */
-	I40E_TX_DESC_CMD_IIPT_IPV4		= 0x0040, /* 2 BITS */
-	I40E_TX_DESC_CMD_IIPT_IPV4_CSUM		= 0x0060, /* 2 BITS */
-	I40E_TX_DESC_CMD_FCOET			= 0x0080,
-	I40E_TX_DESC_CMD_L4T_EOFT_UNK		= 0x0000, /* 2 BITS */
-	I40E_TX_DESC_CMD_L4T_EOFT_TCP		= 0x0100, /* 2 BITS */
-	I40E_TX_DESC_CMD_L4T_EOFT_SCTP		= 0x0200, /* 2 BITS */
-	I40E_TX_DESC_CMD_L4T_EOFT_UDP		= 0x0300, /* 2 BITS */
-	I40E_TX_DESC_CMD_L4T_EOFT_EOF_N		= 0x0000, /* 2 BITS */
-	I40E_TX_DESC_CMD_L4T_EOFT_EOF_T		= 0x0100, /* 2 BITS */
-	I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI	= 0x0200, /* 2 BITS */
-	I40E_TX_DESC_CMD_L4T_EOFT_EOF_A		= 0x0300, /* 2 BITS */
-};
-
-#define I40E_TXD_QW1_OFFSET_SHIFT	16
-#define I40E_TXD_QW1_OFFSET_MASK	(0x3FFFFULL << \
-					 I40E_TXD_QW1_OFFSET_SHIFT)
-
-enum i40e_tx_desc_length_fields {
-	/* Note: These are predefined bit offsets */
-	I40E_TX_DESC_LENGTH_MACLEN_SHIFT	= 0, /* 7 BITS */
-	I40E_TX_DESC_LENGTH_IPLEN_SHIFT		= 7, /* 7 BITS */
-	I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT	= 14 /* 4 BITS */
-};
-
-#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT	34
-#define I40E_TXD_QW1_TX_BUF_SZ_MASK	(0x3FFFULL << \
-					 I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
-
-#define I40E_TXD_QW1_L2TAG1_SHIFT	48
-#define I40E_TXD_QW1_L2TAG1_MASK	(0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
-
-/* Context descriptors */
-struct i40e_tx_context_desc {
-	__le32 tunneling_params;
-	__le16 l2tag2;
-	__le16 rsvd;
-	__le64 type_cmd_tso_mss;
-};
-
-#define I40E_TXD_CTX_QW1_DTYPE_SHIFT	0
-#define I40E_TXD_CTX_QW1_DTYPE_MASK	(0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
-
-#define I40E_TXD_CTX_QW1_CMD_SHIFT	4
-#define I40E_TXD_CTX_QW1_CMD_MASK	(0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
-
-enum i40e_tx_ctx_desc_cmd_bits {
-	I40E_TX_CTX_DESC_TSO		= 0x01,
-	I40E_TX_CTX_DESC_TSYN		= 0x02,
-	I40E_TX_CTX_DESC_IL2TAG2	= 0x04,
-	I40E_TX_CTX_DESC_IL2TAG2_IL2H	= 0x08,
-	I40E_TX_CTX_DESC_SWTCH_NOTAG	= 0x00,
-	I40E_TX_CTX_DESC_SWTCH_UPLINK	= 0x10,
-	I40E_TX_CTX_DESC_SWTCH_LOCAL	= 0x20,
-	I40E_TX_CTX_DESC_SWTCH_VSI	= 0x30,
-	I40E_TX_CTX_DESC_SWPE		= 0x40
-};
-
-#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT	30
-#define I40E_TXD_CTX_QW1_TSO_LEN_MASK	(0x3FFFFULL << \
-					 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
-
-#define I40E_TXD_CTX_QW1_MSS_SHIFT	50
-#define I40E_TXD_CTX_QW1_MSS_MASK	(0x3FFFULL << \
-					 I40E_TXD_CTX_QW1_MSS_SHIFT)
-
-#define I40E_TXD_CTX_QW1_VSI_SHIFT	50
-#define I40E_TXD_CTX_QW1_VSI_MASK	(0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
-
-#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT	0
-#define I40E_TXD_CTX_QW0_EXT_IP_MASK	(0x3ULL << \
-					 I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
-
-enum i40e_tx_ctx_desc_eipt_offload {
-	I40E_TX_CTX_EXT_IP_NONE		= 0x0,
-	I40E_TX_CTX_EXT_IP_IPV6		= 0x1,
-	I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM	= 0x2,
-	I40E_TX_CTX_EXT_IP_IPV4		= 0x3
-};
-
-#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT	2
-#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK	(0x3FULL << \
-					 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
-
-#define I40E_TXD_CTX_QW0_NATT_SHIFT	9
-#define I40E_TXD_CTX_QW0_NATT_MASK	(0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
-
-#define I40E_TXD_CTX_UDP_TUNNELING	BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
-#define I40E_TXD_CTX_GRE_TUNNELING	(0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
-
-#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT	11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
-				       BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
-
-#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST	I40E_TXD_CTX_QW0_EIP_NOINC_MASK
-
-#define I40E_TXD_CTX_QW0_NATLEN_SHIFT	12
-#define I40E_TXD_CTX_QW0_NATLEN_MASK	(0X7FULL << \
-					 I40E_TXD_CTX_QW0_NATLEN_SHIFT)
-
-#define I40E_TXD_CTX_QW0_DECTTL_SHIFT	19
-#define I40E_TXD_CTX_QW0_DECTTL_MASK	(0xFULL << \
-					 I40E_TXD_CTX_QW0_DECTTL_SHIFT)
-
-#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT	23
-#define I40E_TXD_CTX_QW0_L4T_CS_MASK	BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
-struct i40e_filter_program_desc {
-	__le32 qindex_flex_ptype_vsi;
-	__le32 rsvd;
-	__le32 dtype_cmd_cntindex;
-	__le32 fd_id;
-};
-#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT	0
-#define I40E_TXD_FLTR_QW0_QINDEX_MASK	(0x7FFUL << \
-					 I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
-#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT	11
-#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK	(0x7UL << \
-					 I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
-#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT	17
-#define I40E_TXD_FLTR_QW0_PCTYPE_MASK	(0x3FUL << \
-					 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
-
-/* Packet Classifier Types for filters */
-enum i40e_filter_pctype {
-	/* Note: Values 0-28 are reserved for future use.
-	 * Value 29, 30, 32 are not supported on XL710 and X710.
-	 */
-	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP	= 29,
-	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP	= 30,
-	I40E_FILTER_PCTYPE_NONF_IPV4_UDP		= 31,
-	I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK	= 32,
-	I40E_FILTER_PCTYPE_NONF_IPV4_TCP		= 33,
-	I40E_FILTER_PCTYPE_NONF_IPV4_SCTP		= 34,
-	I40E_FILTER_PCTYPE_NONF_IPV4_OTHER		= 35,
-	I40E_FILTER_PCTYPE_FRAG_IPV4			= 36,
-	/* Note: Values 37-38 are reserved for future use.
-	 * Value 39, 40, 42 are not supported on XL710 and X710.
-	 */
-	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP	= 39,
-	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP	= 40,
-	I40E_FILTER_PCTYPE_NONF_IPV6_UDP		= 41,
-	I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK	= 42,
-	I40E_FILTER_PCTYPE_NONF_IPV6_TCP		= 43,
-	I40E_FILTER_PCTYPE_NONF_IPV6_SCTP		= 44,
-	I40E_FILTER_PCTYPE_NONF_IPV6_OTHER		= 45,
-	I40E_FILTER_PCTYPE_FRAG_IPV6			= 46,
-	/* Note: Value 47 is reserved for future use */
-	I40E_FILTER_PCTYPE_FCOE_OX			= 48,
-	I40E_FILTER_PCTYPE_FCOE_RX			= 49,
-	I40E_FILTER_PCTYPE_FCOE_OTHER			= 50,
-	/* Note: Values 51-62 are reserved for future use */
-	I40E_FILTER_PCTYPE_L2_PAYLOAD			= 63,
-};
-
-enum i40e_filter_program_desc_dest {
-	I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET		= 0x0,
-	I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX	= 0x1,
-	I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER	= 0x2,
-};
-
-enum i40e_filter_program_desc_fd_status {
-	I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE			= 0x0,
-	I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID		= 0x1,
-	I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES	= 0x2,
-	I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES		= 0x3,
-};
-
-#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT	23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK	(0x1FFUL << \
-					 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
-
-#define I40E_TXD_FLTR_QW1_CMD_SHIFT	4
-#define I40E_TXD_FLTR_QW1_CMD_MASK	(0xFFFFULL << \
-					 I40E_TXD_FLTR_QW1_CMD_SHIFT)
-
-#define I40E_TXD_FLTR_QW1_PCMD_SHIFT	(0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_PCMD_MASK	(0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
-
-enum i40e_filter_program_desc_pcmd {
-	I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE	= 0x1,
-	I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE		= 0x2,
-};
-
-#define I40E_TXD_FLTR_QW1_DEST_SHIFT	(0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_DEST_MASK	(0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
-
-#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT	(0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK	BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
-
-#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT	(0x9ULL + \
-						 I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
-					  I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
-
-#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
-#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK	(0x1FFUL << \
-					 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
-
-enum i40e_filter_type {
-	I40E_FLOW_DIRECTOR_FLTR = 0,
-	I40E_PE_QUAD_HASH_FLTR = 1,
-	I40E_ETHERTYPE_FLTR,
-	I40E_FCOE_CTX_FLTR,
-	I40E_MAC_VLAN_FLTR,
-	I40E_HASH_FLTR
-};
-
-struct i40e_vsi_context {
-	u16 seid;
-	u16 uplink_seid;
-	u16 vsi_number;
-	u16 vsis_allocated;
-	u16 vsis_unallocated;
-	u16 flags;
-	u8 pf_num;
-	u8 vf_num;
-	u8 connection_type;
-	struct i40e_aqc_vsi_properties_data info;
-};
-
-struct i40e_veb_context {
-	u16 seid;
-	u16 uplink_seid;
-	u16 veb_number;
-	u16 vebs_allocated;
-	u16 vebs_unallocated;
-	u16 flags;
-	struct i40e_aqc_get_veb_parameters_completion info;
-};
-
-/* Statistics collected by each port, VSI, VEB, and S-channel */
-struct i40e_eth_stats {
-	u64 rx_bytes;			/* gorc */
-	u64 rx_unicast;			/* uprc */
-	u64 rx_multicast;		/* mprc */
-	u64 rx_broadcast;		/* bprc */
-	u64 rx_discards;		/* rdpc */
-	u64 rx_unknown_protocol;	/* rupp */
-	u64 tx_bytes;			/* gotc */
-	u64 tx_unicast;			/* uptc */
-	u64 tx_multicast;		/* mptc */
-	u64 tx_broadcast;		/* bptc */
-	u64 tx_discards;		/* tdpc */
-	u64 tx_errors;			/* tepc */
-};
-
-/* Statistics collected per VEB per TC */
-struct i40e_veb_tc_stats {
-	u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS];
-	u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS];
-	u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS];
-	u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
-};
-
-/* Statistics collected by the MAC */
-struct i40e_hw_port_stats {
-	/* eth stats collected by the port */
-	struct i40e_eth_stats eth;
-
-	/* additional port specific stats */
-	u64 tx_dropped_link_down;	/* tdold */
-	u64 crc_errors;			/* crcerrs */
-	u64 illegal_bytes;		/* illerrc */
-	u64 error_bytes;		/* errbc */
-	u64 mac_local_faults;		/* mlfc */
-	u64 mac_remote_faults;		/* mrfc */
-	u64 rx_length_errors;		/* rlec */
-	u64 link_xon_rx;		/* lxonrxc */
-	u64 link_xoff_rx;		/* lxoffrxc */
-	u64 priority_xon_rx[8];		/* pxonrxc[8] */
-	u64 priority_xoff_rx[8];	/* pxoffrxc[8] */
-	u64 link_xon_tx;		/* lxontxc */
-	u64 link_xoff_tx;		/* lxofftxc */
-	u64 priority_xon_tx[8];		/* pxontxc[8] */
-	u64 priority_xoff_tx[8];	/* pxofftxc[8] */
-	u64 priority_xon_2_xoff[8];	/* pxon2offc[8] */
-	u64 rx_size_64;			/* prc64 */
-	u64 rx_size_127;		/* prc127 */
-	u64 rx_size_255;		/* prc255 */
-	u64 rx_size_511;		/* prc511 */
-	u64 rx_size_1023;		/* prc1023 */
-	u64 rx_size_1522;		/* prc1522 */
-	u64 rx_size_big;		/* prc9522 */
-	u64 rx_undersize;		/* ruc */
-	u64 rx_fragments;		/* rfc */
-	u64 rx_oversize;		/* roc */
-	u64 rx_jabber;			/* rjc */
-	u64 tx_size_64;			/* ptc64 */
-	u64 tx_size_127;		/* ptc127 */
-	u64 tx_size_255;		/* ptc255 */
-	u64 tx_size_511;		/* ptc511 */
-	u64 tx_size_1023;		/* ptc1023 */
-	u64 tx_size_1522;		/* ptc1522 */
-	u64 tx_size_big;		/* ptc9522 */
-	u64 mac_short_packet_dropped;	/* mspdc */
-	u64 checksum_error;		/* xec */
-	/* flow director stats */
-	u64 fd_atr_match;
-	u64 fd_sb_match;
-	u64 fd_atr_tunnel_match;
-	u32 fd_atr_status;
-	u32 fd_sb_status;
-	/* EEE LPI */
-	u32 tx_lpi_status;
-	u32 rx_lpi_status;
-	u64 tx_lpi_count;		/* etlpic */
-	u64 rx_lpi_count;		/* erlpic */
-};
-
-/* Checksum and Shadow RAM pointers */
-#define I40E_SR_NVM_CONTROL_WORD		0x00
-#define I40E_EMP_MODULE_PTR			0x0F
-#define I40E_SR_EMP_MODULE_PTR			0x48
-#define I40E_NVM_OEM_VER_OFF			0x83
-#define I40E_SR_NVM_DEV_STARTER_VERSION		0x18
-#define I40E_SR_NVM_WAKE_ON_LAN			0x19
-#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR	0x27
-#define I40E_SR_NVM_EETRACK_LO			0x2D
-#define I40E_SR_NVM_EETRACK_HI			0x2E
-#define I40E_SR_VPD_PTR				0x2F
-#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR		0x3E
-#define I40E_SR_SW_CHECKSUM_WORD		0x3F
-
-/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
-#define I40E_SR_VPD_MODULE_MAX_SIZE		1024
-#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE	1024
-#define I40E_SR_CONTROL_WORD_1_SHIFT		0x06
-#define I40E_SR_CONTROL_WORD_1_MASK	(0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
-#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID	BIT(5)
-#define I40E_SR_NVM_MAP_STRUCTURE_TYPE		BIT(12)
-#define I40E_PTR_TYPE				BIT(15)
-
-/* Shadow RAM related */
-#define I40E_SR_SECTOR_SIZE_IN_WORDS	0x800
-#define I40E_SR_WORDS_IN_1KB		512
-/* Checksum should be calculated such that after adding all the words,
- * including the checksum word itself, the sum should be 0xBABA.
- */
-#define I40E_SR_SW_CHECKSUM_BASE	0xBABA
-
-#define I40E_SRRD_SRCTL_ATTEMPTS	100000
-
-enum i40e_switch_element_types {
-	I40E_SWITCH_ELEMENT_TYPE_MAC	= 1,
-	I40E_SWITCH_ELEMENT_TYPE_PF	= 2,
-	I40E_SWITCH_ELEMENT_TYPE_VF	= 3,
-	I40E_SWITCH_ELEMENT_TYPE_EMP	= 4,
-	I40E_SWITCH_ELEMENT_TYPE_BMC	= 6,
-	I40E_SWITCH_ELEMENT_TYPE_PE	= 16,
-	I40E_SWITCH_ELEMENT_TYPE_VEB	= 17,
-	I40E_SWITCH_ELEMENT_TYPE_PA	= 18,
-	I40E_SWITCH_ELEMENT_TYPE_VSI	= 19,
-};
-
-/* Supported EtherType filters */
-enum i40e_ether_type_index {
-	I40E_ETHER_TYPE_1588		= 0,
-	I40E_ETHER_TYPE_FIP		= 1,
-	I40E_ETHER_TYPE_OUI_EXTENDED	= 2,
-	I40E_ETHER_TYPE_MAC_CONTROL	= 3,
-	I40E_ETHER_TYPE_LLDP		= 4,
-	I40E_ETHER_TYPE_EVB_PROTOCOL1	= 5,
-	I40E_ETHER_TYPE_EVB_PROTOCOL2	= 6,
-	I40E_ETHER_TYPE_QCN_CNM		= 7,
-	I40E_ETHER_TYPE_8021X		= 8,
-	I40E_ETHER_TYPE_ARP		= 9,
-	I40E_ETHER_TYPE_RSV1		= 10,
-	I40E_ETHER_TYPE_RSV2		= 11,
-};
-
-/* Filter context base size is 1K */
-#define I40E_HASH_FILTER_BASE_SIZE	1024
-/* Supported Hash filter values */
-enum i40e_hash_filter_size {
-	I40E_HASH_FILTER_SIZE_1K	= 0,
-	I40E_HASH_FILTER_SIZE_2K	= 1,
-	I40E_HASH_FILTER_SIZE_4K	= 2,
-	I40E_HASH_FILTER_SIZE_8K	= 3,
-	I40E_HASH_FILTER_SIZE_16K	= 4,
-	I40E_HASH_FILTER_SIZE_32K	= 5,
-	I40E_HASH_FILTER_SIZE_64K	= 6,
-	I40E_HASH_FILTER_SIZE_128K	= 7,
-	I40E_HASH_FILTER_SIZE_256K	= 8,
-	I40E_HASH_FILTER_SIZE_512K	= 9,
-	I40E_HASH_FILTER_SIZE_1M	= 10,
-};
-
-/* DMA context base size is 0.5K */
-#define I40E_DMA_CNTX_BASE_SIZE		512
-/* Supported DMA context values */
-enum i40e_dma_cntx_size {
-	I40E_DMA_CNTX_SIZE_512		= 0,
-	I40E_DMA_CNTX_SIZE_1K		= 1,
-	I40E_DMA_CNTX_SIZE_2K		= 2,
-	I40E_DMA_CNTX_SIZE_4K		= 3,
-	I40E_DMA_CNTX_SIZE_8K		= 4,
-	I40E_DMA_CNTX_SIZE_16K		= 5,
-	I40E_DMA_CNTX_SIZE_32K		= 6,
-	I40E_DMA_CNTX_SIZE_64K		= 7,
-	I40E_DMA_CNTX_SIZE_128K		= 8,
-	I40E_DMA_CNTX_SIZE_256K		= 9,
-};
-
-/* Supported Hash look up table (LUT) sizes */
-enum i40e_hash_lut_size {
-	I40E_HASH_LUT_SIZE_128		= 0,
-	I40E_HASH_LUT_SIZE_512		= 1,
-};
-
-/* Structure to hold a per PF filter control settings */
-struct i40e_filter_control_settings {
-	/* number of PE Quad Hash filter buckets */
-	enum i40e_hash_filter_size pe_filt_num;
-	/* number of PE Quad Hash contexts */
-	enum i40e_dma_cntx_size pe_cntx_num;
-	/* number of FCoE filter buckets */
-	enum i40e_hash_filter_size fcoe_filt_num;
-	/* number of FCoE DDP contexts */
-	enum i40e_dma_cntx_size fcoe_cntx_num;
-	/* size of the Hash LUT */
-	enum i40e_hash_lut_size	hash_lut_size;
-	/* enable FDIR filters for PF and its VFs */
-	bool enable_fdir;
-	/* enable Ethertype filters for PF and its VFs */
-	bool enable_ethtype;
-	/* enable MAC/VLAN filters for PF and its VFs */
-	bool enable_macvlan;
-};
-
-/* Structure to hold device level control filter counts */
-struct i40e_control_filter_stats {
-	u16 mac_etype_used;   /* Used perfect match MAC/EtherType filters */
-	u16 etype_used;       /* Used perfect EtherType filters */
-	u16 mac_etype_free;   /* Un-used perfect match MAC/EtherType filters */
-	u16 etype_free;       /* Un-used perfect EtherType filters */
-};
-
-enum i40e_reset_type {
-	I40E_RESET_POR		= 0,
-	I40E_RESET_CORER	= 1,
-	I40E_RESET_GLOBR	= 2,
-	I40E_RESET_EMPR		= 3,
-};
-
-/* IEEE 802.1AB LLDP Agent Variables from NVM */
-#define I40E_NVM_LLDP_CFG_PTR	0x06
-#define I40E_SR_LLDP_CFG_PTR	0x31
-
-/* RSS Hash Table Size */
-#define I40E_PFQF_CTL_0_HASHLUTSIZE_512	0x00010000
-
-/* INPUT SET MASK for RSS, flow director and flexible payload */
-#define I40E_FD_INSET_L3_SRC_SHIFT		47
-#define I40E_FD_INSET_L3_SRC_WORD_MASK		(0x3ULL << \
-						 I40E_FD_INSET_L3_SRC_SHIFT)
-#define I40E_FD_INSET_L3_DST_SHIFT		35
-#define I40E_FD_INSET_L3_DST_WORD_MASK		(0x3ULL << \
-						 I40E_FD_INSET_L3_DST_SHIFT)
-#define I40E_FD_INSET_L4_SRC_SHIFT		34
-#define I40E_FD_INSET_L4_SRC_WORD_MASK		(0x1ULL << \
-						 I40E_FD_INSET_L4_SRC_SHIFT)
-#define I40E_FD_INSET_L4_DST_SHIFT		33
-#define I40E_FD_INSET_L4_DST_WORD_MASK		(0x1ULL << \
-						 I40E_FD_INSET_L4_DST_SHIFT)
-#define I40E_FD_INSET_VERIFY_TAG_SHIFT		31
-#define I40E_FD_INSET_VERIFY_TAG_WORD_MASK	(0x3ULL << \
-						 I40E_FD_INSET_VERIFY_TAG_SHIFT)
-
-#define I40E_FD_INSET_FLEX_WORD50_SHIFT		17
-#define I40E_FD_INSET_FLEX_WORD50_MASK		(0x1ULL << \
-					I40E_FD_INSET_FLEX_WORD50_SHIFT)
-#define I40E_FD_INSET_FLEX_WORD51_SHIFT		16
-#define I40E_FD_INSET_FLEX_WORD51_MASK		(0x1ULL << \
-					I40E_FD_INSET_FLEX_WORD51_SHIFT)
-#define I40E_FD_INSET_FLEX_WORD52_SHIFT		15
-#define I40E_FD_INSET_FLEX_WORD52_MASK		(0x1ULL << \
-					I40E_FD_INSET_FLEX_WORD52_SHIFT)
-#define I40E_FD_INSET_FLEX_WORD53_SHIFT		14
-#define I40E_FD_INSET_FLEX_WORD53_MASK		(0x1ULL << \
-					I40E_FD_INSET_FLEX_WORD53_SHIFT)
-#define I40E_FD_INSET_FLEX_WORD54_SHIFT		13
-#define I40E_FD_INSET_FLEX_WORD54_MASK		(0x1ULL << \
-					I40E_FD_INSET_FLEX_WORD54_SHIFT)
-#define I40E_FD_INSET_FLEX_WORD55_SHIFT		12
-#define I40E_FD_INSET_FLEX_WORD55_MASK		(0x1ULL << \
-					I40E_FD_INSET_FLEX_WORD55_SHIFT)
-#define I40E_FD_INSET_FLEX_WORD56_SHIFT		11
-#define I40E_FD_INSET_FLEX_WORD56_MASK		(0x1ULL << \
-					I40E_FD_INSET_FLEX_WORD56_SHIFT)
-#define I40E_FD_INSET_FLEX_WORD57_SHIFT		10
-#define I40E_FD_INSET_FLEX_WORD57_MASK		(0x1ULL << \
-					I40E_FD_INSET_FLEX_WORD57_SHIFT)
-
-/* Version format for Dynamic Device Personalization(DDP) */
-struct i40e_ddp_version {
-	u8 major;
-	u8 minor;
-	u8 update;
-	u8 draft;
-};
-
-#define I40E_DDP_NAME_SIZE	32
-
-/* Package header */
-struct i40e_package_header {
-	struct i40e_ddp_version version;
-	u32 segment_count;
-	u32 segment_offset[1];
-};
-
-/* Generic segment header */
-struct i40e_generic_seg_header {
-#define SEGMENT_TYPE_METADATA	0x00000001
-#define SEGMENT_TYPE_NOTES	0x00000002
-#define SEGMENT_TYPE_I40E	0x00000011
-#define SEGMENT_TYPE_X722	0x00000012
-	u32 type;
-	struct i40e_ddp_version version;
-	u32 size;
-	char name[I40E_DDP_NAME_SIZE];
-};
-
-struct i40e_metadata_segment {
-	struct i40e_generic_seg_header header;
-	struct i40e_ddp_version version;
-	u32 track_id;
-	char name[I40E_DDP_NAME_SIZE];
-};
-
-struct i40e_device_id_entry {
-	u32 vendor_dev_id;
-	u32 sub_vendor_dev_id;
-};
-
-struct i40e_profile_segment {
-	struct i40e_generic_seg_header header;
-	struct i40e_ddp_version version;
-	char name[I40E_DDP_NAME_SIZE];
-	u32 device_table_count;
-	struct i40e_device_id_entry device_table[1];
-};
-
-struct i40e_section_table {
-	u32 section_count;
-	u32 section_offset[1];
-};
-
-struct i40e_profile_section_header {
-	u16 tbl_size;
-	u16 data_end;
-	struct {
-#define SECTION_TYPE_INFO	0x00000010
-#define SECTION_TYPE_MMIO	0x00000800
-#define SECTION_TYPE_AQ		0x00000801
-#define SECTION_TYPE_NOTE	0x80000000
-#define SECTION_TYPE_NAME	0x80000001
-		u32 type;
-		u32 offset;
-		u32 size;
-	} section;
-};
-
-struct i40e_profile_info {
-	u32 track_id;
-	struct i40e_ddp_version version;
-	u8 op;
-#define I40E_DDP_ADD_TRACKID		0x01
-#define I40E_DDP_REMOVE_TRACKID	0x02
-	u8 reserved[7];
-	u8 name[I40E_DDP_NAME_SIZE];
-};
-#endif /* _I40E_TYPE_H_ */
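The i40e_type.h definitions deleted above describe the on-wire descriptor layout; a consumer decodes a completed RX descriptor by masking and shifting qword1 with the I40E_RXD_QW1_* macros defined in that header. A minimal sketch of such a decode, assuming the usual kernel helpers (le64_to_cpu, BIT, pr_debug); the function name is illustrative and not part of the driver:

	static void example_decode_rx_qw1(const union i40e_32byte_rx_desc *rxd)
	{
		/* qword1 packs status, error, packet type and buffer length */
		u64 qw1 = le64_to_cpu(rxd->wb.qword1.status_error_len);
		u32 status = (qw1 & I40E_RXD_QW1_STATUS_MASK) >>
			     I40E_RXD_QW1_STATUS_SHIFT;
		u32 error = (qw1 & I40E_RXD_QW1_ERROR_MASK) >>
			    I40E_RXD_QW1_ERROR_SHIFT;
		u32 ptype = (qw1 & I40E_RXD_QW1_PTYPE_MASK) >>
			    I40E_RXD_QW1_PTYPE_SHIFT;
		u32 len = (qw1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
			  I40E_RXD_QW1_LENGTH_PBUF_SHIFT;

		/* DD (descriptor done) must be set before the other fields are valid */
		if (status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))
			pr_debug("rx desc: ptype=%u len=%u err=0x%x\n",
				 ptype, len, error);
	}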
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
deleted file mode 100644
index 96e537a35000..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ /dev/null
@@ -1,427 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40EVF_H_
-#define _I40EVF_H_
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/aer.h>
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/interrupt.h>
-#include <linux/ethtool.h>
-#include <linux/if_vlan.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/sctp.h>
-#include <linux/ipv6.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <linux/timer.h>
-#include <linux/workqueue.h>
-#include <linux/wait.h>
-#include <linux/delay.h>
-#include <linux/gfp.h>
-#include <linux/skbuff.h>
-#include <linux/dma-mapping.h>
-#include <linux/etherdevice.h>
-#include <linux/socket.h>
-#include <linux/jiffies.h>
-#include <net/ip6_checksum.h>
-#include <net/pkt_cls.h>
-#include <net/udp.h>
-#include <net/tc_act/tc_gact.h>
-#include <net/tc_act/tc_mirred.h>
-
-#include "i40e_type.h"
-#include <linux/avf/virtchnl.h>
-#include "i40e_txrx.h"
-
-#define DEFAULT_DEBUG_LEVEL_SHIFT 3
-#define PFX "i40evf: "
-
-/* VSI state flags shared with common code */
-enum i40evf_vsi_state_t {
-	__I40E_VSI_DOWN,
-	/* This must be last as it determines the size of the BITMAP */
-	__I40E_VSI_STATE_SIZE__,
-};
-
-/* dummy struct to make common code less painful */
-struct i40e_vsi {
-	struct i40evf_adapter *back;
-	struct net_device *netdev;
-	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-	u16 seid;
-	u16 id;
-	DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__);
-	int base_vector;
-	u16 work_limit;
-	u16 qs_handle;
-	void *priv;     /* client driver data reference. */
-};
-
-/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define I40EVF_RX_BUFFER_WRITE	16	/* Must be power of 2 */
-#define I40EVF_DEFAULT_TXD	512
-#define I40EVF_DEFAULT_RXD	512
-#define I40EVF_MAX_TXD		4096
-#define I40EVF_MIN_TXD		64
-#define I40EVF_MAX_RXD		4096
-#define I40EVF_MIN_RXD		64
-#define I40EVF_REQ_DESCRIPTOR_MULTIPLE	32
-#define I40EVF_MAX_AQ_BUF_SIZE	4096
-#define I40EVF_AQ_LEN		32
-#define I40EVF_AQ_MAX_ERR	20 /* times to try before resetting AQ */
-
-#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
-
-#define I40E_RX_DESC(R, i) (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
-#define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i]))
-#define I40E_TX_CTXTDESC(R, i) \
-	(&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
-#define I40EVF_MAX_REQ_QUEUES 4
-
-#define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4)
-#define I40EVF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4)
-#define I40EVF_MBPS_DIVISOR	125000 /* divisor to convert to Mbps */
-
-/* MAX_MSIX_Q_VECTORS of these are allocated,
- * but we only use one per queue-specific vector.
- */
-struct i40e_q_vector {
-	struct i40evf_adapter *adapter;
-	struct i40e_vsi *vsi;
-	struct napi_struct napi;
-	struct i40e_ring_container rx;
-	struct i40e_ring_container tx;
-	u32 ring_mask;
-	u8 itr_countdown;	/* when 0 should adjust adaptive ITR */
-	u8 num_ringpairs;	/* total number of ring pairs in vector */
-	u16 v_idx;		/* index in the vsi->q_vector array. */
-	u16 reg_idx;		/* register index of the interrupt */
-	char name[IFNAMSIZ + 15];
-	bool arm_wb_state;
-	cpumask_t affinity_mask;
-	struct irq_affinity_notify affinity_notify;
-};
-
-/* Helper macros to switch between ints/sec and what the register uses.
- * And yes, it's the same math going both ways.  The lowest value
- * supported by all of the i40e hardware is 8.
- */
-#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
-	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
-#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
-
-#define I40EVF_DESC_UNUSED(R) \
-	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
-	(R)->next_to_clean - (R)->next_to_use - 1)
-
-#define I40EVF_RX_DESC_ADV(R, i)	\
-	(&(((union i40e_adv_rx_desc *)((R).desc))[i]))
-#define I40EVF_TX_DESC_ADV(R, i)	\
-	(&(((union i40e_adv_tx_desc *)((R).desc))[i]))
-#define I40EVF_TX_CTXTDESC_ADV(R, i)	\
-	(&(((struct i40e_adv_tx_context_desc *)((R).desc))[i]))
-
-#define OTHER_VECTOR 1
-#define NONQ_VECS (OTHER_VECTOR)
-
-#define MIN_MSIX_Q_VECTORS 1
-#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS)
-
-#define I40EVF_QUEUE_END_OF_LIST 0x7FF
-#define I40EVF_FREE_VECTOR 0x7FFF
-struct i40evf_mac_filter {
-	struct list_head list;
-	u8 macaddr[ETH_ALEN];
-	bool remove;		/* filter needs to be removed */
-	bool add;		/* filter needs to be added */
-};
-
-struct i40evf_vlan_filter {
-	struct list_head list;
-	u16 vlan;
-	bool remove;		/* filter needs to be removed */
-	bool add;		/* filter needs to be added */
-};
-
-#define I40EVF_MAX_TRAFFIC_CLASS	4
-/* State of traffic class creation */
-enum i40evf_tc_state_t {
-	__I40EVF_TC_INVALID, /* no traffic class, default state */
-	__I40EVF_TC_RUNNING, /* traffic classes have been created */
-};
-
-/* channel info */
-struct i40evf_channel_config {
-	struct virtchnl_channel_info ch_info[I40EVF_MAX_TRAFFIC_CLASS];
-	enum i40evf_tc_state_t state;
-	u8 total_qps;
-};
-
-/* State of cloud filter */
-enum i40evf_cloud_filter_state_t {
-	__I40EVF_CF_INVALID,	 /* cloud filter not added */
-	__I40EVF_CF_ADD_PENDING, /* cloud filter pending add by the PF */
-	__I40EVF_CF_DEL_PENDING, /* cloud filter pending del by the PF */
-	__I40EVF_CF_ACTIVE,	 /* cloud filter is active */
-};
-
-/* Driver state. The order of these is important! */
-enum i40evf_state_t {
-	__I40EVF_STARTUP,		/* driver loaded, probe complete */
-	__I40EVF_REMOVE,		/* driver is being unloaded */
-	__I40EVF_INIT_VERSION_CHECK,	/* aq msg sent, awaiting reply */
-	__I40EVF_INIT_GET_RESOURCES,	/* aq msg sent, awaiting reply */
-	__I40EVF_INIT_SW,		/* got resources, setting up structs */
-	__I40EVF_RESETTING,		/* in reset */
-	/* Below here, watchdog is running */
-	__I40EVF_DOWN,			/* ready, can be opened */
-	__I40EVF_DOWN_PENDING,		/* descending, waiting for watchdog */
-	__I40EVF_TESTING,		/* in ethtool self-test */
-	__I40EVF_RUNNING,		/* opened, working */
-};
-
-enum i40evf_critical_section_t {
-	__I40EVF_IN_CRITICAL_TASK,	/* cannot be interrupted */
-	__I40EVF_IN_CLIENT_TASK,
-	__I40EVF_IN_REMOVE_TASK,	/* device being removed */
-};
-
-#define I40EVF_CLOUD_FIELD_OMAC		0x01
-#define I40EVF_CLOUD_FIELD_IMAC		0x02
-#define I40EVF_CLOUD_FIELD_IVLAN	0x04
-#define I40EVF_CLOUD_FIELD_TEN_ID	0x08
-#define I40EVF_CLOUD_FIELD_IIP		0x10
-
-#define I40EVF_CF_FLAGS_OMAC	I40EVF_CLOUD_FIELD_OMAC
-#define I40EVF_CF_FLAGS_IMAC	I40EVF_CLOUD_FIELD_IMAC
-#define I40EVF_CF_FLAGS_IMAC_IVLAN	(I40EVF_CLOUD_FIELD_IMAC |\
-					 I40EVF_CLOUD_FIELD_IVLAN)
-#define I40EVF_CF_FLAGS_IMAC_TEN_ID	(I40EVF_CLOUD_FIELD_IMAC |\
-					 I40EVF_CLOUD_FIELD_TEN_ID)
-#define I40EVF_CF_FLAGS_OMAC_TEN_ID_IMAC	(I40EVF_CLOUD_FIELD_OMAC |\
-						 I40EVF_CLOUD_FIELD_IMAC |\
-						 I40EVF_CLOUD_FIELD_TEN_ID)
-#define I40EVF_CF_FLAGS_IMAC_IVLAN_TEN_ID	(I40EVF_CLOUD_FIELD_IMAC |\
-						 I40EVF_CLOUD_FIELD_IVLAN |\
-						 I40EVF_CLOUD_FIELD_TEN_ID)
-#define I40EVF_CF_FLAGS_IIP	I40E_CLOUD_FIELD_IIP
-
-/* bookkeeping of cloud filters */
-struct i40evf_cloud_filter {
-	enum i40evf_cloud_filter_state_t state;
-	struct list_head list;
-	struct virtchnl_filter f;
-	unsigned long cookie;
-	bool del;		/* filter needs to be deleted */
-	bool add;		/* filter needs to be added */
-};
-
-/* board specific private data structure */
-struct i40evf_adapter {
-	struct timer_list watchdog_timer;
-	struct work_struct reset_task;
-	struct work_struct adminq_task;
-	struct delayed_work client_task;
-	struct delayed_work init_task;
-	wait_queue_head_t down_waitqueue;
-	struct i40e_q_vector *q_vectors;
-	struct list_head vlan_filter_list;
-	struct list_head mac_filter_list;
-	/* Lock to protect accesses to MAC and VLAN lists */
-	spinlock_t mac_vlan_list_lock;
-	char misc_vector_name[IFNAMSIZ + 9];
-	int num_active_queues;
-	int num_req_queues;
-
-	/* TX */
-	struct i40e_ring *tx_rings;
-	u32 tx_timeout_count;
-	u32 tx_desc_count;
-
-	/* RX */
-	struct i40e_ring *rx_rings;
-	u64 hw_csum_rx_error;
-	u32 rx_desc_count;
-	int num_msix_vectors;
-	int num_iwarp_msix;
-	int iwarp_base_vector;
-	u32 client_pending;
-	struct i40e_client_instance *cinst;
-	struct msix_entry *msix_entries;
-
-	u32 flags;
-#define I40EVF_FLAG_RX_CSUM_ENABLED		BIT(0)
-#define I40EVF_FLAG_PF_COMMS_FAILED		BIT(3)
-#define I40EVF_FLAG_RESET_PENDING		BIT(4)
-#define I40EVF_FLAG_RESET_NEEDED		BIT(5)
-#define I40EVF_FLAG_WB_ON_ITR_CAPABLE		BIT(6)
-#define I40EVF_FLAG_ADDR_SET_BY_PF		BIT(8)
-#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED	BIT(9)
-#define I40EVF_FLAG_CLIENT_NEEDS_OPEN		BIT(10)
-#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE		BIT(11)
-#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS	BIT(12)
-#define I40EVF_FLAG_PROMISC_ON			BIT(13)
-#define I40EVF_FLAG_ALLMULTI_ON			BIT(14)
-#define I40EVF_FLAG_LEGACY_RX			BIT(15)
-#define I40EVF_FLAG_REINIT_ITR_NEEDED		BIT(16)
-#define I40EVF_FLAG_QUEUES_DISABLED		BIT(17)
-/* duplicates for common code */
-#define I40E_FLAG_DCB_ENABLED			0
-#define I40E_FLAG_RX_CSUM_ENABLED		I40EVF_FLAG_RX_CSUM_ENABLED
-#define I40E_FLAG_LEGACY_RX			I40EVF_FLAG_LEGACY_RX
-	/* flags for admin queue service task */
-	u32 aq_required;
-#define I40EVF_FLAG_AQ_ENABLE_QUEUES		BIT(0)
-#define I40EVF_FLAG_AQ_DISABLE_QUEUES		BIT(1)
-#define I40EVF_FLAG_AQ_ADD_MAC_FILTER		BIT(2)
-#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER		BIT(3)
-#define I40EVF_FLAG_AQ_DEL_MAC_FILTER		BIT(4)
-#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER		BIT(5)
-#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES		BIT(6)
-#define I40EVF_FLAG_AQ_MAP_VECTORS		BIT(7)
-#define I40EVF_FLAG_AQ_HANDLE_RESET		BIT(8)
-#define I40EVF_FLAG_AQ_CONFIGURE_RSS		BIT(9) /* direct AQ config */
-#define I40EVF_FLAG_AQ_GET_CONFIG		BIT(10)
-/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
-#define I40EVF_FLAG_AQ_GET_HENA			BIT(11)
-#define I40EVF_FLAG_AQ_SET_HENA			BIT(12)
-#define I40EVF_FLAG_AQ_SET_RSS_KEY		BIT(13)
-#define I40EVF_FLAG_AQ_SET_RSS_LUT		BIT(14)
-#define I40EVF_FLAG_AQ_REQUEST_PROMISC		BIT(15)
-#define I40EVF_FLAG_AQ_RELEASE_PROMISC		BIT(16)
-#define I40EVF_FLAG_AQ_REQUEST_ALLMULTI		BIT(17)
-#define I40EVF_FLAG_AQ_RELEASE_ALLMULTI		BIT(18)
-#define I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING	BIT(19)
-#define I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING	BIT(20)
-#define I40EVF_FLAG_AQ_ENABLE_CHANNELS		BIT(21)
-#define I40EVF_FLAG_AQ_DISABLE_CHANNELS		BIT(22)
-#define I40EVF_FLAG_AQ_ADD_CLOUD_FILTER		BIT(23)
-#define I40EVF_FLAG_AQ_DEL_CLOUD_FILTER		BIT(24)
-
-	/* OS defined structs */
-	struct net_device *netdev;
-	struct pci_dev *pdev;
-
-	struct i40e_hw hw; /* defined in i40e_type.h */
-
-	enum i40evf_state_t state;
-	unsigned long crit_section;
-
-	struct work_struct watchdog_task;
-	bool netdev_registered;
-	bool link_up;
-	enum virtchnl_link_speed link_speed;
-	enum virtchnl_ops current_op;
-#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \
-			    (_a)->vf_res->vf_cap_flags & \
-				VIRTCHNL_VF_OFFLOAD_IWARP : \
-			    0)
-#define CLIENT_ENABLED(_a) ((_a)->cinst)
-/* RSS by the PF should be preferred over RSS via other methods. */
-#define RSS_PF(_a) ((_a)->vf_res->vf_cap_flags & \
-		    VIRTCHNL_VF_OFFLOAD_RSS_PF)
-#define RSS_AQ(_a) ((_a)->vf_res->vf_cap_flags & \
-		    VIRTCHNL_VF_OFFLOAD_RSS_AQ)
-#define RSS_REG(_a) (!((_a)->vf_res->vf_cap_flags & \
-		       (VIRTCHNL_VF_OFFLOAD_RSS_AQ | \
-			VIRTCHNL_VF_OFFLOAD_RSS_PF)))
-#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
-			  VIRTCHNL_VF_OFFLOAD_VLAN)
-	struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
-	struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
-	struct virtchnl_version_info pf_version;
-#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
-		       ((_a)->pf_version.minor == 1))
-	u16 msg_enable;
-	struct i40e_eth_stats current_stats;
-	struct i40e_vsi vsi;
-	u32 aq_wait_count;
-	/* RSS stuff */
-	u64 hena;
-	u16 rss_key_size;
-	u16 rss_lut_size;
-	u8 *rss_key;
-	u8 *rss_lut;
-	/* ADQ related members */
-	struct i40evf_channel_config ch_config;
-	u8 num_tc;
-	struct list_head cloud_filter_list;
-	/* lock to protect access to the cloud filter list */
-	spinlock_t cloud_filter_list_lock;
-	u16 num_cloud_filters;
-};
-
-
-/* Ethtool Private Flags */
-
-/* lan device */
-struct i40e_device {
-	struct list_head list;
-	struct i40evf_adapter *vf;
-};
-
-/* needed by i40evf_ethtool.c */
-extern char i40evf_driver_name[];
-extern const char i40evf_driver_version[];
-
-int i40evf_up(struct i40evf_adapter *adapter);
-void i40evf_down(struct i40evf_adapter *adapter);
-int i40evf_process_config(struct i40evf_adapter *adapter);
-void i40evf_schedule_reset(struct i40evf_adapter *adapter);
-void i40evf_reset(struct i40evf_adapter *adapter);
-void i40evf_set_ethtool_ops(struct net_device *netdev);
-void i40evf_update_stats(struct i40evf_adapter *adapter);
-void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter);
-int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter);
-void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask);
-void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter);
-void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter);
-
-void i40e_napi_add_all(struct i40evf_adapter *adapter);
-void i40e_napi_del_all(struct i40evf_adapter *adapter);
-
-int i40evf_send_api_ver(struct i40evf_adapter *adapter);
-int i40evf_verify_api_ver(struct i40evf_adapter *adapter);
-int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter);
-int i40evf_get_vf_config(struct i40evf_adapter *adapter);
-void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush);
-void i40evf_configure_queues(struct i40evf_adapter *adapter);
-void i40evf_deconfigure_queues(struct i40evf_adapter *adapter);
-void i40evf_enable_queues(struct i40evf_adapter *adapter);
-void i40evf_disable_queues(struct i40evf_adapter *adapter);
-void i40evf_map_queues(struct i40evf_adapter *adapter);
-int i40evf_request_queues(struct i40evf_adapter *adapter, int num);
-void i40evf_add_ether_addrs(struct i40evf_adapter *adapter);
-void i40evf_del_ether_addrs(struct i40evf_adapter *adapter);
-void i40evf_add_vlans(struct i40evf_adapter *adapter);
-void i40evf_del_vlans(struct i40evf_adapter *adapter);
-void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
-void i40evf_request_stats(struct i40evf_adapter *adapter);
-void i40evf_request_reset(struct i40evf_adapter *adapter);
-void i40evf_get_hena(struct i40evf_adapter *adapter);
-void i40evf_set_hena(struct i40evf_adapter *adapter);
-void i40evf_set_rss_key(struct i40evf_adapter *adapter);
-void i40evf_set_rss_lut(struct i40evf_adapter *adapter);
-void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter);
-void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter);
-void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
-				enum virtchnl_ops v_opcode,
-				i40e_status v_retval, u8 *msg, u16 msglen);
-int i40evf_config_rss(struct i40evf_adapter *adapter);
-int i40evf_lan_add_device(struct i40evf_adapter *adapter);
-int i40evf_lan_del_device(struct i40evf_adapter *adapter);
-void i40evf_client_subtask(struct i40evf_adapter *adapter);
-void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len);
-void i40evf_notify_client_l2_params(struct i40e_vsi *vsi);
-void i40evf_notify_client_open(struct i40e_vsi *vsi);
-void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset);
-void i40evf_enable_channels(struct i40evf_adapter *adapter);
-void i40evf_disable_channels(struct i40evf_adapter *adapter);
-void i40evf_add_cloud_filter(struct i40evf_adapter *adapter);
-void i40evf_del_cloud_filter(struct i40evf_adapter *adapter);
-#endif /* _I40EVF_H_ */
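The I40EVF_DESC_UNUSED() macro deleted above is the usual single-producer ring accounting: the free count is the distance from next_to_use back around to next_to_clean, minus the one slot that is always left empty so a full ring is never mistaken for an empty one. A worked example with hypothetical values: for a 512-entry ring with next_to_clean = 5 and next_to_use = 10, next_to_clean is not greater than next_to_use, so the macro evaluates to 512 + 5 - 10 - 1 = 506 descriptors still available to post to hardware.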
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
deleted file mode 100644
index 3cc9d60d0d72..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40evf_client.c
+++ /dev/null
@@ -1,579 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#include <linux/list.h>
-#include <linux/errno.h>
-
-#include "i40evf.h"
-#include "i40e_prototype.h"
-#include "i40evf_client.h"
-
-static
-const char i40evf_client_interface_version_str[] = I40EVF_CLIENT_VERSION_STR;
-static struct i40e_client *vf_registered_client;
-static LIST_HEAD(i40evf_devices);
-static DEFINE_MUTEX(i40evf_device_mutex);
-
-static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
-				       struct i40e_client *client,
-				       u8 *msg, u16 len);
-
-static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
-				      struct i40e_client *client,
-				      struct i40e_qvlist_info *qvlist_info);
-
-static struct i40e_ops i40evf_lan_ops = {
-	.virtchnl_send = i40evf_client_virtchnl_send,
-	.setup_qvlist = i40evf_client_setup_qvlist,
-};
-
-/**
- * i40evf_client_get_params - retrieve relevant client parameters
- * @vsi: VSI with parameters
- * @params: client param struct
- **/
-static
-void i40evf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
-{
-	int i;
-
-	memset(params, 0, sizeof(struct i40e_params));
-	params->mtu = vsi->netdev->mtu;
-	params->link_up = vsi->back->link_up;
-
-	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
-		params->qos.prio_qos[i].tc = 0;
-		params->qos.prio_qos[i].qs_handle = vsi->qs_handle;
-	}
-}
-
-/**
- * i40evf_notify_client_message - call the client message receive callback
- * @vsi: the VSI associated with this client
- * @msg: message buffer
- * @len: length of message
- *
- * If there is a client to this VSI, call the client
- **/
-void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)
-{
-	struct i40e_client_instance *cinst;
-
-	if (!vsi)
-		return;
-
-	cinst = vsi->back->cinst;
-	if (!cinst || !cinst->client || !cinst->client->ops ||
-	    !cinst->client->ops->virtchnl_receive) {
-		dev_dbg(&vsi->back->pdev->dev,
-			"Cannot locate client instance virtchnl_receive function\n");
-		return;
-	}
-	cinst->client->ops->virtchnl_receive(&cinst->lan_info,  cinst->client,
-					     msg, len);
-}
-
-/**
- * i40evf_notify_client_l2_params - call the client notify callback
- * @vsi: the VSI with l2 param changes
- *
- * If there is a client to this VSI, call the client
- **/
-void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
-{
-	struct i40e_client_instance *cinst;
-	struct i40e_params params;
-
-	if (!vsi)
-		return;
-
-	cinst = vsi->back->cinst;
-
-	if (!cinst || !cinst->client || !cinst->client->ops ||
-	    !cinst->client->ops->l2_param_change) {
-		dev_dbg(&vsi->back->pdev->dev,
-			"Cannot locate client instance l2_param_change function\n");
-		return;
-	}
-	i40evf_client_get_params(vsi, &params);
-	cinst->lan_info.params = params;
-	cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client,
-					    &params);
-}
-
-/**
- * i40evf_notify_client_open - call the client open callback
- * @vsi: the VSI with netdev opened
- *
- * If there is a client to this netdev, call the client with open
- **/
-void i40evf_notify_client_open(struct i40e_vsi *vsi)
-{
-	struct i40evf_adapter *adapter = vsi->back;
-	struct i40e_client_instance *cinst = adapter->cinst;
-	int ret;
-
-	if (!cinst || !cinst->client || !cinst->client->ops ||
-	    !cinst->client->ops->open) {
-		dev_dbg(&vsi->back->pdev->dev,
-			"Cannot locate client instance open function\n");
-		return;
-	}
-	if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state))) {
-		ret = cinst->client->ops->open(&cinst->lan_info, cinst->client);
-		if (!ret)
-			set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
-	}
-}
-
-/**
- * i40evf_client_release_qvlist - send a message to the PF to release iwarp qv map
- * @ldev: pointer to L2 context.
- *
- * Return 0 on success or < 0 on error
- **/
-static int i40evf_client_release_qvlist(struct i40e_info *ldev)
-{
-	struct i40evf_adapter *adapter = ldev->vf;
-	i40e_status err;
-
-	if (adapter->aq_required)
-		return -EAGAIN;
-
-	err = i40e_aq_send_msg_to_pf(&adapter->hw,
-			VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
-			I40E_SUCCESS, NULL, 0, NULL);
-
-	if (err)
-		dev_err(&adapter->pdev->dev,
-			"Unable to send iWarp vector release message to PF, error %d, aq status %d\n",
-			err, adapter->hw.aq.asq_last_status);
-
-	return err;
-}
-
-/**
- * i40evf_notify_client_close - call the client close callback
- * @vsi: the VSI with netdev closed
- * @reset: true when close called due to reset pending
- *
- * If there is a client to this netdev, call the client with close
- **/
-void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset)
-{
-	struct i40evf_adapter *adapter = vsi->back;
-	struct i40e_client_instance *cinst = adapter->cinst;
-
-	if (!cinst || !cinst->client || !cinst->client->ops ||
-	    !cinst->client->ops->close) {
-		dev_dbg(&vsi->back->pdev->dev,
-			"Cannot locate client instance close function\n");
-		return;
-	}
-	cinst->client->ops->close(&cinst->lan_info, cinst->client, reset);
-	i40evf_client_release_qvlist(&cinst->lan_info);
-	clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
-}
-
-/**
- * i40evf_client_add_instance - add a client instance to the instance list
- * @adapter: pointer to the board struct
- *
- * Returns cinst ptr on success, NULL on failure
- **/
-static struct i40e_client_instance *
-i40evf_client_add_instance(struct i40evf_adapter *adapter)
-{
-	struct i40e_client_instance *cinst = NULL;
-	struct i40e_vsi *vsi = &adapter->vsi;
-	struct netdev_hw_addr *mac = NULL;
-	struct i40e_params params;
-
-	if (!vf_registered_client)
-		goto out;
-
-	if (adapter->cinst) {
-		cinst = adapter->cinst;
-		goto out;
-	}
-
-	cinst = kzalloc(sizeof(*cinst), GFP_KERNEL);
-	if (!cinst)
-		goto out;
-
-	cinst->lan_info.vf = (void *)adapter;
-	cinst->lan_info.netdev = vsi->netdev;
-	cinst->lan_info.pcidev = adapter->pdev;
-	cinst->lan_info.fid = 0;
-	cinst->lan_info.ftype = I40E_CLIENT_FTYPE_VF;
-	cinst->lan_info.hw_addr = adapter->hw.hw_addr;
-	cinst->lan_info.ops = &i40evf_lan_ops;
-	cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR;
-	cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR;
-	cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD;
-	i40evf_client_get_params(vsi, &params);
-	cinst->lan_info.params = params;
-	set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state);
-
-	cinst->lan_info.msix_count = adapter->num_iwarp_msix;
-	cinst->lan_info.msix_entries =
-			&adapter->msix_entries[adapter->iwarp_base_vector];
-
-	mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list,
-			       struct netdev_hw_addr, list);
-	if (mac)
-		ether_addr_copy(cinst->lan_info.lanmac, mac->addr);
-	else
-		dev_err(&adapter->pdev->dev, "MAC address list is empty!\n");
-
-	cinst->client = vf_registered_client;
-	adapter->cinst = cinst;
-out:
-	return cinst;
-}
-
-/**
- * i40evf_client_del_instance - removes a client instance from the list
- * @adapter: pointer to the board struct
- *
- **/
-static
-void i40evf_client_del_instance(struct i40evf_adapter *adapter)
-{
-	kfree(adapter->cinst);
-	adapter->cinst = NULL;
-}
-
-/**
- * i40evf_client_subtask - client maintenance work
- * @adapter: board private structure
- **/
-void i40evf_client_subtask(struct i40evf_adapter *adapter)
-{
-	struct i40e_client *client = vf_registered_client;
-	struct i40e_client_instance *cinst;
-	int ret = 0;
-
-	if (adapter->state < __I40EVF_DOWN)
-		return;
-
-	/* first check client is registered */
-	if (!client)
-		return;
-
-	/* Add the client instance to the instance list */
-	cinst = i40evf_client_add_instance(adapter);
-	if (!cinst)
-		return;
-
-	dev_info(&adapter->pdev->dev, "Added instance of Client %s\n",
-		 client->name);
-
-	if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) {
-		/* Send an Open request to the client */
-
-		if (client->ops && client->ops->open)
-			ret = client->ops->open(&cinst->lan_info, client);
-		if (!ret)
-			set_bit(__I40E_CLIENT_INSTANCE_OPENED,
-				&cinst->state);
-		else
-			/* remove client instance */
-			i40evf_client_del_instance(adapter);
-	}
-}
-
-/**
- * i40evf_lan_add_device - add a lan device struct to the list of lan devices
- * @adapter: pointer to the board struct
- *
- * Returns 0 on success or non-0 on error
- **/
-int i40evf_lan_add_device(struct i40evf_adapter *adapter)
-{
-	struct i40e_device *ldev;
-	int ret = 0;
-
-	mutex_lock(&i40evf_device_mutex);
-	list_for_each_entry(ldev, &i40evf_devices, list) {
-		if (ldev->vf == adapter) {
-			ret = -EEXIST;
-			goto out;
-		}
-	}
-	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
-	if (!ldev) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	ldev->vf = adapter;
-	INIT_LIST_HEAD(&ldev->list);
-	list_add(&ldev->list, &i40evf_devices);
-	dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
-		 adapter->hw.bus.bus_id, adapter->hw.bus.device,
-		 adapter->hw.bus.func);
-
-	/* Since registration may have happened before a device was added,
-	 * we can schedule a subtask to go initialize the clients.
-	 */
-	adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
-
-out:
-	mutex_unlock(&i40evf_device_mutex);
-	return ret;
-}
-
-/**
- * i40evf_lan_del_device - removes a lan device from the device list
- * @adapter: pointer to the board struct
- *
- * Returns 0 on success or non-0 on error
- **/
-int i40evf_lan_del_device(struct i40evf_adapter *adapter)
-{
-	struct i40e_device *ldev, *tmp;
-	int ret = -ENODEV;
-
-	mutex_lock(&i40evf_device_mutex);
-	list_for_each_entry_safe(ldev, tmp, &i40evf_devices, list) {
-		if (ldev->vf == adapter) {
-			dev_info(&adapter->pdev->dev,
-				 "Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
-				 adapter->hw.bus.bus_id, adapter->hw.bus.device,
-				 adapter->hw.bus.func);
-			list_del(&ldev->list);
-			kfree(ldev);
-			ret = 0;
-			break;
-		}
-	}
-
-	mutex_unlock(&i40evf_device_mutex);
-	return ret;
-}
-
-/**
- * i40evf_client_release - release client specific resources
- * @client: pointer to the registered client
- *
- **/
-static void i40evf_client_release(struct i40e_client *client)
-{
-	struct i40e_client_instance *cinst;
-	struct i40e_device *ldev;
-	struct i40evf_adapter *adapter;
-
-	mutex_lock(&i40evf_device_mutex);
-	list_for_each_entry(ldev, &i40evf_devices, list) {
-		adapter = ldev->vf;
-		cinst = adapter->cinst;
-		if (!cinst)
-			continue;
-		if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) {
-			if (client->ops && client->ops->close)
-				client->ops->close(&cinst->lan_info, client,
-						   false);
-			i40evf_client_release_qvlist(&cinst->lan_info);
-			clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
-
-			dev_warn(&adapter->pdev->dev,
-				 "Client %s instance closed\n", client->name);
-		}
-		/* delete the client instance */
-		i40evf_client_del_instance(adapter);
-		dev_info(&adapter->pdev->dev, "Deleted client instance of Client %s\n",
-			 client->name);
-	}
-	mutex_unlock(&i40evf_device_mutex);
-}
-
-/**
- * i40evf_client_prepare - prepare client specific resources
- * @client: pointer to the registered client
- *
- **/
-static void i40evf_client_prepare(struct i40e_client *client)
-{
-	struct i40e_device *ldev;
-	struct i40evf_adapter *adapter;
-
-	mutex_lock(&i40evf_device_mutex);
-	list_for_each_entry(ldev, &i40evf_devices, list) {
-		adapter = ldev->vf;
-		/* Signal the watchdog to service the client */
-		adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
-	}
-	mutex_unlock(&i40evf_device_mutex);
-}
-
-/**
- * i40evf_client_virtchnl_send - send a message to the PF instance
- * @ldev: pointer to L2 context.
- * @client: Client pointer.
- * @msg: pointer to message buffer
- * @len: message length
- *
- * Return 0 on success or < 0 on error
- **/
-static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
-				       struct i40e_client *client,
-				       u8 *msg, u16 len)
-{
-	struct i40evf_adapter *adapter = ldev->vf;
-	i40e_status err;
-
-	if (adapter->aq_required)
-		return -EAGAIN;
-
-	err = i40e_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP,
-				     I40E_SUCCESS, msg, len, NULL);
-	if (err)
-		dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n",
-			err, adapter->hw.aq.asq_last_status);
-
-	return err;
-}
-
-/**
- * i40evf_client_setup_qvlist - send a message to the PF to setup iwarp qv map
- * @ldev: pointer to L2 context.
- * @client: Client pointer.
- * @qvlist_info: queue and vector list
- *
- * Return 0 on success or < 0 on error
- **/
-static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
-				      struct i40e_client *client,
-				      struct i40e_qvlist_info *qvlist_info)
-{
-	struct virtchnl_iwarp_qvlist_info *v_qvlist_info;
-	struct i40evf_adapter *adapter = ldev->vf;
-	struct i40e_qv_info *qv_info;
-	i40e_status err;
-	u32 v_idx, i;
-	u32 msg_size;
-
-	if (adapter->aq_required)
-		return -EAGAIN;
-
-	/* A quick check on whether the vectors belong to the client */
-	for (i = 0; i < qvlist_info->num_vectors; i++) {
-		qv_info = &qvlist_info->qv_info[i];
-		if (!qv_info)
-			continue;
-		v_idx = qv_info->v_idx;
-		if ((v_idx >=
-		    (adapter->iwarp_base_vector + adapter->num_iwarp_msix)) ||
-		    (v_idx < adapter->iwarp_base_vector))
-			return -EINVAL;
-	}
-
-	v_qvlist_info = (struct virtchnl_iwarp_qvlist_info *)qvlist_info;
-	msg_size = sizeof(struct virtchnl_iwarp_qvlist_info) +
-			(sizeof(struct virtchnl_iwarp_qv_info) *
-			(v_qvlist_info->num_vectors - 1));
-
-	adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
-	err = i40e_aq_send_msg_to_pf(&adapter->hw,
-			VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
-			I40E_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL);
-
-	if (err) {
-		dev_err(&adapter->pdev->dev,
-			"Unable to send iWarp vector config message to PF, error %d, aq status %d\n",
-			err, adapter->hw.aq.asq_last_status);
-		goto out;
-	}
-
-	err = -EBUSY;
-	for (i = 0; i < 5; i++) {
-		msleep(100);
-		if (!(adapter->client_pending &
-		      BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) {
-			err = 0;
-			break;
-		}
-	}
-out:
-	return err;
-}
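
The function above sizes the virtchnl message to cover the variable-length
qv_info[] tail (one entry is already counted in the struct itself, hence the
num_vectors - 1 term) and then polls for up to 500 ms for the PF to clear the
pending bit. As a minimal sketch, assuming the adminq path clears
adapter->client_pending when the PF's reply arrives (as this driver does
elsewhere), the bounded wait could be factored into a helper; the helper name
below is illustrative only and not part of the driver:

static int i40evf_wait_for_pf_ack(struct i40evf_adapter *adapter,
				  enum virtchnl_ops op)
{
	int i;

	/* 5 x 100 ms, matching the bounded wait used above */
	for (i = 0; i < 5; i++) {
		msleep(100);
		if (!(adapter->client_pending & BIT(op)))
			return 0;
	}
	return -EBUSY;
}
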
-
-/**
- * i40evf_register_client - Register an i40e client driver with the L2 driver
- * @client: pointer to the i40e_client struct
- *
- * Returns 0 on success or non-0 on error
- **/
-int i40evf_register_client(struct i40e_client *client)
-{
-	int ret = 0;
-
-	if (!client) {
-		ret = -EIO;
-		goto out;
-	}
-
-	if (strlen(client->name) == 0) {
-		pr_info("i40evf: Failed to register client with no name\n");
-		ret = -EIO;
-		goto out;
-	}
-
-	if (vf_registered_client) {
-		pr_info("i40evf: Client %s has already been registered!\n",
-			client->name);
-		ret = -EEXIST;
-		goto out;
-	}
-
-	if ((client->version.major != I40EVF_CLIENT_VERSION_MAJOR) ||
-	    (client->version.minor != I40EVF_CLIENT_VERSION_MINOR)) {
-		pr_info("i40evf: Failed to register client %s due to mismatched client interface version\n",
-			client->name);
-		pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n",
-			client->version.major, client->version.minor,
-			client->version.build,
-			i40evf_client_interface_version_str);
-		ret = -EIO;
-		goto out;
-	}
-
-	vf_registered_client = client;
-
-	i40evf_client_prepare(client);
-
-	pr_info("i40evf: Registered client %s with return code %d\n",
-		client->name, ret);
-out:
-	return ret;
-}
-EXPORT_SYMBOL(i40evf_register_client);
-
-/**
- * i40evf_unregister_client - Unregister an i40e client driver with the L2 driver
- * @client: pointer to the i40e_client struct
- *
- * Returns 0 on success or non-0 on error
- **/
-int i40evf_unregister_client(struct i40e_client *client)
-{
-	int ret = 0;
-
-	/* When an unregister request comes through we have to send
-	 * a close for each of the client instances that were opened.
-	 * client_release function is called to handle this.
-	 */
-	i40evf_client_release(client);
-
-	if (vf_registered_client != client) {
-		pr_info("i40evf: Client %s has not been registered\n",
-			client->name);
-		ret = -ENODEV;
-		goto out;
-	}
-	vf_registered_client = NULL;
-	pr_info("i40evf: Unregistered client %s\n", client->name);
-out:
-	return ret;
-}
-EXPORT_SYMBOL(i40evf_unregister_client);
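
For reference, the two exported symbols above are the only entry points a
client (RDMA) module uses to attach to this LAN driver. A minimal sketch of
such a client, using only the structures and callbacks declared in
i40evf_client.h below; the module, callback, and client names are invented for
illustration and the open/close bodies are stubs:

static int example_client_open(struct i40e_info *ldev, struct i40e_client *client)
{
	/* map doorbells, set up queues, etc. */
	return 0;
}

static void example_client_close(struct i40e_info *ldev, struct i40e_client *client,
				 bool reset)
{
	/* quiesce and release resources; 'reset' is true on a pending reset */
}

static struct i40e_client_ops example_client_ops = {
	.open  = example_client_open,
	.close = example_client_close,
};

static struct i40e_client example_client = {
	.name = "example",	/* must fit in I40EVF_CLIENT_STR_LENGTH */
	.type = I40E_CLIENT_IWARP,
	.ops  = &example_client_ops,
	.version = {
		/* major/minor must match, or i40evf_register_client() rejects us */
		.major = I40EVF_CLIENT_VERSION_MAJOR,
		.minor = I40EVF_CLIENT_VERSION_MINOR,
		.build = I40EVF_CLIENT_VERSION_BUILD,
	},
};

/* module_init(): i40evf_register_client(&example_client);   */
/* module_exit(): i40evf_unregister_client(&example_client); */
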
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.h b/drivers/net/ethernet/intel/i40evf/i40evf_client.h
deleted file mode 100644
index 5585f362048a..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40evf_client.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40EVF_CLIENT_H_
-#define _I40EVF_CLIENT_H_
-
-#define I40EVF_CLIENT_STR_LENGTH 10
-
-/* Client interface version should be updated anytime there is a change in the
- * existing APIs or data structures.
- */
-#define I40EVF_CLIENT_VERSION_MAJOR 0
-#define I40EVF_CLIENT_VERSION_MINOR 01
-#define I40EVF_CLIENT_VERSION_BUILD 00
-#define I40EVF_CLIENT_VERSION_STR     \
-	__stringify(I40EVF_CLIENT_VERSION_MAJOR) "." \
-	__stringify(I40EVF_CLIENT_VERSION_MINOR) "." \
-	__stringify(I40EVF_CLIENT_VERSION_BUILD)
-
-struct i40e_client_version {
-	u8 major;
-	u8 minor;
-	u8 build;
-	u8 rsvd;
-};
-
-enum i40e_client_state {
-	__I40E_CLIENT_NULL,
-	__I40E_CLIENT_REGISTERED
-};
-
-enum i40e_client_instance_state {
-	__I40E_CLIENT_INSTANCE_NONE,
-	__I40E_CLIENT_INSTANCE_OPENED,
-};
-
-struct i40e_ops;
-struct i40e_client;
-
-/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
- * In order for us to keep the interface simple, SW will define a
- * unique type value for AEQ.
- */
-#define I40E_QUEUE_TYPE_PE_AEQ  0x80
-#define I40E_QUEUE_INVALID_IDX	0xFFFF
-
-struct i40e_qv_info {
-	u32 v_idx; /* msix_vector */
-	u16 ceq_idx;
-	u16 aeq_idx;
-	u8 itr_idx;
-};
-
-struct i40e_qvlist_info {
-	u32 num_vectors;
-	struct i40e_qv_info qv_info[1];
-};
-
-#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF
-
-/* set of LAN parameters useful for clients managed by LAN */
-
-/* Struct to hold per priority info */
-struct i40e_prio_qos_params {
-	u16 qs_handle; /* qs handle for prio */
-	u8 tc; /* TC mapped to prio */
-	u8 reserved;
-};
-
-#define I40E_CLIENT_MAX_USER_PRIORITY        8
-/* Struct to hold Client QoS */
-struct i40e_qos_params {
-	struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY];
-};
-
-struct i40e_params {
-	struct i40e_qos_params qos;
-	u16 mtu;
-	u16 link_up; /* boolean */
-};
-
-/* Structure to hold LAN device info for a client device */
-struct i40e_info {
-	struct i40e_client_version version;
-	u8 lanmac[6];
-	struct net_device *netdev;
-	struct pci_dev *pcidev;
-	u8 __iomem *hw_addr;
-	u8 fid;	/* function id, PF id or VF id */
-#define I40E_CLIENT_FTYPE_PF 0
-#define I40E_CLIENT_FTYPE_VF 1
-	u8 ftype; /* function type, PF or VF */
-	void *vf; /* cast to i40evf_adapter */
-
-	/* All L2 params that could change during the life span of the device
-	 * and needs to be communicated to the client when they change
-	 */
-	struct i40e_params params;
-	struct i40e_ops *ops;
-
-	u16 msix_count;	 /* number of msix vectors*/
-	/* Array down below will be dynamically allocated based on msix_count */
-	struct msix_entry *msix_entries;
-	u16 itr_index; /* Which ITR index the PE driver is supposed to use */
-};
-
-struct i40e_ops {
-	/* setup_q_vector_list enables queues with a particular vector */
-	int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client,
-			    struct i40e_qvlist_info *qv_info);
-
-	u32 (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client,
-			     u8 *msg, u16 len);
-
-	/* If the PE Engine is unresponsive, RDMA driver can request a reset.*/
-	void (*request_reset)(struct i40e_info *ldev,
-			      struct i40e_client *client);
-};
-
-struct i40e_client_ops {
-	/* Should be called from register_client() or whenever the driver is
-	 * ready to create a specific client instance.
-	 */
-	int (*open)(struct i40e_info *ldev, struct i40e_client *client);
-
-	/* Should be closed when netdev is unavailable or when unregister
-	 * call comes in. If the close happens due to a reset, set the reset
-	 * bit to true.
-	 */
-	void (*close)(struct i40e_info *ldev, struct i40e_client *client,
-		      bool reset);
-
-	/* called when some l2 managed parameters changes - mss */
-	void (*l2_param_change)(struct i40e_info *ldev,
-				struct i40e_client *client,
-				struct i40e_params *params);
-
-	/* called when a message is received from the PF */
-	int (*virtchnl_receive)(struct i40e_info *ldev,
-				struct i40e_client *client,
-				u8 *msg, u16 len);
-};
-
-/* Client device */
-struct i40e_client_instance {
-	struct list_head list;
-	struct i40e_info lan_info;
-	struct i40e_client *client;
-	unsigned long  state;
-};
-
-struct i40e_client {
-	struct list_head list;		/* list of registered clients */
-	char name[I40EVF_CLIENT_STR_LENGTH];
-	struct i40e_client_version version;
-	unsigned long state;		/* client state */
-	atomic_t ref_cnt;  /* Count of all the client devices of this kind */
-	u32 flags;
-#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE	BIT(0)
-#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS	BIT(2)
-	u8 type;
-#define I40E_CLIENT_IWARP 0
-	struct i40e_client_ops *ops;	/* client ops provided by the client */
-};
-
-/* used by clients */
-int i40evf_register_client(struct i40e_client *client);
-int i40evf_unregister_client(struct i40e_client *client);
-#endif /* _I40EVF_CLIENT_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
deleted file mode 100644
index 69efe0aec76a..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ /dev/null
@@ -1,820 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-/* ethtool support for i40evf */
-#include "i40evf.h"
-
-#include <linux/uaccess.h>
-
-struct i40evf_stats {
-	char stat_string[ETH_GSTRING_LEN];
-	int stat_offset;
-};
-
-#define I40EVF_STAT(_name, _stat) { \
-	.stat_string = _name, \
-	.stat_offset = offsetof(struct i40evf_adapter, _stat) \
-}
-
-/* All stats are u64, so we don't need to track the size of the field. */
-static const struct i40evf_stats i40evf_gstrings_stats[] = {
-	I40EVF_STAT("rx_bytes", current_stats.rx_bytes),
-	I40EVF_STAT("rx_unicast", current_stats.rx_unicast),
-	I40EVF_STAT("rx_multicast", current_stats.rx_multicast),
-	I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast),
-	I40EVF_STAT("rx_discards", current_stats.rx_discards),
-	I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
-	I40EVF_STAT("tx_bytes", current_stats.tx_bytes),
-	I40EVF_STAT("tx_unicast", current_stats.tx_unicast),
-	I40EVF_STAT("tx_multicast", current_stats.tx_multicast),
-	I40EVF_STAT("tx_broadcast", current_stats.tx_broadcast),
-	I40EVF_STAT("tx_discards", current_stats.tx_discards),
-	I40EVF_STAT("tx_errors", current_stats.tx_errors),
-};
-
-#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
-#define I40EVF_QUEUE_STATS_LEN(_dev) \
-	(((struct i40evf_adapter *)\
-		netdev_priv(_dev))->num_active_queues \
-		  * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
-#define I40EVF_STATS_LEN(_dev) \
-	(I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
-
-/* For now we have one and only one private flag and it is only defined
- * when we have support for the SKIP_CPU_SYNC DMA attribute.  Instead
- * of leaving all this code sitting around empty we will strip it unless
- * our one private flag is actually available.
- */
-struct i40evf_priv_flags {
-	char flag_string[ETH_GSTRING_LEN];
-	u32 flag;
-	bool read_only;
-};
-
-#define I40EVF_PRIV_FLAG(_name, _flag, _read_only) { \
-	.flag_string = _name, \
-	.flag = _flag, \
-	.read_only = _read_only, \
-}
-
-static const struct i40evf_priv_flags i40evf_gstrings_priv_flags[] = {
-	I40EVF_PRIV_FLAG("legacy-rx", I40EVF_FLAG_LEGACY_RX, 0),
-};
-
-#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_gstrings_priv_flags)
-
-/**
- * i40evf_get_link_ksettings - Get Link Speed and Duplex settings
- * @netdev: network interface device structure
- * @cmd: ethtool command
- *
- * Reports speed/duplex settings. Because this is a VF, we don't know what
- * kind of link we really have, so we fake it.
- **/
-static int i40evf_get_link_ksettings(struct net_device *netdev,
-				     struct ethtool_link_ksettings *cmd)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-
-	ethtool_link_ksettings_zero_link_mode(cmd, supported);
-	cmd->base.autoneg = AUTONEG_DISABLE;
-	cmd->base.port = PORT_NONE;
-	/* Set speed and duplex */
-	switch (adapter->link_speed) {
-	case I40E_LINK_SPEED_40GB:
-		cmd->base.speed = SPEED_40000;
-		break;
-	case I40E_LINK_SPEED_25GB:
-#ifdef SPEED_25000
-		cmd->base.speed = SPEED_25000;
-#else
-		netdev_info(netdev,
-			    "Speed is 25G, display not supported by this version of ethtool.\n");
-#endif
-		break;
-	case I40E_LINK_SPEED_20GB:
-		cmd->base.speed = SPEED_20000;
-		break;
-	case I40E_LINK_SPEED_10GB:
-		cmd->base.speed = SPEED_10000;
-		break;
-	case I40E_LINK_SPEED_1GB:
-		cmd->base.speed = SPEED_1000;
-		break;
-	case I40E_LINK_SPEED_100MB:
-		cmd->base.speed = SPEED_100;
-		break;
-	default:
-		break;
-	}
-	cmd->base.duplex = DUPLEX_FULL;
-
-	return 0;
-}
-
-/**
- * i40evf_get_sset_count - Get length of string set
- * @netdev: network interface device structure
- * @sset: id of string set
- *
- * Reports size of string table. This driver only supports
- * strings for statistics and private flags.
- **/
-static int i40evf_get_sset_count(struct net_device *netdev, int sset)
-{
-	if (sset == ETH_SS_STATS)
-		return I40EVF_STATS_LEN(netdev);
-	else if (sset == ETH_SS_PRIV_FLAGS)
-		return I40EVF_PRIV_FLAGS_STR_LEN;
-	else
-		return -EINVAL;
-}
-
-/**
- * i40evf_get_ethtool_stats - report device statistics
- * @netdev: network interface device structure
- * @stats: ethtool statistics structure
- * @data: pointer to data buffer
- *
- * All statistics are added to the data buffer as an array of u64.
- **/
-static void i40evf_get_ethtool_stats(struct net_device *netdev,
-				     struct ethtool_stats *stats, u64 *data)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	unsigned int i, j;
-	char *p;
-
-	for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) {
-		p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset;
-		data[i] =  *(u64 *)p;
-	}
-	for (j = 0; j < adapter->num_active_queues; j++) {
-		data[i++] = adapter->tx_rings[j].stats.packets;
-		data[i++] = adapter->tx_rings[j].stats.bytes;
-	}
-	for (j = 0; j < adapter->num_active_queues; j++) {
-		data[i++] = adapter->rx_rings[j].stats.packets;
-		data[i++] = adapter->rx_rings[j].stats.bytes;
-	}
-}
-
-/**
- * i40evf_get_strings - Get string set
- * @netdev: network interface device structure
- * @sset: id of string set
- * @data: buffer for string data
- *
- * Builds stats string table.
- **/
-static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	u8 *p = data;
-	int i;
-
-	if (sset == ETH_SS_STATS) {
-		for (i = 0; i < (int)I40EVF_GLOBAL_STATS_LEN; i++) {
-			memcpy(p, i40evf_gstrings_stats[i].stat_string,
-			       ETH_GSTRING_LEN);
-			p += ETH_GSTRING_LEN;
-		}
-		for (i = 0; i < adapter->num_active_queues; i++) {
-			snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i);
-			p += ETH_GSTRING_LEN;
-			snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i);
-			p += ETH_GSTRING_LEN;
-		}
-		for (i = 0; i < adapter->num_active_queues; i++) {
-			snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i);
-			p += ETH_GSTRING_LEN;
-			snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
-			p += ETH_GSTRING_LEN;
-		}
-	} else if (sset == ETH_SS_PRIV_FLAGS) {
-		for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
-			snprintf(p, ETH_GSTRING_LEN, "%s",
-				 i40evf_gstrings_priv_flags[i].flag_string);
-			p += ETH_GSTRING_LEN;
-		}
-	}
-}
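
The string table built here must stay in lock-step with the value ordering in
i40evf_get_ethtool_stats() above: the twelve global counters first, then
packets/bytes for each Tx ring, then the same for each Rx ring. A worked
example of the resulting `ethtool -S` layout, assuming struct i40e_queue_stats
carries exactly two u64s (packets and bytes) and the VF has two active queues:

/*
 * data[0..11]   rx_bytes, rx_unicast, ... , tx_discards, tx_errors
 * data[12..15]  tx-0.packets, tx-0.bytes, tx-1.packets, tx-1.bytes
 * data[16..19]  rx-0.packets, rx-0.bytes, rx-1.packets, rx-1.bytes
 *
 * i40evf_get_sset_count(netdev, ETH_SS_STATS) therefore reports
 * 12 + (2 queues * 2 rings * 2 u64) = 20 entries.
 */
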
-
-/**
- * i40evf_get_priv_flags - report device private flags
- * @netdev: network interface device structure
- *
- * The get string set count and the string set should be matched for each
- * flag returned.  Add new strings for each flag to the i40evf_gstrings_priv_flags
- * array.
- *
- * Returns a u32 bitmap of flags.
- **/
-static u32 i40evf_get_priv_flags(struct net_device *netdev)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	u32 i, ret_flags = 0;
-
-	for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
-		const struct i40evf_priv_flags *priv_flags;
-
-		priv_flags = &i40evf_gstrings_priv_flags[i];
-
-		if (priv_flags->flag & adapter->flags)
-			ret_flags |= BIT(i);
-	}
-
-	return ret_flags;
-}
-
-/**
- * i40evf_set_priv_flags - set private flags
- * @netdev: network interface device structure
- * @flags: bit flags to be set
- **/
-static int i40evf_set_priv_flags(struct net_device *netdev, u32 flags)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	u32 orig_flags, new_flags, changed_flags;
-	u32 i;
-
-	orig_flags = READ_ONCE(adapter->flags);
-	new_flags = orig_flags;
-
-	for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
-		const struct i40evf_priv_flags *priv_flags;
-
-		priv_flags = &i40evf_gstrings_priv_flags[i];
-
-		if (flags & BIT(i))
-			new_flags |= priv_flags->flag;
-		else
-			new_flags &= ~(priv_flags->flag);
-
-		if (priv_flags->read_only &&
-		    ((orig_flags ^ new_flags) & ~BIT(i)))
-			return -EOPNOTSUPP;
-	}
-
-	/* Before we finalize any flag changes, any checks which we need to
-	 * perform to determine if the new flags will be supported should go
-	 * here...
-	 */
-
-	/* Compare and exchange the new flags into place. If we failed, that
-	 * is if cmpxchg returns anything but the old value, this means
-	 * something else must have modified the flags variable since we
-	 * copied it. We'll just punt with an error and log something in the
-	 * message buffer.
-	 */
-	if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) {
-		dev_warn(&adapter->pdev->dev,
-			 "Unable to update adapter->flags as it was modified by another thread...\n");
-		return -EAGAIN;
-	}
-
-	changed_flags = orig_flags ^ new_flags;
-
-	/* Process any additional changes needed as a result of flag changes.
-	 * The changed_flags value reflects the list of bits that were changed
-	 * in the code above.
-	 */
-
-	/* issue a reset to force legacy-rx change to take effect */
-	if (changed_flags & I40EVF_FLAG_LEGACY_RX) {
-		if (netif_running(netdev)) {
-			adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
-			schedule_work(&adapter->reset_task);
-		}
-	}
-
-	return 0;
-}
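
The cmpxchg() above publishes the whole flags word at once and simply fails
with -EAGAIN if another writer raced in between the READ_ONCE() and the update.
A common alternative, sketched here only for comparison (compute_new_flags()
is a hypothetical helper, not part of the driver), is to retry until the
compare-and-exchange succeeds:

	u32 old_flags, wanted;

	do {
		old_flags = READ_ONCE(adapter->flags);
		wanted = compute_new_flags(old_flags, flags); /* hypothetical */
	} while (cmpxchg(&adapter->flags, old_flags, wanted) != old_flags);

Either way, the legacy-rx private flag only takes effect after the reset
scheduled at the end of the function; from user space it is toggled with
ethtool --set-priv-flags <iface> legacy-rx on|off.
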
-
-/**
- * i40evf_get_msglevel - Get debug message level
- * @netdev: network interface device structure
- *
- * Returns current debug message level.
- **/
-static u32 i40evf_get_msglevel(struct net_device *netdev)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-
-	return adapter->msg_enable;
-}
-
-/**
- * i40evf_set_msglevel - Set debug message level
- * @netdev: network interface device structure
- * @data: message level
- *
- * Set current debug message level. Higher values cause the driver to
- * be noisier.
- **/
-static void i40evf_set_msglevel(struct net_device *netdev, u32 data)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-
-	if (I40E_DEBUG_USER & data)
-		adapter->hw.debug_mask = data;
-	adapter->msg_enable = data;
-}
-
-/**
- * i40evf_get_drvinfo - Get driver info
- * @netdev: network interface device structure
- * @drvinfo: ethtool driver info structure
- *
- * Returns information about the driver and device for display to the user.
- **/
-static void i40evf_get_drvinfo(struct net_device *netdev,
-			       struct ethtool_drvinfo *drvinfo)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-
-	strlcpy(drvinfo->driver, i40evf_driver_name, 32);
-	strlcpy(drvinfo->version, i40evf_driver_version, 32);
-	strlcpy(drvinfo->fw_version, "N/A", 4);
-	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
-	drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN;
-}
-
-/**
- * i40evf_get_ringparam - Get ring parameters
- * @netdev: network interface device structure
- * @ring: ethtool ringparam structure
- *
- * Returns current ring parameters. TX and RX rings are reported separately,
- * but the number of rings is not reported.
- **/
-static void i40evf_get_ringparam(struct net_device *netdev,
-				 struct ethtool_ringparam *ring)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-
-	ring->rx_max_pending = I40EVF_MAX_RXD;
-	ring->tx_max_pending = I40EVF_MAX_TXD;
-	ring->rx_pending = adapter->rx_desc_count;
-	ring->tx_pending = adapter->tx_desc_count;
-}
-
-/**
- * i40evf_set_ringparam - Set ring parameters
- * @netdev: network interface device structure
- * @ring: ethtool ringparam structure
- *
- * Sets ring parameters. TX and RX rings are controlled separately, but the
- * number of rings is not specified, so all rings get the same settings.
- **/
-static int i40evf_set_ringparam(struct net_device *netdev,
-				struct ethtool_ringparam *ring)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	u32 new_rx_count, new_tx_count;
-
-	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
-		return -EINVAL;
-
-	new_tx_count = clamp_t(u32, ring->tx_pending,
-			       I40EVF_MIN_TXD,
-			       I40EVF_MAX_TXD);
-	new_tx_count = ALIGN(new_tx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
-
-	new_rx_count = clamp_t(u32, ring->rx_pending,
-			       I40EVF_MIN_RXD,
-			       I40EVF_MAX_RXD);
-	new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
-
-	/* if nothing to do return success */
-	if ((new_tx_count == adapter->tx_desc_count) &&
-	    (new_rx_count == adapter->rx_desc_count))
-		return 0;
-
-	adapter->tx_desc_count = new_tx_count;
-	adapter->rx_desc_count = new_rx_count;
-
-	if (netif_running(netdev)) {
-		adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
-		schedule_work(&adapter->reset_task);
-	}
-
-	return 0;
-}
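
Requested ring sizes are first clamped to the supported range and then rounded
up to the descriptor multiple before a reset applies them. A worked example,
assuming I40EVF_REQ_DESCRIPTOR_MULTIPLE is 32 and the request lies inside
[I40EVF_MIN_TXD, I40EVF_MAX_TXD] (the constants live in i40evf.h, removed
earlier in this patch):

/*
 * # ethtool -G <iface> tx 1000
 *
 *   new_tx_count = clamp_t(u32, 1000, I40EVF_MIN_TXD, I40EVF_MAX_TXD) = 1000
 *   new_tx_count = ALIGN(1000, 32)                                    = 1024
 *
 * so the Tx rings are rebuilt with 1024 descriptors once the scheduled
 * reset runs.
 */
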
-
-/**
- * __i40evf_get_coalesce - get per-queue coalesce settings
- * @netdev: the netdev to check
- * @ec: ethtool coalesce data structure
- * @queue: which queue to pick
- *
- * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs
- * are per queue. If queue is <0 then we default to queue 0 as the
- * representative value.
- **/
-static int __i40evf_get_coalesce(struct net_device *netdev,
-				 struct ethtool_coalesce *ec,
-				 int queue)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	struct i40e_vsi *vsi = &adapter->vsi;
-	struct i40e_ring *rx_ring, *tx_ring;
-
-	ec->tx_max_coalesced_frames = vsi->work_limit;
-	ec->rx_max_coalesced_frames = vsi->work_limit;
-
-	/* Rx and Tx usecs are per-queue values. If the user doesn't specify
-	 * a queue, return queue 0's value as the representative.
-	 */
-	if (queue < 0)
-		queue = 0;
-	else if (queue >= adapter->num_active_queues)
-		return -EINVAL;
-
-	rx_ring = &adapter->rx_rings[queue];
-	tx_ring = &adapter->tx_rings[queue];
-
-	if (ITR_IS_DYNAMIC(rx_ring->itr_setting))
-		ec->use_adaptive_rx_coalesce = 1;
-
-	if (ITR_IS_DYNAMIC(tx_ring->itr_setting))
-		ec->use_adaptive_tx_coalesce = 1;
-
-	ec->rx_coalesce_usecs = rx_ring->itr_setting & ~I40E_ITR_DYNAMIC;
-	ec->tx_coalesce_usecs = tx_ring->itr_setting & ~I40E_ITR_DYNAMIC;
-
-	return 0;
-}
-
-/**
- * i40evf_get_coalesce - Get interrupt coalescing settings
- * @netdev: network interface device structure
- * @ec: ethtool coalesce structure
- *
- * Returns current coalescing settings. This is referred to elsewhere in the
- * driver as Interrupt Throttle Rate, as this is how the hardware describes
- * this functionality. Note that if per-queue settings have been modified this
- * only represents the settings of queue 0.
- **/
-static int i40evf_get_coalesce(struct net_device *netdev,
-			       struct ethtool_coalesce *ec)
-{
-	return __i40evf_get_coalesce(netdev, ec, -1);
-}
-
-/**
- * i40evf_get_per_queue_coalesce - get coalesce values for specific queue
- * @netdev: netdev to read
- * @ec: coalesce settings from ethtool
- * @queue: the queue to read
- *
- * Read specific queue's coalesce settings.
- **/
-static int i40evf_get_per_queue_coalesce(struct net_device *netdev,
-					 u32 queue,
-					 struct ethtool_coalesce *ec)
-{
-	return __i40evf_get_coalesce(netdev, ec, queue);
-}
-
-/**
- * i40evf_set_itr_per_queue - set ITR values for specific queue
- * @adapter: the VF adapter struct to set values for
- * @ec: coalesce settings from ethtool
- * @queue: the queue to modify
- *
- * Change the ITR settings for a specific queue.
- **/
-static void i40evf_set_itr_per_queue(struct i40evf_adapter *adapter,
-				     struct ethtool_coalesce *ec,
-				     int queue)
-{
-	struct i40e_ring *rx_ring = &adapter->rx_rings[queue];
-	struct i40e_ring *tx_ring = &adapter->tx_rings[queue];
-	struct i40e_q_vector *q_vector;
-
-	rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
-	tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
-
-	rx_ring->itr_setting |= I40E_ITR_DYNAMIC;
-	if (!ec->use_adaptive_rx_coalesce)
-		rx_ring->itr_setting ^= I40E_ITR_DYNAMIC;
-
-	tx_ring->itr_setting |= I40E_ITR_DYNAMIC;
-	if (!ec->use_adaptive_tx_coalesce)
-		tx_ring->itr_setting ^= I40E_ITR_DYNAMIC;
-
-	q_vector = rx_ring->q_vector;
-	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
-
-	q_vector = tx_ring->q_vector;
-	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
-
-	/* The interrupt handler itself will take care of programming
-	 * the Tx and Rx ITR values based on the values we have entered
-	 * into the q_vector, no need to write the values now.
-	 */
-}
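
The set-then-conditionally-XOR sequence above is a compact way of setting or
clearing the I40E_ITR_DYNAMIC bit according to the adaptive-coalescing
request; because the bit has just been set, the XOR can only clear it. A
behaviourally equivalent, arguably clearer form (sketch only):

	if (ec->use_adaptive_rx_coalesce)
		rx_ring->itr_setting |= I40E_ITR_DYNAMIC;
	else
		rx_ring->itr_setting &= ~I40E_ITR_DYNAMIC;

	if (ec->use_adaptive_tx_coalesce)
		tx_ring->itr_setting |= I40E_ITR_DYNAMIC;
	else
		tx_ring->itr_setting &= ~I40E_ITR_DYNAMIC;
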
-
-/**
- * __i40evf_set_coalesce - set coalesce settings for particular queue
- * @netdev: the netdev to change
- * @ec: ethtool coalesce settings
- * @queue: the queue to change
- *
- * Sets the coalesce settings for a particular queue.
- **/
-static int __i40evf_set_coalesce(struct net_device *netdev,
-				 struct ethtool_coalesce *ec,
-				 int queue)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	struct i40e_vsi *vsi = &adapter->vsi;
-	int i;
-
-	if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
-		vsi->work_limit = ec->tx_max_coalesced_frames_irq;
-
-	if (ec->rx_coalesce_usecs == 0) {
-		if (ec->use_adaptive_rx_coalesce)
-			netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
-	} else if ((ec->rx_coalesce_usecs < I40E_MIN_ITR) ||
-		   (ec->rx_coalesce_usecs > I40E_MAX_ITR)) {
-		netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
-		return -EINVAL;
-	}
-
-	else
-	if (ec->tx_coalesce_usecs == 0) {
-		if (ec->use_adaptive_tx_coalesce)
-			netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
-	} else if ((ec->tx_coalesce_usecs < I40E_MIN_ITR) ||
-		   (ec->tx_coalesce_usecs > I40E_MAX_ITR)) {
-		netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
-		return -EINVAL;
-	}
-
-	/* Rx and Tx usecs have per-queue values. If the user doesn't specify
-	 * a queue, apply to all queues.
-	 */
-	if (queue < 0) {
-		for (i = 0; i < adapter->num_active_queues; i++)
-			i40evf_set_itr_per_queue(adapter, ec, i);
-	} else if (queue < adapter->num_active_queues) {
-		i40evf_set_itr_per_queue(adapter, ec, queue);
-	} else {
-		netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
-			   adapter->num_active_queues - 1);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-/**
- * i40evf_set_coalesce - Set interrupt coalescing settings
- * @netdev: network interface device structure
- * @ec: ethtool coalesce structure
- *
- * Change current coalescing settings for every queue.
- **/
-static int i40evf_set_coalesce(struct net_device *netdev,
-			       struct ethtool_coalesce *ec)
-{
-	return __i40evf_set_coalesce(netdev, ec, -1);
-}
-
-/**
- * i40evf_set_per_queue_coalesce - set specific queue's coalesce settings
- * @netdev: the netdev to change
- * @ec: ethtool's coalesce settings
- * @queue: the queue to modify
- *
- * Modifies a specific queue's coalesce settings.
- */
-static int i40evf_set_per_queue_coalesce(struct net_device *netdev,
-					 u32 queue,
-					 struct ethtool_coalesce *ec)
-{
-	return __i40evf_set_coalesce(netdev, ec, queue);
-}
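
Both set paths funnel into __i40evf_set_coalesce(); a negative queue index
means "apply to every queue", while the per-queue ethtool interface passes an
explicit index. As an illustration, with an ethtool recent enough to support
the --per-queue sub-commands (command syntax is an assumption, verify against
your ethtool version):

/*
 * # ethtool -C <iface> rx-usecs 50              -> __i40evf_set_coalesce(.., -1)
 * # ethtool --per-queue <iface> queue_mask 0x1 \
 *           --coalesce rx-usecs 50              -> __i40evf_set_coalesce(.., 0)
 */
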
-
-/**
- * i40evf_get_rxnfc - command to get RX flow classification rules
- * @netdev: network interface device structure
- * @cmd: ethtool rxnfc command
- * @rule_locs: pointer to store rule locations
- *
- * Returns Success if the command is supported.
- **/
-static int i40evf_get_rxnfc(struct net_device *netdev,
-			    struct ethtool_rxnfc *cmd,
-			    u32 *rule_locs)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	int ret = -EOPNOTSUPP;
-
-	switch (cmd->cmd) {
-	case ETHTOOL_GRXRINGS:
-		cmd->data = adapter->num_active_queues;
-		ret = 0;
-		break;
-	case ETHTOOL_GRXFH:
-		netdev_info(netdev,
-			    "RSS hash info is not available to vf, use pf.\n");
-		break;
-	default:
-		break;
-	}
-
-	return ret;
-}
-
-/**
- * i40evf_get_channels: get the number of channels supported by the device
- * @netdev: network interface device structure
- * @ch: channel information structure
- *
- * For the purposes of our device, we only use combined channels, i.e. a tx/rx
- * queue pair. Report one extra channel to match our "other" MSI-X vector.
- **/
-static void i40evf_get_channels(struct net_device *netdev,
-				struct ethtool_channels *ch)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-
-	/* Report maximum channels */
-	ch->max_combined = I40EVF_MAX_REQ_QUEUES;
-
-	ch->max_other = NONQ_VECS;
-	ch->other_count = NONQ_VECS;
-
-	ch->combined_count = adapter->num_active_queues;
-}
-
-/**
- * i40evf_set_channels: set the new channel count
- * @netdev: network interface device structure
- * @ch: channel information structure
- *
- * Negotiate a new number of channels with the PF then do a reset.  During
- * reset we'll realloc queues and fix the RSS table.  Returns 0 on success,
- * negative on failure.
- **/
-static int i40evf_set_channels(struct net_device *netdev,
-			       struct ethtool_channels *ch)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	int num_req = ch->combined_count;
-
-	if (num_req != adapter->num_active_queues &&
-	    !(adapter->vf_res->vf_cap_flags &
-	      VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) {
-		dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n");
-		return -EINVAL;
-	}
-
-	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
-	    adapter->num_tc) {
-		dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n");
-		return -EINVAL;
-	}
-
-	/* All of these should have already been checked by ethtool before this
-	 * even gets to us, but just to be sure.
-	 */
-	if (num_req <= 0 || num_req > I40EVF_MAX_REQ_QUEUES)
-		return -EINVAL;
-
-	if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS)
-		return -EINVAL;
-
-	adapter->num_req_queues = num_req;
-	return i40evf_request_queues(adapter, num_req);
-}
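
Changing the channel count on a VF is a negotiation with the PF rather than a
purely local reconfiguration: the request is only attempted when the PF
advertised VIRTCHNL_VF_OFFLOAD_REQ_QUEUES, and the new queue count only
materializes after the reset that follows the PF's reply. A usage sketch:

/*
 * # ethtool -L <iface> combined 8
 *
 * stores adapter->num_req_queues = 8 and sends VIRTCHNL_OP_REQUEST_QUEUES
 * through i40evf_request_queues(); the rings and RSS table are rebuilt in
 * the reset that follows the PF's answer.
 */
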
-
-/**
- * i40evf_get_rxfh_key_size - get the RSS hash key size
- * @netdev: network interface device structure
- *
- * Returns the RSS hash key size.
- **/
-static u32 i40evf_get_rxfh_key_size(struct net_device *netdev)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-
-	return adapter->rss_key_size;
-}
-
-/**
- * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size
- * @netdev: network interface device structure
- *
- * Returns the table size.
- **/
-static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-
-	return adapter->rss_lut_size;
-}
-
-/**
- * i40evf_get_rxfh - get the rx flow hash indirection table
- * @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function in use
- *
- * Reads the indirection table from the driver's cached copy. Always returns 0.
- **/
-static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
-			   u8 *hfunc)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	u16 i;
-
-	if (hfunc)
-		*hfunc = ETH_RSS_HASH_TOP;
-	if (!indir)
-		return 0;
-
-	memcpy(key, adapter->rss_key, adapter->rss_key_size);
-
-	/* Each 32 bits pointed by 'indir' is stored with a lut entry */
-	for (i = 0; i < adapter->rss_lut_size; i++)
-		indir[i] = (u32)adapter->rss_lut[i];
-
-	return 0;
-}
-
-/**
- * i40evf_set_rxfh - set the rx flow hash indirection table
- * @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function to use
- *
- * Returns -EINVAL if the table specifies an invalid queue id, otherwise
- * returns 0 after programming the table.
- **/
-static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
-			   const u8 *key, const u8 hfunc)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	u16 i;
-
-	/* We do not allow change in unsupported parameters */
-	if (key ||
-	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
-		return -EOPNOTSUPP;
-	if (!indir)
-		return 0;
-
-	if (key) {
-		memcpy(adapter->rss_key, key, adapter->rss_key_size);
-	}
-
-	/* Each 32 bits pointed by 'indir' is stored with a lut entry */
-	for (i = 0; i < adapter->rss_lut_size; i++)
-		adapter->rss_lut[i] = (u8)(indir[i]);
-
-	return i40evf_config_rss(adapter);
-}
-
-static const struct ethtool_ops i40evf_ethtool_ops = {
-	.get_drvinfo		= i40evf_get_drvinfo,
-	.get_link		= ethtool_op_get_link,
-	.get_ringparam		= i40evf_get_ringparam,
-	.set_ringparam		= i40evf_set_ringparam,
-	.get_strings		= i40evf_get_strings,
-	.get_ethtool_stats	= i40evf_get_ethtool_stats,
-	.get_sset_count		= i40evf_get_sset_count,
-	.get_priv_flags		= i40evf_get_priv_flags,
-	.set_priv_flags		= i40evf_set_priv_flags,
-	.get_msglevel		= i40evf_get_msglevel,
-	.set_msglevel		= i40evf_set_msglevel,
-	.get_coalesce		= i40evf_get_coalesce,
-	.set_coalesce		= i40evf_set_coalesce,
-	.get_per_queue_coalesce = i40evf_get_per_queue_coalesce,
-	.set_per_queue_coalesce = i40evf_set_per_queue_coalesce,
-	.get_rxnfc		= i40evf_get_rxnfc,
-	.get_rxfh_indir_size	= i40evf_get_rxfh_indir_size,
-	.get_rxfh		= i40evf_get_rxfh,
-	.set_rxfh		= i40evf_set_rxfh,
-	.get_channels		= i40evf_get_channels,
-	.set_channels		= i40evf_set_channels,
-	.get_rxfh_key_size	= i40evf_get_rxfh_key_size,
-	.get_link_ksettings	= i40evf_get_link_ksettings,
-};
-
-/**
- * i40evf_set_ethtool_ops - Initialize ethtool ops struct
- * @netdev: network interface device structure
- *
- * Sets ethtool ops struct in our netdev so that ethtool can call
- * our functions.
- **/
-void i40evf_set_ethtool_ops(struct net_device *netdev)
-{
-	netdev->ethtool_ops = &i40evf_ethtool_ops;
-}
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
deleted file mode 100644
index 5a6e579e9e65..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ /dev/null
@@ -1,3989 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#include "i40evf.h"
-#include "i40e_prototype.h"
-#include "i40evf_client.h"
-/* All i40evf tracepoints are defined by the include below, which must
- * be included exactly once across the whole kernel with
- * CREATE_TRACE_POINTS defined
- */
-#define CREATE_TRACE_POINTS
-#include "i40e_trace.h"
-
-static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
-static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
-static int i40evf_close(struct net_device *netdev);
-
-char i40evf_driver_name[] = "i40evf";
-static const char i40evf_driver_string[] =
-	"Intel(R) 40-10 Gigabit Virtual Function Network Driver";
-
-#define DRV_KERN "-k"
-
-#define DRV_VERSION_MAJOR 3
-#define DRV_VERSION_MINOR 2
-#define DRV_VERSION_BUILD 2
-#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
-	     __stringify(DRV_VERSION_MINOR) "." \
-	     __stringify(DRV_VERSION_BUILD) \
-	     DRV_KERN
-const char i40evf_driver_version[] = DRV_VERSION;
-static const char i40evf_copyright[] =
-	"Copyright (c) 2013 - 2015 Intel Corporation.";
-
-/* i40evf_pci_tbl - PCI Device ID Table
- *
- * Wildcard entries (PCI_ANY_ID) should come last
- * Last entry must be all 0s
- *
- * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
- *   Class, Class Mask, private data (not used) }
- */
-static const struct pci_device_id i40evf_pci_tbl[] = {
-	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
-	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
-	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
-	{PCI_VDEVICE(INTEL, I40E_DEV_ID_ADAPTIVE_VF), 0},
-	/* required last entry */
-	{0, }
-};
-
-MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl);
-
-MODULE_AUTHOR("Intel Corporation, <linux.nics@...el.com>");
-MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-static struct workqueue_struct *i40evf_wq;
-
-/**
- * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
- * @hw:   pointer to the HW structure
- * @mem:  ptr to mem struct to fill out
- * @size: size of memory requested
- * @alignment: what to align the allocation to
- **/
-i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw,
-				      struct i40e_dma_mem *mem,
-				      u64 size, u32 alignment)
-{
-	struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
-
-	if (!mem)
-		return I40E_ERR_PARAM;
-
-	mem->size = ALIGN(size, alignment);
-	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
-				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
-	if (mem->va)
-		return 0;
-	else
-		return I40E_ERR_NO_MEMORY;
-}
-
-/**
- * i40evf_free_dma_mem_d - OS specific memory free for shared code
- * @hw:   pointer to the HW structure
- * @mem:  ptr to mem struct to free
- **/
-i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
-{
-	struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
-
-	if (!mem || !mem->va)
-		return I40E_ERR_PARAM;
-	dma_free_coherent(&adapter->pdev->dev, mem->size,
-			  mem->va, (dma_addr_t)mem->pa);
-	return 0;
-}
-
-/**
- * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code
- * @hw:   pointer to the HW structure
- * @mem:  ptr to mem struct to fill out
- * @size: size of memory requested
- **/
-i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw,
-				       struct i40e_virt_mem *mem, u32 size)
-{
-	if (!mem)
-		return I40E_ERR_PARAM;
-
-	mem->size = size;
-	mem->va = kzalloc(size, GFP_KERNEL);
-
-	if (mem->va)
-		return 0;
-	else
-		return I40E_ERR_NO_MEMORY;
-}
-
-/**
- * i40evf_free_virt_mem_d - OS specific memory free for shared code
- * @hw:   pointer to the HW structure
- * @mem:  ptr to mem struct to free
- **/
-i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw,
-				   struct i40e_virt_mem *mem)
-{
-	if (!mem)
-		return I40E_ERR_PARAM;
-
-	/* it's ok to kfree a NULL pointer */
-	kfree(mem->va);
-
-	return 0;
-}
-
-/**
- * i40evf_debug_d - OS dependent version of debug printing
- * @hw:  pointer to the HW structure
- * @mask: debug level mask
- * @fmt_str: printf-type format description
- **/
-void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
-{
-	char buf[512];
-	va_list argptr;
-
-	if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
-		return;
-
-	va_start(argptr, fmt_str);
-	vsnprintf(buf, sizeof(buf), fmt_str, argptr);
-	va_end(argptr);
-
-	/* the debug string is already formatted with a newline */
-	pr_info("%s", buf);
-}
-
-/**
- * i40evf_schedule_reset - Set the flags and schedule a reset event
- * @adapter: board private structure
- **/
-void i40evf_schedule_reset(struct i40evf_adapter *adapter)
-{
-	if (!(adapter->flags &
-	      (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) {
-		adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
-		schedule_work(&adapter->reset_task);
-	}
-}
-
-/**
- * i40evf_tx_timeout - Respond to a Tx Hang
- * @netdev: network interface device structure
- **/
-static void i40evf_tx_timeout(struct net_device *netdev)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-
-	adapter->tx_timeout_count++;
-	i40evf_schedule_reset(adapter);
-}
-
-/**
- * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC
- * @adapter: board private structure
- **/
-static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
-{
-	struct i40e_hw *hw = &adapter->hw;
-
-	if (!adapter->msix_entries)
-		return;
-
-	wr32(hw, I40E_VFINT_DYN_CTL01, 0);
-
-	/* read flush */
-	rd32(hw, I40E_VFGEN_RSTAT);
-
-	synchronize_irq(adapter->msix_entries[0].vector);
-}
-
-/**
- * i40evf_misc_irq_enable - Enable default interrupt generation settings
- * @adapter: board private structure
- **/
-static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
-{
-	struct i40e_hw *hw = &adapter->hw;
-
-	wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
-				       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
-	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
-
-	/* read flush */
-	rd32(hw, I40E_VFGEN_RSTAT);
-}
-
-/**
- * i40evf_irq_disable - Mask off interrupt generation on the NIC
- * @adapter: board private structure
- **/
-static void i40evf_irq_disable(struct i40evf_adapter *adapter)
-{
-	int i;
-	struct i40e_hw *hw = &adapter->hw;
-
-	if (!adapter->msix_entries)
-		return;
-
-	for (i = 1; i < adapter->num_msix_vectors; i++) {
-		wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
-		synchronize_irq(adapter->msix_entries[i].vector);
-	}
-	/* read flush */
-	rd32(hw, I40E_VFGEN_RSTAT);
-}
-
-/**
- * i40evf_irq_enable_queues - Enable interrupt for specified queues
- * @adapter: board private structure
- * @mask: bitmap of queues to enable
- **/
-void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
-{
-	struct i40e_hw *hw = &adapter->hw;
-	int i;
-
-	for (i = 1; i < adapter->num_msix_vectors; i++) {
-		if (mask & BIT(i - 1)) {
-			wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
-			     I40E_VFINT_DYN_CTLN1_INTENA_MASK |
-			     I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
-		}
-	}
-}
-
-/**
- * i40evf_irq_enable - Enable default interrupt generation settings
- * @adapter: board private structure
- * @flush: boolean value whether to run rd32()
- **/
-void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
-{
-	struct i40e_hw *hw = &adapter->hw;
-
-	i40evf_misc_irq_enable(adapter);
-	i40evf_irq_enable_queues(adapter, ~0);
-
-	if (flush)
-		rd32(hw, I40E_VFGEN_RSTAT);
-}
-
-/**
- * i40evf_msix_aq - Interrupt handler for vector 0
- * @irq: interrupt number
- * @data: pointer to netdev
- **/
-static irqreturn_t i40evf_msix_aq(int irq, void *data)
-{
-	struct net_device *netdev = data;
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	struct i40e_hw *hw = &adapter->hw;
-
-	/* handle non-queue interrupts, these reads clear the registers */
-	rd32(hw, I40E_VFINT_ICR01);
-	rd32(hw, I40E_VFINT_ICR0_ENA1);
-
-	/* schedule work on the private workqueue */
-	schedule_work(&adapter->adminq_task);
-
-	return IRQ_HANDLED;
-}
-
-/**
- * i40evf_msix_clean_rings - MSIX mode Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a q_vector
- **/
-static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
-{
-	struct i40e_q_vector *q_vector = data;
-
-	if (!q_vector->tx.ring && !q_vector->rx.ring)
-		return IRQ_HANDLED;
-
-	napi_schedule_irqoff(&q_vector->napi);
-
-	return IRQ_HANDLED;
-}
-
-/**
- * i40evf_map_vector_to_rxq - associate irqs with rx queues
- * @adapter: board private structure
- * @v_idx: interrupt number
- * @r_idx: queue number
- **/
-static void
-i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
-{
-	struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
-	struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];
-	struct i40e_hw *hw = &adapter->hw;
-
-	rx_ring->q_vector = q_vector;
-	rx_ring->next = q_vector->rx.ring;
-	rx_ring->vsi = &adapter->vsi;
-	q_vector->rx.ring = rx_ring;
-	q_vector->rx.count++;
-	q_vector->rx.next_update = jiffies + 1;
-	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
-	q_vector->ring_mask |= BIT(r_idx);
-	wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, q_vector->reg_idx),
-	     q_vector->rx.current_itr);
-	q_vector->rx.current_itr = q_vector->rx.target_itr;
-}
-
-/**
- * i40evf_map_vector_to_txq - associate irqs with tx queues
- * @adapter: board private structure
- * @v_idx: interrupt number
- * @t_idx: queue number
- **/
-static void
-i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
-{
-	struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
-	struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];
-	struct i40e_hw *hw = &adapter->hw;
-
-	tx_ring->q_vector = q_vector;
-	tx_ring->next = q_vector->tx.ring;
-	tx_ring->vsi = &adapter->vsi;
-	q_vector->tx.ring = tx_ring;
-	q_vector->tx.count++;
-	q_vector->tx.next_update = jiffies + 1;
-	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
-	q_vector->num_ringpairs++;
-	wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, q_vector->reg_idx),
-	     q_vector->tx.target_itr);
-	q_vector->tx.current_itr = q_vector->tx.target_itr;
-}
-
-/**
- * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors
- * @adapter: board private structure to initialize
- *
- * This function maps descriptor rings to the queue-specific vectors
- * we were allotted through the MSI-X enabling code.  Ideally, we'd have
- * one vector per ring/queue, but on a constrained vector budget, we
- * group the rings as "efficiently" as possible.  You would add new
- * mapping configurations in here.
- **/
-static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
-{
-	int rings_remaining = adapter->num_active_queues;
-	int ridx = 0, vidx = 0;
-	int q_vectors;
-
-	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
-
-	for (; ridx < rings_remaining; ridx++) {
-		i40evf_map_vector_to_rxq(adapter, vidx, ridx);
-		i40evf_map_vector_to_txq(adapter, vidx, ridx);
-
-		/* In the case where we have more queues than vectors, continue
-		 * round-robin on vectors until all queues are mapped.
-		 */
-		if (++vidx >= q_vectors)
-			vidx = 0;
-	}
-
-	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
-}
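
When there are more rings than traffic vectors, the loop above wraps vidx so
the vectors are shared round-robin. For example, with eight active queues and
four queue vectors (q_vectors == 4):

/*
 * ring 0 -> vector 0     ring 4 -> vector 0
 * ring 1 -> vector 1     ring 5 -> vector 1
 * ring 2 -> vector 2     ring 6 -> vector 2
 * ring 3 -> vector 3     ring 7 -> vector 3
 *
 * Each vector then polls a Tx and an Rx ring list of two entries apiece,
 * chained through ring->next.
 */
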
-
-/**
- * i40evf_irq_affinity_notify - Callback for affinity changes
- * @notify: context as to what irq was changed
- * @mask: the new affinity mask
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * so that we may register to receive changes to the irq affinity masks.
- **/
-static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify,
-				       const cpumask_t *mask)
-{
-	struct i40e_q_vector *q_vector =
-		container_of(notify, struct i40e_q_vector, affinity_notify);
-
-	cpumask_copy(&q_vector->affinity_mask, mask);
-}
-
-/**
- * i40evf_irq_affinity_release - Callback for affinity notifier release
- * @ref: internal core kernel usage
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * to inform the current notification subscriber that they will no longer
- * receive notifications.
- **/
-static void i40evf_irq_affinity_release(struct kref *ref) {}
-
-/**
- * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
- * @adapter: board private structure
- * @basename: device basename
- *
- * Allocates MSI-X vectors for tx and rx handling, and requests
- * interrupts from the kernel.
- **/
-static int
-i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
-{
-	unsigned int vector, q_vectors;
-	unsigned int rx_int_idx = 0, tx_int_idx = 0;
-	int irq_num, err;
-	int cpu;
-
-	i40evf_irq_disable(adapter);
-	/* Decrement for Other and TCP Timer vectors */
-	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
-
-	for (vector = 0; vector < q_vectors; vector++) {
-		struct i40e_q_vector *q_vector = &adapter->q_vectors[vector];
-		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
-
-		if (q_vector->tx.ring && q_vector->rx.ring) {
-			snprintf(q_vector->name, sizeof(q_vector->name),
-				 "i40evf-%s-TxRx-%d", basename, rx_int_idx++);
-			tx_int_idx++;
-		} else if (q_vector->rx.ring) {
-			snprintf(q_vector->name, sizeof(q_vector->name),
-				 "i40evf-%s-rx-%d", basename, rx_int_idx++);
-		} else if (q_vector->tx.ring) {
-			snprintf(q_vector->name, sizeof(q_vector->name),
-				 "i40evf-%s-tx-%d", basename, tx_int_idx++);
-		} else {
-			/* skip this unused q_vector */
-			continue;
-		}
-		err = request_irq(irq_num,
-				  i40evf_msix_clean_rings,
-				  0,
-				  q_vector->name,
-				  q_vector);
-		if (err) {
-			dev_info(&adapter->pdev->dev,
-				 "Request_irq failed, error: %d\n", err);
-			goto free_queue_irqs;
-		}
-		/* register for affinity change notifications */
-		q_vector->affinity_notify.notify = i40evf_irq_affinity_notify;
-		q_vector->affinity_notify.release =
-						   i40evf_irq_affinity_release;
-		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
-		/* Spread the IRQ affinity hints across online CPUs. Note that
-		 * get_cpu_mask returns a mask with a permanent lifetime so
-		 * it's safe to use as a hint for irq_set_affinity_hint.
-		 */
-		cpu = cpumask_local_spread(q_vector->v_idx, -1);
-		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
-	}
-
-	return 0;
-
-free_queue_irqs:
-	while (vector) {
-		vector--;
-		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
-		irq_set_affinity_notifier(irq_num, NULL);
-		irq_set_affinity_hint(irq_num, NULL);
-		free_irq(irq_num, &adapter->q_vectors[vector]);
-	}
-	return err;
-}
-
-/**
- * i40evf_request_misc_irq - Initialize MSI-X interrupts
- * @adapter: board private structure
- *
- * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
- * vector is only for the admin queue, and stays active even when the netdev
- * is closed.
- **/
-static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
-{
-	struct net_device *netdev = adapter->netdev;
-	int err;
-
-	snprintf(adapter->misc_vector_name,
-		 sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx",
-		 dev_name(&adapter->pdev->dev));
-	err = request_irq(adapter->msix_entries[0].vector,
-			  &i40evf_msix_aq, 0,
-			  adapter->misc_vector_name, netdev);
-	if (err) {
-		dev_err(&adapter->pdev->dev,
-			"request_irq for %s failed: %d\n",
-			adapter->misc_vector_name, err);
-		free_irq(adapter->msix_entries[0].vector, netdev);
-	}
-	return err;
-}
-
-/**
- * i40evf_free_traffic_irqs - Free MSI-X interrupts
- * @adapter: board private structure
- *
- * Frees all MSI-X vectors other than 0.
- **/
-static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
-{
-	int vector, irq_num, q_vectors;
-
-	if (!adapter->msix_entries)
-		return;
-
-	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
-
-	for (vector = 0; vector < q_vectors; vector++) {
-		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
-		irq_set_affinity_notifier(irq_num, NULL);
-		irq_set_affinity_hint(irq_num, NULL);
-		free_irq(irq_num, &adapter->q_vectors[vector]);
-	}
-}
-
-/**
- * i40evf_free_misc_irq - Free MSI-X miscellaneous vector
- * @adapter: board private structure
- *
- * Frees MSI-X vector 0.
- **/
-static void i40evf_free_misc_irq(struct i40evf_adapter *adapter)
-{
-	struct net_device *netdev = adapter->netdev;
-
-	if (!adapter->msix_entries)
-		return;
-
-	free_irq(adapter->msix_entries[0].vector, netdev);
-}
-
-/**
- * i40evf_configure_tx - Configure Transmit Unit after Reset
- * @adapter: board private structure
- *
- * Configure the Tx unit of the MAC after a reset.
- **/
-static void i40evf_configure_tx(struct i40evf_adapter *adapter)
-{
-	struct i40e_hw *hw = &adapter->hw;
-	int i;
-
-	for (i = 0; i < adapter->num_active_queues; i++)
-		adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i);
-}
-
-/**
- * i40evf_configure_rx - Configure Receive Unit after Reset
- * @adapter: board private structure
- *
- * Configure the Rx unit of the MAC after a reset.
- **/
-static void i40evf_configure_rx(struct i40evf_adapter *adapter)
-{
-	unsigned int rx_buf_len = I40E_RXBUFFER_2048;
-	struct i40e_hw *hw = &adapter->hw;
-	int i;
-
-	/* Legacy Rx will always default to a 2048 buffer size. */
-#if (PAGE_SIZE < 8192)
-	if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) {
-		struct net_device *netdev = adapter->netdev;
-
-		/* For jumbo frames on systems with 4K pages we have to use
-		 * an order 1 page, so we might as well increase the size
-		 * of our Rx buffer to make better use of the available space
-		 */
-		rx_buf_len = I40E_RXBUFFER_3072;
-
-		/* We use a 1536 buffer size for configurations with
-		 * standard Ethernet mtu.  On x86 this gives us enough room
-		 * for shared info and 192 bytes of padding.
-		 */
-		if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
-		    (netdev->mtu <= ETH_DATA_LEN))
-			rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
-	}
-#endif
-
-	for (i = 0; i < adapter->num_active_queues; i++) {
-		adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
-		adapter->rx_rings[i].rx_buf_len = rx_buf_len;
-
-		if (adapter->flags & I40EVF_FLAG_LEGACY_RX)
-			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
-		else
-			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
-	}
-}
-
-/**
- * i40evf_find_vlan - Search filter list for specific vlan filter
- * @adapter: board private structure
- * @vlan: vlan tag
- *
- * Returns ptr to the filter object or NULL. Must be called while holding the
- * mac_vlan_list_lock.
- **/
-static struct
-i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
-{
-	struct i40evf_vlan_filter *f;
-
-	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
-		if (vlan == f->vlan)
-			return f;
-	}
-	return NULL;
-}
-
-/**
- * i40evf_add_vlan - Add a vlan filter to the list
- * @adapter: board private structure
- * @vlan: VLAN tag
- *
- * Returns ptr to the filter object or NULL when no memory available.
- **/
-static struct
-i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
-{
-	struct i40evf_vlan_filter *f = NULL;
-
-	spin_lock_bh(&adapter->mac_vlan_list_lock);
-
-	f = i40evf_find_vlan(adapter, vlan);
-	if (!f) {
-		f = kzalloc(sizeof(*f), GFP_KERNEL);
-		if (!f)
-			goto clearout;
-
-		f->vlan = vlan;
-
-		INIT_LIST_HEAD(&f->list);
-		list_add(&f->list, &adapter->vlan_filter_list);
-		f->add = true;
-		adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
-	}
-
-clearout:
-	spin_unlock_bh(&adapter->mac_vlan_list_lock);
-	return f;
-}
-
-/**
- * i40evf_del_vlan - Remove a vlan filter from the list
- * @adapter: board private structure
- * @vlan: VLAN tag
- **/
-static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
-{
-	struct i40evf_vlan_filter *f;
-
-	spin_lock_bh(&adapter->mac_vlan_list_lock);
-
-	f = i40evf_find_vlan(adapter, vlan);
-	if (f) {
-		f->remove = true;
-		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
-	}
-
-	spin_unlock_bh(&adapter->mac_vlan_list_lock);
-}
-
-/**
- * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
- * @netdev: network device struct
- * @proto: unused protocol data
- * @vid: VLAN tag
- **/
-static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
-				  __always_unused __be16 proto, u16 vid)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-
-	if (!VLAN_ALLOWED(adapter))
-		return -EIO;
-	if (i40evf_add_vlan(adapter, vid) == NULL)
-		return -ENOMEM;
-	return 0;
-}
-
-/**
- * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
- * @netdev: network device struct
- * @proto: unused protocol data
- * @vid: VLAN tag
- **/
-static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
-				   __always_unused __be16 proto, u16 vid)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-
-	if (VLAN_ALLOWED(adapter)) {
-		i40evf_del_vlan(adapter, vid);
-		return 0;
-	}
-	return -EIO;
-}
-
-/**
- * i40evf_find_filter - Search filter list for specific mac filter
- * @adapter: board private structure
- * @macaddr: the MAC address
- *
- * Returns ptr to the filter object or NULL. Must be called while holding the
- * mac_vlan_list_lock.
- **/
-static struct
-i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
-				      const u8 *macaddr)
-{
-	struct i40evf_mac_filter *f;
-
-	if (!macaddr)
-		return NULL;
-
-	list_for_each_entry(f, &adapter->mac_filter_list, list) {
-		if (ether_addr_equal(macaddr, f->macaddr))
-			return f;
-	}
-	return NULL;
-}
-
-/**
- * i40evf_add_filter - Add a mac filter to the filter list
- * @adapter: board private structure
- * @macaddr: the MAC address
- *
- * Returns ptr to the filter object or NULL when no memory available.
- **/
-static struct
-i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
-				     const u8 *macaddr)
-{
-	struct i40evf_mac_filter *f;
-
-	if (!macaddr)
-		return NULL;
-
-	f = i40evf_find_filter(adapter, macaddr);
-	if (!f) {
-		f = kzalloc(sizeof(*f), GFP_ATOMIC);
-		if (!f)
-			return f;
-
-		ether_addr_copy(f->macaddr, macaddr);
-
-		list_add_tail(&f->list, &adapter->mac_filter_list);
-		f->add = true;
-		adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
-	} else {
-		f->remove = false;
-	}
-
-	return f;
-}
-
-/**
- * i40evf_set_mac - NDO callback to set port mac address
- * @netdev: network interface device structure
- * @p: pointer to an address structure
- *
- * Returns 0 on success, negative on failure
- **/
-static int i40evf_set_mac(struct net_device *netdev, void *p)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	struct i40e_hw *hw = &adapter->hw;
-	struct i40evf_mac_filter *f;
-	struct sockaddr *addr = p;
-
-	if (!is_valid_ether_addr(addr->sa_data))
-		return -EADDRNOTAVAIL;
-
-	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
-		return 0;
-
-	if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
-		return -EPERM;
-
-	spin_lock_bh(&adapter->mac_vlan_list_lock);
-
-	f = i40evf_find_filter(adapter, hw->mac.addr);
-	if (f) {
-		f->remove = true;
-		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
-	}
-
-	f = i40evf_add_filter(adapter, addr->sa_data);
-
-	spin_unlock_bh(&adapter->mac_vlan_list_lock);
-
-	if (f) {
-		ether_addr_copy(hw->mac.addr, addr->sa_data);
-		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
-	}
-
-	return (f == NULL) ? -ENOMEM : 0;
-}
-
-/**
- * i40evf_addr_sync - Callback for dev_(mc|uc)_sync to add address
- * @netdev: the netdevice
- * @addr: address to add
- *
- * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
- * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
- */
-static int i40evf_addr_sync(struct net_device *netdev, const u8 *addr)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-
-	if (i40evf_add_filter(adapter, addr))
-		return 0;
-	else
-		return -ENOMEM;
-}
-
-/**
- * i40evf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
- * @netdev: the netdevice
- * @addr: address to remove
- *
- * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
- * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
- */
-static int i40evf_addr_unsync(struct net_device *netdev, const u8 *addr)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	struct i40evf_mac_filter *f;
-
-	/* Under some circumstances, we might receive a request to delete
-	 * our own device address from our uc list. Because we store the
-	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
-	 * such requests and not delete our device address from this list.
-	 */
-	if (ether_addr_equal(addr, netdev->dev_addr))
-		return 0;
-
-	f = i40evf_find_filter(adapter, addr);
-	if (f) {
-		f->remove = true;
-		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
-	}
-	return 0;
-}
-
-/**
- * i40evf_set_rx_mode - NDO callback to set the netdev filters
- * @netdev: network interface device structure
- **/
-static void i40evf_set_rx_mode(struct net_device *netdev)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-
-	spin_lock_bh(&adapter->mac_vlan_list_lock);
-	__dev_uc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync);
-	__dev_mc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync);
-	spin_unlock_bh(&adapter->mac_vlan_list_lock);
-
-	if (netdev->flags & IFF_PROMISC &&
-	    !(adapter->flags & I40EVF_FLAG_PROMISC_ON))
-		adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC;
-	else if (!(netdev->flags & IFF_PROMISC) &&
-		 adapter->flags & I40EVF_FLAG_PROMISC_ON)
-		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC;
-
-	if (netdev->flags & IFF_ALLMULTI &&
-	    !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON))
-		adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
-	else if (!(netdev->flags & IFF_ALLMULTI) &&
-		 adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
-		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
-}
-
-/**
- * i40evf_napi_enable_all - enable NAPI on all queue vectors
- * @adapter: board private structure
- **/
-static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
-{
-	int q_idx;
-	struct i40e_q_vector *q_vector;
-	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
-
-	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-		struct napi_struct *napi;
-
-		q_vector = &adapter->q_vectors[q_idx];
-		napi = &q_vector->napi;
-		napi_enable(napi);
-	}
-}
-
-/**
- * i40evf_napi_disable_all - disable NAPI on all queue vectors
- * @adapter: board private structure
- **/
-static void i40evf_napi_disable_all(struct i40evf_adapter *adapter)
-{
-	int q_idx;
-	struct i40e_q_vector *q_vector;
-	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
-
-	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-		q_vector = &adapter->q_vectors[q_idx];
-		napi_disable(&q_vector->napi);
-	}
-}
-
-/**
- * i40evf_configure - set up transmit and receive data structures
- * @adapter: board private structure
- **/
-static void i40evf_configure(struct i40evf_adapter *adapter)
-{
-	struct net_device *netdev = adapter->netdev;
-	int i;
-
-	i40evf_set_rx_mode(netdev);
-
-	i40evf_configure_tx(adapter);
-	i40evf_configure_rx(adapter);
-	adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
-
-	for (i = 0; i < adapter->num_active_queues; i++) {
-		struct i40e_ring *ring = &adapter->rx_rings[i];
-
-		i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
-	}
-}
-
-/**
- * i40evf_up_complete - Finish the last steps of bringing up a connection
- * @adapter: board private structure
- *
- * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
- **/
-static void i40evf_up_complete(struct i40evf_adapter *adapter)
-{
-	adapter->state = __I40EVF_RUNNING;
-	clear_bit(__I40E_VSI_DOWN, adapter->vsi.state);
-
-	i40evf_napi_enable_all(adapter);
-
-	adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
-	if (CLIENT_ENABLED(adapter))
-		adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN;
-	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
-}
-
-/**
- * i40evf_down - Shutdown the connection processing
- * @adapter: board private structure
- *
- * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
- **/
-void i40evf_down(struct i40evf_adapter *adapter)
-{
-	struct net_device *netdev = adapter->netdev;
-	struct i40evf_vlan_filter *vlf;
-	struct i40evf_mac_filter *f;
-	struct i40evf_cloud_filter *cf;
-
-	if (adapter->state <= __I40EVF_DOWN_PENDING)
-		return;
-
-	netif_carrier_off(netdev);
-	netif_tx_disable(netdev);
-	adapter->link_up = false;
-	i40evf_napi_disable_all(adapter);
-	i40evf_irq_disable(adapter);
-
-	spin_lock_bh(&adapter->mac_vlan_list_lock);
-
-	/* clear the sync flag on all filters */
-	__dev_uc_unsync(adapter->netdev, NULL);
-	__dev_mc_unsync(adapter->netdev, NULL);
-
-	/* remove all MAC filters */
-	list_for_each_entry(f, &adapter->mac_filter_list, list) {
-		f->remove = true;
-	}
-
-	/* remove all VLAN filters */
-	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
-		vlf->remove = true;
-	}
-
-	spin_unlock_bh(&adapter->mac_vlan_list_lock);
-
-	/* remove all cloud filters */
-	spin_lock_bh(&adapter->cloud_filter_list_lock);
-	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
-		cf->del = true;
-	}
-	spin_unlock_bh(&adapter->cloud_filter_list_lock);
-
-	if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
-	    adapter->state != __I40EVF_RESETTING) {
-		/* cancel any current operation */
-		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
-		/* Schedule operations to close down the HW. Don't wait
-		 * here for this to complete. The watchdog is still running
-		 * and it will take care of this.
-		 */
-		adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
-		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
-		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
-		adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
-	}
-
-	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
-}
-
-/**
- * i40evf_acquire_msix_vectors - Setup the MSIX capability
- * @adapter: board private structure
- * @vectors: number of vectors to request
- *
- * Work with the OS to set up the MSIX vectors needed.
- *
- * Returns 0 on success, negative on failure
- **/
-static int
-i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
-{
-	int err, vector_threshold;
-
-	/* We'll want at least 3 (vector_threshold):
-	 * 0) Other (Admin Queue and link, mostly)
-	 * 1) TxQ[0] Cleanup
-	 * 2) RxQ[0] Cleanup
-	 */
-	vector_threshold = MIN_MSIX_COUNT;
-
-	/* The more we get, the more we will assign to Tx/Rx Cleanup
-	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
-	 * Right now, we simply care about how many we'll get; we'll
-	 * set them up later while requesting irq's.
-	 */
-	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
-				    vector_threshold, vectors);
-	if (err < 0) {
-		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
-		kfree(adapter->msix_entries);
-		adapter->msix_entries = NULL;
-		return err;
-	}
-
-	/* Adjust for only the vectors we'll use, which is minimum
-	 * of max_msix_q_vectors + NONQ_VECS, or the number of
-	 * vectors we were allocated.
-	 */
-	adapter->num_msix_vectors = err;
-	return 0;
-}
-
-/**
- * i40evf_free_queues - Free memory for all rings
- * @adapter: board private structure to initialize
- *
- * Free all of the memory associated with queue pairs.
- **/
-static void i40evf_free_queues(struct i40evf_adapter *adapter)
-{
-	if (!adapter->vsi_res)
-		return;
-	adapter->num_active_queues = 0;
-	kfree(adapter->tx_rings);
-	adapter->tx_rings = NULL;
-	kfree(adapter->rx_rings);
-	adapter->rx_rings = NULL;
-}
-
-/**
- * i40evf_alloc_queues - Allocate memory for all rings
- * @adapter: board private structure to initialize
- *
- * We allocate one ring per queue at run-time since we don't know the
- * number of queues at compile-time.  The polling_netdev array is
- * intended for Multiqueue, but should work fine with a single queue.
- **/
-static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
-{
-	int i, num_active_queues;
-
-	/* If we're in reset reallocating queues we don't actually know yet for
-	 * certain the PF gave us the number of queues we asked for but we'll
-	 * assume it did.  Once basic reset is finished we'll confirm once we
-	 * start negotiating config with PF.
-	 */
-	if (adapter->num_req_queues)
-		num_active_queues = adapter->num_req_queues;
-	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
-		 adapter->num_tc)
-		num_active_queues = adapter->ch_config.total_qps;
-	else
-		num_active_queues = min_t(int,
-					  adapter->vsi_res->num_queue_pairs,
-					  (int)(num_online_cpus()));
-
-
-	adapter->tx_rings = kcalloc(num_active_queues,
-				    sizeof(struct i40e_ring), GFP_KERNEL);
-	if (!adapter->tx_rings)
-		goto err_out;
-	adapter->rx_rings = kcalloc(num_active_queues,
-				    sizeof(struct i40e_ring), GFP_KERNEL);
-	if (!adapter->rx_rings)
-		goto err_out;
-
-	for (i = 0; i < num_active_queues; i++) {
-		struct i40e_ring *tx_ring;
-		struct i40e_ring *rx_ring;
-
-		tx_ring = &adapter->tx_rings[i];
-
-		tx_ring->queue_index = i;
-		tx_ring->netdev = adapter->netdev;
-		tx_ring->dev = &adapter->pdev->dev;
-		tx_ring->count = adapter->tx_desc_count;
-		tx_ring->itr_setting = I40E_ITR_TX_DEF;
-		if (adapter->flags & I40EVF_FLAG_WB_ON_ITR_CAPABLE)
-			tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
-
-		rx_ring = &adapter->rx_rings[i];
-		rx_ring->queue_index = i;
-		rx_ring->netdev = adapter->netdev;
-		rx_ring->dev = &adapter->pdev->dev;
-		rx_ring->count = adapter->rx_desc_count;
-		rx_ring->itr_setting = I40E_ITR_RX_DEF;
-	}
-
-	adapter->num_active_queues = num_active_queues;
-
-	return 0;
-
-err_out:
-	i40evf_free_queues(adapter);
-	return -ENOMEM;
-}
-
-/**
- * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported
- * @adapter: board private structure to initialize
- *
- * Attempt to configure the interrupts using the best available
- * capabilities of the hardware and the kernel.
- **/
-static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
-{
-	int vector, v_budget;
-	int pairs = 0;
-	int err = 0;
-
-	if (!adapter->vsi_res) {
-		err = -EIO;
-		goto out;
-	}
-	pairs = adapter->num_active_queues;
-
-	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
-	 * us much good if we have more vectors than CPUs. However, we already
-	 * limit the total number of queues by the number of CPUs so we do not
-	 * need any further limiting here.
-	 */
-	v_budget = min_t(int, pairs + NONQ_VECS,
-			 (int)adapter->vf_res->max_vectors);
-
-	adapter->msix_entries = kcalloc(v_budget,
-					sizeof(struct msix_entry), GFP_KERNEL);
-	if (!adapter->msix_entries) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	for (vector = 0; vector < v_budget; vector++)
-		adapter->msix_entries[vector].entry = vector;
-
-	err = i40evf_acquire_msix_vectors(adapter, v_budget);
-
-out:
-	netif_set_real_num_rx_queues(adapter->netdev, pairs);
-	netif_set_real_num_tx_queues(adapter->netdev, pairs);
-	return err;
-}
-
-/**
- * i40evf_config_rss_aq - Configure RSS keys and lut by using AQ commands
- * @adapter: board private structure
- *
- * Return 0 on success, negative on failure
- **/
-static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
-{
-	struct i40e_aqc_get_set_rss_key_data *rss_key =
-		(struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
-	struct i40e_hw *hw = &adapter->hw;
-	int ret = 0;
-
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
-			adapter->current_op);
-		return -EBUSY;
-	}
-
-	ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
-	if (ret) {
-		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
-			i40evf_stat_str(hw, ret),
-			i40evf_aq_str(hw, hw->aq.asq_last_status));
-		return ret;
-
-	}
-
-	ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false,
-				    adapter->rss_lut, adapter->rss_lut_size);
-	if (ret) {
-		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
-			i40evf_stat_str(hw, ret),
-			i40evf_aq_str(hw, hw->aq.asq_last_status));
-	}
-
-	return ret;
-
-}
-
-/**
- * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
- * @adapter: board private structure
- *
- * Returns 0 on success, negative on failure
- **/
-static int i40evf_config_rss_reg(struct i40evf_adapter *adapter)
-{
-	struct i40e_hw *hw = &adapter->hw;
-	u32 *dw;
-	u16 i;
-
-	dw = (u32 *)adapter->rss_key;
-	for (i = 0; i <= adapter->rss_key_size / 4; i++)
-		wr32(hw, I40E_VFQF_HKEY(i), dw[i]);
-
-	dw = (u32 *)adapter->rss_lut;
-	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
-		wr32(hw, I40E_VFQF_HLUT(i), dw[i]);
-
-	i40e_flush(hw);
-
-	return 0;
-}
-
-/**
- * i40evf_config_rss - Configure RSS keys and lut
- * @adapter: board private structure
- *
- * Returns 0 on success, negative on failure
- **/
-int i40evf_config_rss(struct i40evf_adapter *adapter)
-{
-
-	if (RSS_PF(adapter)) {
-		adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT |
-					I40EVF_FLAG_AQ_SET_RSS_KEY;
-		return 0;
-	} else if (RSS_AQ(adapter)) {
-		return i40evf_config_rss_aq(adapter);
-	} else {
-		return i40evf_config_rss_reg(adapter);
-	}
-}
-
-/**
- * i40evf_fill_rss_lut - Fill the lut with default values
- * @adapter: board private structure
- **/
-static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter)
-{
-	u16 i;
-
-	for (i = 0; i < adapter->rss_lut_size; i++)
-		adapter->rss_lut[i] = i % adapter->num_active_queues;
-}
-
-/**
- * i40evf_init_rss - Prepare for RSS
- * @adapter: board private structure
- *
- * Return 0 on success, negative on failure
- **/
-static int i40evf_init_rss(struct i40evf_adapter *adapter)
-{
-	struct i40e_hw *hw = &adapter->hw;
-	int ret;
-
-	if (!RSS_PF(adapter)) {
-		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
-		if (adapter->vf_res->vf_cap_flags &
-		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
-			adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
-		else
-			adapter->hena = I40E_DEFAULT_RSS_HENA;
-
-		wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena);
-		wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32));
-	}
-
-	i40evf_fill_rss_lut(adapter);
-
-	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
-	ret = i40evf_config_rss(adapter);
-
-	return ret;
-}
-
-/**
- * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
- * @adapter: board private structure to initialize
- *
- * We allocate one q_vector per queue interrupt.  If allocation fails we
- * return -ENOMEM.
- **/
-static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
-{
-	int q_idx = 0, num_q_vectors;
-	struct i40e_q_vector *q_vector;
-
-	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
-	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
-				     GFP_KERNEL);
-	if (!adapter->q_vectors)
-		return -ENOMEM;
-
-	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
-		q_vector = &adapter->q_vectors[q_idx];
-		q_vector->adapter = adapter;
-		q_vector->vsi = &adapter->vsi;
-		q_vector->v_idx = q_idx;
-		q_vector->reg_idx = q_idx;
-		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
-		netif_napi_add(adapter->netdev, &q_vector->napi,
-			       i40evf_napi_poll, NAPI_POLL_WEIGHT);
-	}
-
-	return 0;
-}
-
-/**
- * i40evf_free_q_vectors - Free memory allocated for interrupt vectors
- * @adapter: board private structure to initialize
- *
- * This function frees the memory allocated to the q_vectors.  In addition if
- * NAPI is enabled it will delete any references to the NAPI struct prior
- * to freeing the q_vector.
- **/
-static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
-{
-	int q_idx, num_q_vectors;
-	int napi_vectors;
-
-	if (!adapter->q_vectors)
-		return;
-
-	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
-	napi_vectors = adapter->num_active_queues;
-
-	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
-		struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx];
-		if (q_idx < napi_vectors)
-			netif_napi_del(&q_vector->napi);
-	}
-	kfree(adapter->q_vectors);
-	adapter->q_vectors = NULL;
-}
-
-/**
- * i40evf_reset_interrupt_capability - Reset MSIX setup
- * @adapter: board private structure
- *
- **/
-void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
-{
-	if (!adapter->msix_entries)
-		return;
-
-	pci_disable_msix(adapter->pdev);
-	kfree(adapter->msix_entries);
-	adapter->msix_entries = NULL;
-}
-
-/**
- * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init
- * @adapter: board private structure to initialize
- *
- **/
-int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
-{
-	int err;
-
-	err = i40evf_alloc_queues(adapter);
-	if (err) {
-		dev_err(&adapter->pdev->dev,
-			"Unable to allocate memory for queues\n");
-		goto err_alloc_queues;
-	}
-
-	rtnl_lock();
-	err = i40evf_set_interrupt_capability(adapter);
-	rtnl_unlock();
-	if (err) {
-		dev_err(&adapter->pdev->dev,
-			"Unable to setup interrupt capabilities\n");
-		goto err_set_interrupt;
-	}
-
-	err = i40evf_alloc_q_vectors(adapter);
-	if (err) {
-		dev_err(&adapter->pdev->dev,
-			"Unable to allocate memory for queue vectors\n");
-		goto err_alloc_q_vectors;
-	}
-
-	/* If we've made it this far with the ADq flag ON, then we haven't
-	 * bailed out anywhere in the middle. ADq isn't just enabled; the
-	 * actual resources have been allocated in the reset path.
-	 * Now we can truly claim that ADq is enabled.
-	 */
-	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
-	    adapter->num_tc)
-		dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
-			 adapter->num_tc);
-
-	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
-		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
-		 adapter->num_active_queues);
-
-	return 0;
-err_alloc_q_vectors:
-	i40evf_reset_interrupt_capability(adapter);
-err_set_interrupt:
-	i40evf_free_queues(adapter);
-err_alloc_queues:
-	return err;
-}
-
-/**
- * i40evf_free_rss - Free memory used by RSS structs
- * @adapter: board private structure
- **/
-static void i40evf_free_rss(struct i40evf_adapter *adapter)
-{
-	kfree(adapter->rss_key);
-	adapter->rss_key = NULL;
-
-	kfree(adapter->rss_lut);
-	adapter->rss_lut = NULL;
-}
-
-/**
- * i40evf_reinit_interrupt_scheme - Reallocate queues and vectors
- * @adapter: board private structure
- *
- * Returns 0 on success, negative on failure
- **/
-static int i40evf_reinit_interrupt_scheme(struct i40evf_adapter *adapter)
-{
-	struct net_device *netdev = adapter->netdev;
-	int err;
-
-	if (netif_running(netdev))
-		i40evf_free_traffic_irqs(adapter);
-	i40evf_free_misc_irq(adapter);
-	i40evf_reset_interrupt_capability(adapter);
-	i40evf_free_q_vectors(adapter);
-	i40evf_free_queues(adapter);
-
-	err =  i40evf_init_interrupt_scheme(adapter);
-	if (err)
-		goto err;
-
-	netif_tx_stop_all_queues(netdev);
-
-	err = i40evf_request_misc_irq(adapter);
-	if (err)
-		goto err;
-
-	set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
-
-	i40evf_map_rings_to_vectors(adapter);
-
-	if (RSS_AQ(adapter))
-		adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
-	else
-		err = i40evf_init_rss(adapter);
-err:
-	return err;
-}
-
-/**
- * i40evf_watchdog_timer - Periodic call-back timer
- * @t: pointer to the timer_list containing our adapter
- **/
-static void i40evf_watchdog_timer(struct timer_list *t)
-{
-	struct i40evf_adapter *adapter = from_timer(adapter, t,
-						    watchdog_timer);
-
-	schedule_work(&adapter->watchdog_task);
-	/* timer will be rescheduled in watchdog task */
-}
-
-/**
- * i40evf_watchdog_task - Periodic call-back task
- * @work: pointer to work_struct
- **/
-static void i40evf_watchdog_task(struct work_struct *work)
-{
-	struct i40evf_adapter *adapter = container_of(work,
-						      struct i40evf_adapter,
-						      watchdog_task);
-	struct i40e_hw *hw = &adapter->hw;
-	u32 reg_val;
-
-	if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
-		goto restart_watchdog;
-
-	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
-		reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
-			  I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-		if ((reg_val == VIRTCHNL_VFR_VFACTIVE) ||
-		    (reg_val == VIRTCHNL_VFR_COMPLETED)) {
-			/* A chance for redemption! */
-			dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
-			adapter->state = __I40EVF_STARTUP;
-			adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
-			schedule_delayed_work(&adapter->init_task, 10);
-			clear_bit(__I40EVF_IN_CRITICAL_TASK,
-				  &adapter->crit_section);
-			/* Don't reschedule the watchdog, since we've restarted
-			 * the init task. When init_task contacts the PF and
-			 * gets everything set up again, it'll restart the
-			 * watchdog for us. Down, boy. Sit. Stay. Woof.
-			 */
-			return;
-		}
-		adapter->aq_required = 0;
-		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
-		goto watchdog_done;
-	}
-
-	if ((adapter->state < __I40EVF_DOWN) ||
-	    (adapter->flags & I40EVF_FLAG_RESET_PENDING))
-		goto watchdog_done;
-
-	/* check for reset */
-	reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK;
-	if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) {
-		adapter->state = __I40EVF_RESETTING;
-		adapter->flags |= I40EVF_FLAG_RESET_PENDING;
-		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
-		schedule_work(&adapter->reset_task);
-		adapter->aq_required = 0;
-		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
-		goto watchdog_done;
-	}
-
-	/* Process admin queue tasks. After init, everything gets done
-	 * here so we don't race on the admin queue.
-	 */
-	if (adapter->current_op) {
-		if (!i40evf_asq_done(hw)) {
-			dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
-			i40evf_send_api_ver(adapter);
-		}
-		goto watchdog_done;
-	}
-	if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
-		i40evf_send_vf_config_msg(adapter);
-		goto watchdog_done;
-	}
-
-	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
-		i40evf_disable_queues(adapter);
-		goto watchdog_done;
-	}
-
-	if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
-		i40evf_map_queues(adapter);
-		goto watchdog_done;
-	}
-
-	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) {
-		i40evf_add_ether_addrs(adapter);
-		goto watchdog_done;
-	}
-
-	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) {
-		i40evf_add_vlans(adapter);
-		goto watchdog_done;
-	}
-
-	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) {
-		i40evf_del_ether_addrs(adapter);
-		goto watchdog_done;
-	}
-
-	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) {
-		i40evf_del_vlans(adapter);
-		goto watchdog_done;
-	}
-
-	if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
-		i40evf_enable_vlan_stripping(adapter);
-		goto watchdog_done;
-	}
-
-	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
-		i40evf_disable_vlan_stripping(adapter);
-		goto watchdog_done;
-	}
-
-	if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
-		i40evf_configure_queues(adapter);
-		goto watchdog_done;
-	}
-
-	if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) {
-		i40evf_enable_queues(adapter);
-		goto watchdog_done;
-	}
-
-	if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) {
-		/* This message goes straight to the firmware, not the
-		 * PF, so we don't have to set current_op as we will
-		 * not get a response through the ARQ.
-		 */
-		i40evf_init_rss(adapter);
-		adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
-		goto watchdog_done;
-	}
-	if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) {
-		i40evf_get_hena(adapter);
-		goto watchdog_done;
-	}
-	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) {
-		i40evf_set_hena(adapter);
-		goto watchdog_done;
-	}
-	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) {
-		i40evf_set_rss_key(adapter);
-		goto watchdog_done;
-	}
-	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) {
-		i40evf_set_rss_lut(adapter);
-		goto watchdog_done;
-	}
-
-	if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
-		i40evf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
-				       FLAG_VF_MULTICAST_PROMISC);
-		goto watchdog_done;
-	}
-
-	if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
-		i40evf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
-		goto watchdog_done;
-	}
-
-	if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) &&
-	    (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) {
-		i40evf_set_promiscuous(adapter, 0);
-		goto watchdog_done;
-	}
-
-	if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_CHANNELS) {
-		i40evf_enable_channels(adapter);
-		goto watchdog_done;
-	}
-
-	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_CHANNELS) {
-		i40evf_disable_channels(adapter);
-		goto watchdog_done;
-	}
-
-	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_CLOUD_FILTER) {
-		i40evf_add_cloud_filter(adapter);
-		goto watchdog_done;
-	}
-
-	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_CLOUD_FILTER) {
-		i40evf_del_cloud_filter(adapter);
-		goto watchdog_done;
-	}
-
-	schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
-
-	if (adapter->state == __I40EVF_RUNNING)
-		i40evf_request_stats(adapter);
-watchdog_done:
-	if (adapter->state == __I40EVF_RUNNING)
-		i40evf_detect_recover_hung(&adapter->vsi);
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
-restart_watchdog:
-	if (adapter->state == __I40EVF_REMOVE)
-		return;
-	if (adapter->aq_required)
-		mod_timer(&adapter->watchdog_timer,
-			  jiffies + msecs_to_jiffies(20));
-	else
-		mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
-	schedule_work(&adapter->adminq_task);
-}
-
-static void i40evf_disable_vf(struct i40evf_adapter *adapter)
-{
-	struct i40evf_mac_filter *f, *ftmp;
-	struct i40evf_vlan_filter *fv, *fvtmp;
-	struct i40evf_cloud_filter *cf, *cftmp;
-
-	adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
-
-	/* We don't use netif_running() because it may be true prior to
-	 * ndo_open() returning, so we can't assume it means all our open
-	 * tasks have finished, since we're not holding the rtnl_lock here.
-	 */
-	if (adapter->state == __I40EVF_RUNNING) {
-		set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
-		netif_carrier_off(adapter->netdev);
-		netif_tx_disable(adapter->netdev);
-		adapter->link_up = false;
-		i40evf_napi_disable_all(adapter);
-		i40evf_irq_disable(adapter);
-		i40evf_free_traffic_irqs(adapter);
-		i40evf_free_all_tx_resources(adapter);
-		i40evf_free_all_rx_resources(adapter);
-	}
-
-	spin_lock_bh(&adapter->mac_vlan_list_lock);
-
-	/* Delete all of the filters */
-	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
-		list_del(&f->list);
-		kfree(f);
-	}
-
-	list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
-		list_del(&fv->list);
-		kfree(fv);
-	}
-
-	spin_unlock_bh(&adapter->mac_vlan_list_lock);
-
-	spin_lock_bh(&adapter->cloud_filter_list_lock);
-	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
-		list_del(&cf->list);
-		kfree(cf);
-		adapter->num_cloud_filters--;
-	}
-	spin_unlock_bh(&adapter->cloud_filter_list_lock);
-
-	i40evf_free_misc_irq(adapter);
-	i40evf_reset_interrupt_capability(adapter);
-	i40evf_free_queues(adapter);
-	i40evf_free_q_vectors(adapter);
-	kfree(adapter->vf_res);
-	i40evf_shutdown_adminq(&adapter->hw);
-	adapter->netdev->flags &= ~IFF_UP;
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
-	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
-	adapter->state = __I40EVF_DOWN;
-	wake_up(&adapter->down_waitqueue);
-	dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
-}
-
-#define I40EVF_RESET_WAIT_MS 10
-#define I40EVF_RESET_WAIT_COUNT 500
-/**
- * i40evf_reset_task - Call-back task to handle hardware reset
- * @work: pointer to work_struct
- *
- * During reset we need to shut down and reinitialize the admin queue
- * before we can use it to communicate with the PF again. We also clear
- * and reinit the rings because that context is lost as well.
- **/
-static void i40evf_reset_task(struct work_struct *work)
-{
-	struct i40evf_adapter *adapter = container_of(work,
-						      struct i40evf_adapter,
-						      reset_task);
-	struct virtchnl_vf_resource *vfres = adapter->vf_res;
-	struct net_device *netdev = adapter->netdev;
-	struct i40e_hw *hw = &adapter->hw;
-	struct i40evf_vlan_filter *vlf;
-	struct i40evf_cloud_filter *cf;
-	struct i40evf_mac_filter *f;
-	u32 reg_val;
-	int i = 0, err;
-	bool running;
-
-	/* When device is being removed it doesn't make sense to run the reset
-	 * task, just return in such a case.
-	 */
-	if (test_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section))
-		return;
-
-	while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
-				&adapter->crit_section))
-		usleep_range(500, 1000);
-	if (CLIENT_ENABLED(adapter)) {
-		adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN |
-				    I40EVF_FLAG_CLIENT_NEEDS_CLOSE |
-				    I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
-				    I40EVF_FLAG_SERVICE_CLIENT_REQUESTED);
-		cancel_delayed_work_sync(&adapter->client_task);
-		i40evf_notify_client_close(&adapter->vsi, true);
-	}
-	i40evf_misc_irq_disable(adapter);
-	if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
-		adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
-		/* Restart the AQ here. If we have been reset but didn't
-		 * detect it, or if the PF had to reinit, our AQ will be hosed.
-		 */
-		i40evf_shutdown_adminq(hw);
-		i40evf_init_adminq(hw);
-		i40evf_request_reset(adapter);
-	}
-	adapter->flags |= I40EVF_FLAG_RESET_PENDING;
-
-	/* poll until we see the reset actually happen */
-	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
-		reg_val = rd32(hw, I40E_VF_ARQLEN1) &
-			  I40E_VF_ARQLEN1_ARQENABLE_MASK;
-		if (!reg_val)
-			break;
-		usleep_range(5000, 10000);
-	}
-	if (i == I40EVF_RESET_WAIT_COUNT) {
-		dev_info(&adapter->pdev->dev, "Never saw reset\n");
-		goto continue_reset; /* act like the reset happened */
-	}
-
-	/* wait until the reset is complete and the PF is responding to us */
-	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
-		/* sleep first to make sure a minimum wait time is met */
-		msleep(I40EVF_RESET_WAIT_MS);
-
-		reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
-			  I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-		if (reg_val == VIRTCHNL_VFR_VFACTIVE)
-			break;
-	}
-
-	pci_set_master(adapter->pdev);
-
-	if (i == I40EVF_RESET_WAIT_COUNT) {
-		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
-			reg_val);
-		i40evf_disable_vf(adapter);
-		clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
-		return; /* Do not attempt to reinit. It's dead, Jim. */
-	}
-
-continue_reset:
-	/* We don't use netif_running() because it may be true prior to
-	 * ndo_open() returning, so we can't assume it means all our open
-	 * tasks have finished, since we're not holding the rtnl_lock here.
-	 */
-	running = ((adapter->state == __I40EVF_RUNNING) ||
-		   (adapter->state == __I40EVF_RESETTING));
-
-	if (running) {
-		netif_carrier_off(netdev);
-		netif_tx_stop_all_queues(netdev);
-		adapter->link_up = false;
-		i40evf_napi_disable_all(adapter);
-	}
-	i40evf_irq_disable(adapter);
-
-	adapter->state = __I40EVF_RESETTING;
-	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
-
-	/* free the Tx/Rx rings and descriptors, might be better to just
-	 * re-use them sometime in the future
-	 */
-	i40evf_free_all_rx_resources(adapter);
-	i40evf_free_all_tx_resources(adapter);
-
-	adapter->flags |= I40EVF_FLAG_QUEUES_DISABLED;
-	/* kill and reinit the admin queue */
-	i40evf_shutdown_adminq(hw);
-	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
-	err = i40evf_init_adminq(hw);
-	if (err)
-		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
-			 err);
-	adapter->aq_required = 0;
-
-	if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) {
-		err = i40evf_reinit_interrupt_scheme(adapter);
-		if (err)
-			goto reset_err;
-	}
-
-	adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG;
-	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
-
-	spin_lock_bh(&adapter->mac_vlan_list_lock);
-
-	/* re-add all MAC filters */
-	list_for_each_entry(f, &adapter->mac_filter_list, list) {
-		f->add = true;
-	}
-	/* re-add all VLAN filters */
-	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
-		vlf->add = true;
-	}
-
-	spin_unlock_bh(&adapter->mac_vlan_list_lock);
-
-	/* check if TCs are running and re-add all cloud filters */
-	spin_lock_bh(&adapter->cloud_filter_list_lock);
-	if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
-	    adapter->num_tc) {
-		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
-			cf->add = true;
-		}
-	}
-	spin_unlock_bh(&adapter->cloud_filter_list_lock);
-
-	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
-	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
-	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
-	i40evf_misc_irq_enable(adapter);
-
-	mod_timer(&adapter->watchdog_timer, jiffies + 2);
-
-	/* We were running when the reset started, so we need to restore some
-	 * state here.
-	 */
-	if (running) {
-		/* allocate transmit descriptors */
-		err = i40evf_setup_all_tx_resources(adapter);
-		if (err)
-			goto reset_err;
-
-		/* allocate receive descriptors */
-		err = i40evf_setup_all_rx_resources(adapter);
-		if (err)
-			goto reset_err;
-
-		if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) {
-			err = i40evf_request_traffic_irqs(adapter,
-							  netdev->name);
-			if (err)
-				goto reset_err;
-
-			adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
-		}
-
-		i40evf_configure(adapter);
-
-		i40evf_up_complete(adapter);
-
-		i40evf_irq_enable(adapter, true);
-	} else {
-		adapter->state = __I40EVF_DOWN;
-		wake_up(&adapter->down_waitqueue);
-	}
-	clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
-
-	return;
-reset_err:
-	clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
-	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
-	i40evf_close(netdev);
-}
-
-/**
- * i40evf_adminq_task - worker thread to clean the admin queue
- * @work: pointer to work_struct containing our data
- **/
-static void i40evf_adminq_task(struct work_struct *work)
-{
-	struct i40evf_adapter *adapter =
-		container_of(work, struct i40evf_adapter, adminq_task);
-	struct i40e_hw *hw = &adapter->hw;
-	struct i40e_arq_event_info event;
-	enum virtchnl_ops v_op;
-	i40e_status ret, v_ret;
-	u32 val, oldval;
-	u16 pending;
-
-	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
-		goto out;
-
-	event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
-	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
-	if (!event.msg_buf)
-		goto out;
-
-	do {
-		ret = i40evf_clean_arq_element(hw, &event, &pending);
-		v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
-		v_ret = (i40e_status)le32_to_cpu(event.desc.cookie_low);
-
-		if (ret || !v_op)
-			break; /* No event to process or error cleaning ARQ */
-
-		i40evf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
-					   event.msg_len);
-		if (pending != 0)
-			memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
-	} while (pending);
-
-	if ((adapter->flags &
-	     (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) ||
-	    adapter->state == __I40EVF_RESETTING)
-		goto freedom;
-
-	/* check for error indications */
-	val = rd32(hw, hw->aq.arq.len);
-	if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
-		goto freedom;
-	oldval = val;
-	if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
-		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
-		val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
-	}
-	if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
-		dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
-		val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
-	}
-	if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
-		dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
-		val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
-	}
-	if (oldval != val)
-		wr32(hw, hw->aq.arq.len, val);
-
-	val = rd32(hw, hw->aq.asq.len);
-	oldval = val;
-	if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) {
-		dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
-		val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
-	}
-	if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
-		dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
-		val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
-	}
-	if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
-		dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
-		val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
-	}
-	if (oldval != val)
-		wr32(hw, hw->aq.asq.len, val);
-
-freedom:
-	kfree(event.msg_buf);
-out:
-	/* re-enable Admin queue interrupt cause */
-	i40evf_misc_irq_enable(adapter);
-}
-
-/**
- * i40evf_client_task - worker thread to perform client work
- * @work: pointer to work_struct containing our data
- *
- * This task handles client interactions. Because client calls can be
- * reentrant, we can't handle them in the watchdog.
- **/
-static void i40evf_client_task(struct work_struct *work)
-{
-	struct i40evf_adapter *adapter =
-		container_of(work, struct i40evf_adapter, client_task.work);
-
-	/* If we can't get the client bit, just give up. We'll be rescheduled
-	 * later.
-	 */
-
-	if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section))
-		return;
-
-	if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) {
-		i40evf_client_subtask(adapter);
-		adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
-		goto out;
-	}
-	if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
-		i40evf_notify_client_l2_params(&adapter->vsi);
-		adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
-		goto out;
-	}
-	if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
-		i40evf_notify_client_close(&adapter->vsi, false);
-		adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
-		goto out;
-	}
-	if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
-		i40evf_notify_client_open(&adapter->vsi);
-		adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
-	}
-out:
-	clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
-}
-
-/**
- * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
- * @adapter: board private structure
- *
- * Free all transmit software resources
- **/
-void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
-{
-	int i;
-
-	if (!adapter->tx_rings)
-		return;
-
-	for (i = 0; i < adapter->num_active_queues; i++)
-		if (adapter->tx_rings[i].desc)
-			i40evf_free_tx_resources(&adapter->tx_rings[i]);
-}
-
-/**
- * i40evf_setup_all_tx_resources - allocate all queues Tx resources
- * @adapter: board private structure
- *
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not).  It is the
- * caller's duty to clean those orphaned rings.
- *
- * Return 0 on success, negative on failure
- **/
-static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
-{
-	int i, err = 0;
-
-	for (i = 0; i < adapter->num_active_queues; i++) {
-		adapter->tx_rings[i].count = adapter->tx_desc_count;
-		err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]);
-		if (!err)
-			continue;
-		dev_err(&adapter->pdev->dev,
-			"Allocation for Tx Queue %u failed\n", i);
-		break;
-	}
-
-	return err;
-}
-
-/**
- * i40evf_setup_all_rx_resources - allocate all queues Rx resources
- * @adapter: board private structure
- *
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not).  It is the
- * caller's duty to clean those orphaned rings.
- *
- * Return 0 on success, negative on failure
- **/
-static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
-{
-	int i, err = 0;
-
-	for (i = 0; i < adapter->num_active_queues; i++) {
-		adapter->rx_rings[i].count = adapter->rx_desc_count;
-		err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]);
-		if (!err)
-			continue;
-		dev_err(&adapter->pdev->dev,
-			"Allocation for Rx Queue %u failed\n", i);
-		break;
-	}
-	return err;
-}
-
-/**
- * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
- * @adapter: board private structure
- *
- * Free all receive software resources
- **/
-void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
-{
-	int i;
-
-	if (!adapter->rx_rings)
-		return;
-
-	for (i = 0; i < adapter->num_active_queues; i++)
-		if (adapter->rx_rings[i].desc)
-			i40evf_free_rx_resources(&adapter->rx_rings[i]);
-}
-
-/**
- * i40evf_validate_tx_bandwidth - validate the max Tx bandwidth
- * @adapter: board private structure
- * @max_tx_rate: max Tx bw for a tc
- **/
-static int i40evf_validate_tx_bandwidth(struct i40evf_adapter *adapter,
-					u64 max_tx_rate)
-{
-	int speed = 0, ret = 0;
-
-	switch (adapter->link_speed) {
-	case I40E_LINK_SPEED_40GB:
-		speed = 40000;
-		break;
-	case I40E_LINK_SPEED_25GB:
-		speed = 25000;
-		break;
-	case I40E_LINK_SPEED_20GB:
-		speed = 20000;
-		break;
-	case I40E_LINK_SPEED_10GB:
-		speed = 10000;
-		break;
-	case I40E_LINK_SPEED_1GB:
-		speed = 1000;
-		break;
-	case I40E_LINK_SPEED_100MB:
-		speed = 100;
-		break;
-	default:
-		break;
-	}
-
-	if (max_tx_rate > speed) {
-		dev_err(&adapter->pdev->dev,
-			"Invalid tx rate specified\n");
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
-
-/**
- * i40evf_validate_ch_config - validate queue mapping info
- * @adapter: board private structure
- * @mqprio_qopt: queue parameters
- *
- * This function checks whether the queue channel configuration provided
- * by the user is valid. Returns 0 on a valid config.
- **/
-static int i40evf_validate_ch_config(struct i40evf_adapter *adapter,
-				     struct tc_mqprio_qopt_offload *mqprio_qopt)
-{
-	u64 total_max_rate = 0;
-	int i, num_qps = 0;
-	u64 tx_rate = 0;
-	int ret = 0;
-
-	if (mqprio_qopt->qopt.num_tc > I40EVF_MAX_TRAFFIC_CLASS ||
-	    mqprio_qopt->qopt.num_tc < 1)
-		return -EINVAL;
-
-	for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
-		if (!mqprio_qopt->qopt.count[i] ||
-		    mqprio_qopt->qopt.offset[i] != num_qps)
-			return -EINVAL;
-		if (mqprio_qopt->min_rate[i]) {
-			dev_err(&adapter->pdev->dev,
-				"Invalid min tx rate (greater than 0) specified\n");
-			return -EINVAL;
-		}
-		/*convert to Mbps */
-		tx_rate = div_u64(mqprio_qopt->max_rate[i],
-				  I40EVF_MBPS_DIVISOR);
-		total_max_rate += tx_rate;
-		num_qps += mqprio_qopt->qopt.count[i];
-	}
-	if (num_qps > I40EVF_MAX_REQ_QUEUES)
-		return -EINVAL;
-
-	ret = i40evf_validate_tx_bandwidth(adapter, total_max_rate);
-	return ret;
-}
-
-/**
- * i40evf_del_all_cloud_filters - delete all cloud filters on the traffic classes
- * @adapter: board private structure
- **/
-static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter)
-{
-	struct i40evf_cloud_filter *cf, *cftmp;
-
-	spin_lock_bh(&adapter->cloud_filter_list_lock);
-	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
-				 list) {
-		list_del(&cf->list);
-		kfree(cf);
-		adapter->num_cloud_filters--;
-	}
-	spin_unlock_bh(&adapter->cloud_filter_list_lock);
-}
-
-/**
- * __i40evf_setup_tc - configure multiple traffic classes
- * @netdev: network interface device structure
- * @type_data: tc offload data
- *
- * This function processes the config information provided by the
- * user to configure traffic classes/queue channels and packages the
- * information to request the PF to setup traffic classes.
- *
- * Returns 0 on success.
- **/
-static int __i40evf_setup_tc(struct net_device *netdev, void *type_data)
-{
-	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	struct virtchnl_vf_resource *vfres = adapter->vf_res;
-	u8 num_tc = 0, total_qps = 0;
-	int ret = 0, netdev_tc = 0;
-	u64 max_tx_rate;
-	u16 mode;
-	int i;
-
-	num_tc = mqprio_qopt->qopt.num_tc;
-	mode = mqprio_qopt->mode;
-
-	/* delete queue_channel */
-	if (!mqprio_qopt->qopt.hw) {
-		if (adapter->ch_config.state == __I40EVF_TC_RUNNING) {
-			/* reset the tc configuration */
-			netdev_reset_tc(netdev);
-			adapter->num_tc = 0;
-			netif_tx_stop_all_queues(netdev);
-			netif_tx_disable(netdev);
-			i40evf_del_all_cloud_filters(adapter);
-			adapter->aq_required = I40EVF_FLAG_AQ_DISABLE_CHANNELS;
-			goto exit;
-		} else {
-			return -EINVAL;
-		}
-	}
-
-	/* add queue channel */
-	if (mode == TC_MQPRIO_MODE_CHANNEL) {
-		if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
-			dev_err(&adapter->pdev->dev, "ADq not supported\n");
-			return -EOPNOTSUPP;
-		}
-		if (adapter->ch_config.state != __I40EVF_TC_INVALID) {
-			dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
-			return -EINVAL;
-		}
-
-		ret = i40evf_validate_ch_config(adapter, mqprio_qopt);
-		if (ret)
-			return ret;
-		/* Return if same TC config is requested */
-		if (adapter->num_tc == num_tc)
-			return 0;
-		adapter->num_tc = num_tc;
-
-		for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) {
-			if (i < num_tc) {
-				adapter->ch_config.ch_info[i].count =
-					mqprio_qopt->qopt.count[i];
-				adapter->ch_config.ch_info[i].offset =
-					mqprio_qopt->qopt.offset[i];
-				total_qps += mqprio_qopt->qopt.count[i];
-				max_tx_rate = mqprio_qopt->max_rate[i];
-				/* convert to Mbps */
-				max_tx_rate = div_u64(max_tx_rate,
-						      I40EVF_MBPS_DIVISOR);
-				adapter->ch_config.ch_info[i].max_tx_rate =
-					max_tx_rate;
-			} else {
-				adapter->ch_config.ch_info[i].count = 1;
-				adapter->ch_config.ch_info[i].offset = 0;
-			}
-		}
-		adapter->ch_config.total_qps = total_qps;
-		netif_tx_stop_all_queues(netdev);
-		netif_tx_disable(netdev);
-		adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_CHANNELS;
-		netdev_reset_tc(netdev);
-		/* Report the tc mapping up the stack */
-		netdev_set_num_tc(adapter->netdev, num_tc);
-		for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) {
-			u16 qcount = mqprio_qopt->qopt.count[i];
-			u16 qoffset = mqprio_qopt->qopt.offset[i];
-
-			if (i < num_tc)
-				netdev_set_tc_queue(netdev, netdev_tc++, qcount,
-						    qoffset);
-		}
-	}
-exit:
-	return ret;
-}
-
-/**
- * i40evf_parse_cls_flower - Parse tc flower filters provided by kernel
- * @adapter: board private structure
- * @f: pointer to struct tc_cls_flower_offload
- * @filter: pointer to cloud filter structure
- */
-static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter,
-				   struct tc_cls_flower_offload *f,
-				   struct i40evf_cloud_filter *filter)
-{
-	u16 n_proto_mask = 0;
-	u16 n_proto_key = 0;
-	u8 field_flags = 0;
-	u16 addr_type = 0;
-	u16 n_proto = 0;
-	int i = 0;
-	struct virtchnl_filter *vf = &filter->f;
-
-	if (f->dissector->used_keys &
-	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
-	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
-	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
-	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
-	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
-	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
-	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
-	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
-		dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
-			f->dissector->used_keys);
-		return -EOPNOTSUPP;
-	}
-
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
-		struct flow_dissector_key_keyid *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_ENC_KEYID,
-						  f->mask);
-
-		if (mask->keyid != 0)
-			field_flags |= I40EVF_CLOUD_FIELD_TEN_ID;
-	}
-
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
-		struct flow_dissector_key_basic *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_BASIC,
-						  f->key);
-
-		struct flow_dissector_key_basic *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_BASIC,
-						  f->mask);
-		n_proto_key = ntohs(key->n_proto);
-		n_proto_mask = ntohs(mask->n_proto);
-
-		if (n_proto_key == ETH_P_ALL) {
-			n_proto_key = 0;
-			n_proto_mask = 0;
-		}
-		n_proto = n_proto_key & n_proto_mask;
-		if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
-			return -EINVAL;
-		if (n_proto == ETH_P_IPV6) {
-			/* specify flow type as TCP IPv6 */
-			vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
-		}
-
-		if (key->ip_proto != IPPROTO_TCP) {
-			dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
-			return -EINVAL;
-		}
-	}
-
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
-		struct flow_dissector_key_eth_addrs *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
-						  f->key);
-
-		struct flow_dissector_key_eth_addrs *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
-						  f->mask);
-		/* use is_broadcast and is_zero to check for all 0xff or 0 */
-		if (!is_zero_ether_addr(mask->dst)) {
-			if (is_broadcast_ether_addr(mask->dst)) {
-				field_flags |= I40EVF_CLOUD_FIELD_OMAC;
-			} else {
-				dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
-					mask->dst);
-				return I40E_ERR_CONFIG;
-			}
-		}
-
-		if (!is_zero_ether_addr(mask->src)) {
-			if (is_broadcast_ether_addr(mask->src)) {
-				field_flags |= I40EVF_CLOUD_FIELD_IMAC;
-			} else {
-				dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
-					mask->src);
-				return I40E_ERR_CONFIG;
-			}
-		}
-
-		if (!is_zero_ether_addr(key->dst))
-			if (is_valid_ether_addr(key->dst) ||
-			    is_multicast_ether_addr(key->dst)) {
-				/* set the mask if a valid dst_mac address */
-				for (i = 0; i < ETH_ALEN; i++)
-					vf->mask.tcp_spec.dst_mac[i] |= 0xff;
-				ether_addr_copy(vf->data.tcp_spec.dst_mac,
-						key->dst);
-			}
-
-		if (!is_zero_ether_addr(key->src))
-			if (is_valid_ether_addr(key->src) ||
-			    is_multicast_ether_addr(key->src)) {
-				/* set the mask if a valid src_mac address */
-				for (i = 0; i < ETH_ALEN; i++)
-					vf->mask.tcp_spec.src_mac[i] |= 0xff;
-				ether_addr_copy(vf->data.tcp_spec.src_mac,
-						key->src);
-		}
-	}
-
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
-		struct flow_dissector_key_vlan *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_VLAN,
-						  f->key);
-		struct flow_dissector_key_vlan *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_VLAN,
-						  f->mask);
-
-		if (mask->vlan_id) {
-			if (mask->vlan_id == VLAN_VID_MASK) {
-				field_flags |= I40EVF_CLOUD_FIELD_IVLAN;
-			} else {
-				dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
-					mask->vlan_id);
-				return I40E_ERR_CONFIG;
-			}
-		}
-		vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
-		vf->data.tcp_spec.vlan_id = cpu_to_be16(key->vlan_id);
-	}
-
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
-		struct flow_dissector_key_control *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_CONTROL,
-						  f->key);
-
-		addr_type = key->addr_type;
-	}
-
-	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
-		struct flow_dissector_key_ipv4_addrs *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
-						  f->key);
-		struct flow_dissector_key_ipv4_addrs *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
-						  f->mask);
-
-		if (mask->dst) {
-			if (mask->dst == cpu_to_be32(0xffffffff)) {
-				field_flags |= I40EVF_CLOUD_FIELD_IIP;
-			} else {
-				dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
-					be32_to_cpu(mask->dst));
-				return I40E_ERR_CONFIG;
-			}
-		}
-
-		if (mask->src) {
-			if (mask->src == cpu_to_be32(0xffffffff)) {
-				field_flags |= I40EVF_CLOUD_FIELD_IIP;
-			} else {
-				dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
-					be32_to_cpu(mask->src));
-				return I40E_ERR_CONFIG;
-			}
-		}
-
-		if (field_flags & I40EVF_CLOUD_FIELD_TEN_ID) {
-			dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
-			return I40E_ERR_CONFIG;
-		}
-		if (key->dst) {
-			vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
-			vf->data.tcp_spec.dst_ip[0] = key->dst;
-		}
-		if (key->src) {
-			vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
-			vf->data.tcp_spec.src_ip[0] = key->src;
-		}
-	}
-
-	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
-		struct flow_dissector_key_ipv6_addrs *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
-						  f->key);
-		struct flow_dissector_key_ipv6_addrs *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
-						  f->mask);
-
-		/* validate mask, make sure it is not IPV6_ADDR_ANY */
-		if (ipv6_addr_any(&mask->dst)) {
-			dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
-				IPV6_ADDR_ANY);
-			return I40E_ERR_CONFIG;
-		}
-
-		/* src and dest IPv6 address should not be LOOPBACK
-		 * (0:0:0:0:0:0:0:1) which can be represented as ::1
-		 */
-		if (ipv6_addr_loopback(&key->dst) ||
-		    ipv6_addr_loopback(&key->src)) {
-			dev_err(&adapter->pdev->dev,
-				"ipv6 addr should not be loopback\n");
-			return I40E_ERR_CONFIG;
-		}
-		if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
-			field_flags |= I40EVF_CLOUD_FIELD_IIP;
-
-		for (i = 0; i < 4; i++)
-			vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
-		memcpy(&vf->data.tcp_spec.dst_ip, &key->dst.s6_addr32,
-		       sizeof(vf->data.tcp_spec.dst_ip));
-		for (i = 0; i < 4; i++)
-			vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
-		memcpy(&vf->data.tcp_spec.src_ip, &key->src.s6_addr32,
-		       sizeof(vf->data.tcp_spec.src_ip));
-	}
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
-		struct flow_dissector_key_ports *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_PORTS,
-						  f->key);
-		struct flow_dissector_key_ports *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_PORTS,
-						  f->mask);
-
-		if (mask->src) {
-			if (mask->src == cpu_to_be16(0xffff)) {
-				field_flags |= I40EVF_CLOUD_FIELD_IIP;
-			} else {
-				dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
-					be16_to_cpu(mask->src));
-				return I40E_ERR_CONFIG;
-			}
-		}
-
-		if (mask->dst) {
-			if (mask->dst == cpu_to_be16(0xffff)) {
-				field_flags |= I40EVF_CLOUD_FIELD_IIP;
-			} else {
-				dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
-					be16_to_cpu(mask->dst));
-				return I40E_ERR_CONFIG;
-			}
-		}
-		if (key->dst) {
-			vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
-			vf->data.tcp_spec.dst_port = key->dst;
-		}
-
-		if (key->src) {
-			vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
-			vf->data.tcp_spec.src_port = key->src;
-		}
-	}
-	vf->field_flags = field_flags;
-
-	return 0;
-}
-
-/**
- * i40evf_handle_tclass - Forward to a traffic class on the device
- * @adapter: board private structure
- * @tc: traffic class index on the device
- * @filter: pointer to cloud filter structure
- */
-static int i40evf_handle_tclass(struct i40evf_adapter *adapter, u32 tc,
-				struct i40evf_cloud_filter *filter)
-{
-	if (tc == 0)
-		return 0;
-	if (tc < adapter->num_tc) {
-		if (!filter->f.data.tcp_spec.dst_port) {
-			dev_err(&adapter->pdev->dev,
-				"Specify destination port to redirect to traffic class other than TC0\n");
-			return -EINVAL;
-		}
-	}
-	/* redirect to a traffic class on the same device */
-	filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
-	filter->f.action_meta = tc;
-	return 0;
-}
-
-/**
- * i40evf_configure_clsflower - Add tc flower filters
- * @adapter: board private structure
- * @cls_flower: Pointer to struct tc_cls_flower_offload
- */
-static int i40evf_configure_clsflower(struct i40evf_adapter *adapter,
-				      struct tc_cls_flower_offload *cls_flower)
-{
-	int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
-	struct i40evf_cloud_filter *filter = NULL;
-	int err = -EINVAL, count = 50;
-
-	if (tc < 0) {
-		dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
-		return -EINVAL;
-	}
-
-	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
-	if (!filter)
-		return -ENOMEM;
-
-	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
-				&adapter->crit_section)) {
-		if (--count == 0)
-			goto err;
-		udelay(1);
-	}
-
-	filter->cookie = cls_flower->cookie;
-
-	/* set the mask to all zeroes to begin with */
-	memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
-	/* start out with flow type and eth type IPv4 to begin with */
-	filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
-	err = i40evf_parse_cls_flower(adapter, cls_flower, filter);
-	if (err < 0)
-		goto err;
-
-	err = i40evf_handle_tclass(adapter, tc, filter);
-	if (err < 0)
-		goto err;
-
-	/* add filter to the list */
-	spin_lock_bh(&adapter->cloud_filter_list_lock);
-	list_add_tail(&filter->list, &adapter->cloud_filter_list);
-	adapter->num_cloud_filters++;
-	filter->add = true;
-	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
-	spin_unlock_bh(&adapter->cloud_filter_list_lock);
-err:
-	if (err)
-		kfree(filter);
-
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
-	return err;
-}
-
-/**
- * i40evf_find_cf - Find the cloud filter in the list
- * @adapter: Board private structure
- * @cookie: filter specific cookie
- *
- * Returns ptr to the filter object or NULL. Must be called while holding the
- * cloud_filter_list_lock.
- */
-static struct i40evf_cloud_filter *i40evf_find_cf(struct i40evf_adapter *adapter,
-						  unsigned long *cookie)
-{
-	struct i40evf_cloud_filter *filter = NULL;
-
-	if (!cookie)
-		return NULL;
-
-	list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
-		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
-			return filter;
-	}
-	return NULL;
-}
-
-/**
- * i40evf_delete_clsflower - Remove tc flower filters
- * @adapter: board private structure
- * @cls_flower: Pointer to struct tc_cls_flower_offload
- */
-static int i40evf_delete_clsflower(struct i40evf_adapter *adapter,
-				   struct tc_cls_flower_offload *cls_flower)
-{
-	struct i40evf_cloud_filter *filter = NULL;
-	int err = 0;
-
-	spin_lock_bh(&adapter->cloud_filter_list_lock);
-	filter = i40evf_find_cf(adapter, &cls_flower->cookie);
-	if (filter) {
-		filter->del = true;
-		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
-	} else {
-		err = -EINVAL;
-	}
-	spin_unlock_bh(&adapter->cloud_filter_list_lock);
-
-	return err;
-}
-
-/**
- * i40evf_setup_tc_cls_flower - flower classifier offloads
- * @adapter: board private structure
- * @cls_flower: pointer to struct tc_cls_flower_offload
- */
-static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter,
-				      struct tc_cls_flower_offload *cls_flower)
-{
-	if (cls_flower->common.chain_index)
-		return -EOPNOTSUPP;
-
-	switch (cls_flower->command) {
-	case TC_CLSFLOWER_REPLACE:
-		return i40evf_configure_clsflower(adapter, cls_flower);
-	case TC_CLSFLOWER_DESTROY:
-		return i40evf_delete_clsflower(adapter, cls_flower);
-	case TC_CLSFLOWER_STATS:
-		return -EOPNOTSUPP;
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-/**
- * i40evf_setup_tc_block_cb - block callback for tc
- * @type: type of offload
- * @type_data: offload data
- * @cb_priv: board private structure registered with the block callback
- *
- * This function is the block callback for traffic classes
- **/
-static int i40evf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
-				    void *cb_priv)
-{
-	switch (type) {
-	case TC_SETUP_CLSFLOWER:
-		return i40evf_setup_tc_cls_flower(cb_priv, type_data);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-/**
- * i40evf_setup_tc_block - register callbacks for tc
- * @dev: network interface device structure
- * @f: tc offload data
- *
- * This function registers block callbacks for tc
- * offloads
- **/
-static int i40evf_setup_tc_block(struct net_device *dev,
-				 struct tc_block_offload *f)
-{
-	struct i40evf_adapter *adapter = netdev_priv(dev);
-
-	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
-		return -EOPNOTSUPP;
-
-	switch (f->command) {
-	case TC_BLOCK_BIND:
-		return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb,
-					     adapter, adapter, f->extack);
-	case TC_BLOCK_UNBIND:
-		tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb,
-					adapter);
-		return 0;
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-/**
- * i40evf_setup_tc - configure multiple traffic classes
- * @netdev: network interface device structure
- * @type: type of offload
- * @type_data: tc offload data
- *
- * This function is the callback to ndo_setup_tc in the
- * netdev_ops.
- *
- * Returns 0 on success
- **/
-static int i40evf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
-			   void *type_data)
-{
-	switch (type) {
-	case TC_SETUP_QDISC_MQPRIO:
-		return __i40evf_setup_tc(netdev, type_data);
-	case TC_SETUP_BLOCK:
-		return i40evf_setup_tc_block(netdev, type_data);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-/**
- * i40evf_open - Called when a network interface is made active
- * @netdev: network interface device structure
- *
- * Returns 0 on success, negative value on failure
- *
- * The open entry point is called when a network interface is made
- * active by the system (IFF_UP).  At this point all resources needed
- * for transmit and receive operations are allocated, the interrupt
- * handler is registered with the OS, the watchdog timer is started,
- * and the stack is notified that the interface is ready.
- **/
-static int i40evf_open(struct net_device *netdev)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	int err;
-
-	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
-		dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
-		return -EIO;
-	}
-
-	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
-				&adapter->crit_section))
-		usleep_range(500, 1000);
-
-	if (adapter->state != __I40EVF_DOWN) {
-		err = -EBUSY;
-		goto err_unlock;
-	}
-
-	/* allocate transmit descriptors */
-	err = i40evf_setup_all_tx_resources(adapter);
-	if (err)
-		goto err_setup_tx;
-
-	/* allocate receive descriptors */
-	err = i40evf_setup_all_rx_resources(adapter);
-	if (err)
-		goto err_setup_rx;
-
-	/* clear any pending interrupts, may auto mask */
-	err = i40evf_request_traffic_irqs(adapter, netdev->name);
-	if (err)
-		goto err_req_irq;
-
-	spin_lock_bh(&adapter->mac_vlan_list_lock);
-
-	i40evf_add_filter(adapter, adapter->hw.mac.addr);
-
-	spin_unlock_bh(&adapter->mac_vlan_list_lock);
-
-	i40evf_configure(adapter);
-
-	i40evf_up_complete(adapter);
-
-	i40evf_irq_enable(adapter, true);
-
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
-
-	return 0;
-
-err_req_irq:
-	i40evf_down(adapter);
-	i40evf_free_traffic_irqs(adapter);
-err_setup_rx:
-	i40evf_free_all_rx_resources(adapter);
-err_setup_tx:
-	i40evf_free_all_tx_resources(adapter);
-err_unlock:
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
-
-	return err;
-}
-
-/**
- * i40evf_close - Disables a network interface
- * @netdev: network interface device structure
- *
- * Returns 0, this is not allowed to fail
- *
- * The close entry point is called when an interface is de-activated
- * by the OS.  The hardware is still under the driver's control, but
- * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
- * are freed, along with all transmit and receive resources.
- **/
-static int i40evf_close(struct net_device *netdev)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	int status;
-
-	if (adapter->state <= __I40EVF_DOWN_PENDING)
-		return 0;
-
-	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
-				&adapter->crit_section))
-		usleep_range(500, 1000);
-
-	set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
-	if (CLIENT_ENABLED(adapter))
-		adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
-
-	i40evf_down(adapter);
-	adapter->state = __I40EVF_DOWN_PENDING;
-	i40evf_free_traffic_irqs(adapter);
-
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
-
-	/* We explicitly don't free resources here because the hardware is
-	 * still active and can DMA into memory. Resources are cleared in
-	 * i40evf_virtchnl_completion() after we get confirmation from the PF
-	 * driver that the rings have been stopped.
-	 *
-	 * Also, we wait for state to transition to __I40EVF_DOWN before
-	 * returning. State change occurs in i40evf_virtchnl_completion() after
-	 * VF resources are released (which occurs after PF driver processes and
-	 * responds to admin queue commands).
-	 */
-
-	status = wait_event_timeout(adapter->down_waitqueue,
-				    adapter->state == __I40EVF_DOWN,
-				    msecs_to_jiffies(200));
-	if (!status)
-		netdev_warn(netdev, "Device resources not yet released\n");
-	return 0;
-}
-
-/**
- * i40evf_change_mtu - Change the Maximum Transfer Unit
- * @netdev: network interface device structure
- * @new_mtu: new value for maximum frame size
- *
- * Returns 0 on success, negative on failure
- **/
-static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-
-	netdev->mtu = new_mtu;
-	if (CLIENT_ENABLED(adapter)) {
-		i40evf_notify_client_l2_params(&adapter->vsi);
-		adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
-	}
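-	/* schedule a reset so the device picks up the new MTU */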
-	adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
-	schedule_work(&adapter->reset_task);
-
-	return 0;
-}
-
-/**
- * i40evf_set_features - set the netdev feature flags
- * @netdev: ptr to the netdev being adjusted
- * @features: the feature set that the stack is suggesting
- * Note: expects to be called while under rtnl_lock()
- **/
-static int i40evf_set_features(struct net_device *netdev,
-			       netdev_features_t features)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-
-	/* Don't allow changing VLAN_RX flag when adapter is not capable
-	 * of VLAN offload
-	 */
-	if (!VLAN_ALLOWED(adapter)) {
-		if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX)
-			return -EINVAL;
-	} else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
-		if (features & NETIF_F_HW_VLAN_CTAG_RX)
-			adapter->aq_required |=
-				I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
-		else
-			adapter->aq_required |=
-				I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
-	}
-
-	return 0;
-}
-
-/**
- * i40evf_features_check - Validate encapsulated packet conforms to limits
- * @skb: skb buff
- * @dev: This physical port's netdev
- * @features: Offload features that the stack believes apply
- **/
-static netdev_features_t i40evf_features_check(struct sk_buff *skb,
-					       struct net_device *dev,
-					       netdev_features_t features)
-{
-	size_t len;
-
-	/* No point in doing any of this if neither checksum nor GSO are
-	 * being requested for this frame.  We can rule out both by just
-	 * checking for CHECKSUM_PARTIAL
-	 */
-	if (skb->ip_summed != CHECKSUM_PARTIAL)
-		return features;
-
-	/* We cannot support GSO if the MSS is going to be less than
-	 * 64 bytes.  If it is then we need to drop support for GSO.
-	 */
-	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
-		features &= ~NETIF_F_GSO_MASK;
-
-	/* MACLEN can support at most 63 words */
-	len = skb_network_header(skb) - skb->data;
-	if (len & ~(63 * 2))
-		goto out_err;
-
-	/* IPLEN and EIPLEN can support at most 127 dwords */
-	len = skb_transport_header(skb) - skb_network_header(skb);
-	if (len & ~(127 * 4))
-		goto out_err;
-
-	if (skb->encapsulation) {
-		/* L4TUNLEN can support 127 words */
-		len = skb_inner_network_header(skb) - skb_transport_header(skb);
-		if (len & ~(127 * 2))
-			goto out_err;
-
-		/* IPLEN can support at most 127 dwords */
-		len = skb_inner_transport_header(skb) -
-		      skb_inner_network_header(skb);
-		if (len & ~(127 * 4))
-			goto out_err;
-	}
-
-	/* No need to validate L4LEN as TCP is the only protocol with a
-	 * flexible value and we support all possible values supported
-	 * by TCP, which is at most 15 dwords
-	 */
-
-	return features;
-out_err:
-	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
-}
-
-/**
- * i40evf_fix_features - fix up the netdev feature bits
- * @netdev: our net device
- * @features: desired feature bits
- *
- * Returns fixed-up features bits
- **/
-static netdev_features_t i40evf_fix_features(struct net_device *netdev,
-					     netdev_features_t features)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-
-	if (adapter->vf_res &&
-	    !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
-		features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
-			      NETIF_F_HW_VLAN_CTAG_RX |
-			      NETIF_F_HW_VLAN_CTAG_FILTER);
-
-	return features;
-}
-
-static const struct net_device_ops i40evf_netdev_ops = {
-	.ndo_open		= i40evf_open,
-	.ndo_stop		= i40evf_close,
-	.ndo_start_xmit		= i40evf_xmit_frame,
-	.ndo_set_rx_mode	= i40evf_set_rx_mode,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= i40evf_set_mac,
-	.ndo_change_mtu		= i40evf_change_mtu,
-	.ndo_tx_timeout		= i40evf_tx_timeout,
-	.ndo_vlan_rx_add_vid	= i40evf_vlan_rx_add_vid,
-	.ndo_vlan_rx_kill_vid	= i40evf_vlan_rx_kill_vid,
-	.ndo_features_check	= i40evf_features_check,
-	.ndo_fix_features	= i40evf_fix_features,
-	.ndo_set_features	= i40evf_set_features,
-	.ndo_setup_tc		= i40evf_setup_tc,
-};
-
-/**
- * i40evf_check_reset_complete - check that VF reset is complete
- * @hw: pointer to hw struct
- *
- * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
- **/
-static int i40evf_check_reset_complete(struct i40e_hw *hw)
-{
-	u32 rstat;
-	int i;
-
-	for (i = 0; i < 100; i++) {
-		rstat = rd32(hw, I40E_VFGEN_RSTAT) &
-			    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-		if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
-		    (rstat == VIRTCHNL_VFR_COMPLETED))
-			return 0;
-		usleep_range(10, 20);
-	}
-	return -EBUSY;
-}
-
-/**
- * i40evf_process_config - Process the config information we got from the PF
- * @adapter: board private structure
- *
- * Verify that we have a valid config struct, and set up our netdev features
- * and our VSI struct.
- **/
-int i40evf_process_config(struct i40evf_adapter *adapter)
-{
-	struct virtchnl_vf_resource *vfres = adapter->vf_res;
-	int i, num_req_queues = adapter->num_req_queues;
-	struct net_device *netdev = adapter->netdev;
-	struct i40e_vsi *vsi = &adapter->vsi;
-	netdev_features_t hw_enc_features;
-	netdev_features_t hw_features;
-
-	/* got VF config message back from PF, now we can parse it */
-	for (i = 0; i < vfres->num_vsis; i++) {
-		if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
-			adapter->vsi_res = &vfres->vsi_res[i];
-	}
-	if (!adapter->vsi_res) {
-		dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
-		return -ENODEV;
-	}
-
-	if (num_req_queues &&
-	    num_req_queues != adapter->vsi_res->num_queue_pairs) {
-		/* Problem.  The PF gave us fewer queues than what we had
-		 * negotiated in our request.  Need a reset to see if we can
-		 * get back to a working state.
-		 */
-		dev_err(&adapter->pdev->dev,
-			"Requested %d queues, but PF only gave us %d.\n",
-			num_req_queues,
-			adapter->vsi_res->num_queue_pairs);
-		adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
-		adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
-		i40evf_schedule_reset(adapter);
-		return -ENODEV;
-	}
-	adapter->num_req_queues = 0;
-
-	hw_enc_features = NETIF_F_SG			|
-			  NETIF_F_IP_CSUM		|
-			  NETIF_F_IPV6_CSUM		|
-			  NETIF_F_HIGHDMA		|
-			  NETIF_F_SOFT_FEATURES	|
-			  NETIF_F_TSO			|
-			  NETIF_F_TSO_ECN		|
-			  NETIF_F_TSO6			|
-			  NETIF_F_SCTP_CRC		|
-			  NETIF_F_RXHASH		|
-			  NETIF_F_RXCSUM		|
-			  0;
-
-	/* advertise to stack only if offloads for encapsulated packets is
-	 * supported
-	 */
-	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
-		hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL	|
-				   NETIF_F_GSO_GRE		|
-				   NETIF_F_GSO_GRE_CSUM		|
-				   NETIF_F_GSO_IPXIP4		|
-				   NETIF_F_GSO_IPXIP6		|
-				   NETIF_F_GSO_UDP_TUNNEL_CSUM	|
-				   NETIF_F_GSO_PARTIAL		|
-				   0;
-
-		if (!(vfres->vf_cap_flags &
-		      VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
-			netdev->gso_partial_features |=
-				NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
-		netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
-		netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
-		netdev->hw_enc_features |= hw_enc_features;
-	}
-	/* record features VLANs can make use of */
-	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
-
-	/* Write features and hw_features separately to avoid polluting
-	 * with, or dropping, features that are set when we registered.
-	 */
-	hw_features = hw_enc_features;
-
-	/* Enable VLAN features if supported */
-	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
-		hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
-				NETIF_F_HW_VLAN_CTAG_RX);
-	/* Enable cloud filter if ADQ is supported */
-	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
-		hw_features |= NETIF_F_HW_TC;
-
-	netdev->hw_features |= hw_features;
-
-	netdev->features |= hw_features;
-
-	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
-		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-
-	netdev->priv_flags |= IFF_UNICAST_FLT;
-
-	/* Do not turn on offloads when they are requested to be turned off.
-	 * TSO needs minimum 576 bytes to work correctly.
-	 */
-	if (netdev->wanted_features) {
-		if (!(netdev->wanted_features & NETIF_F_TSO) ||
-		    netdev->mtu < 576)
-			netdev->features &= ~NETIF_F_TSO;
-		if (!(netdev->wanted_features & NETIF_F_TSO6) ||
-		    netdev->mtu < 576)
-			netdev->features &= ~NETIF_F_TSO6;
-		if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
-			netdev->features &= ~NETIF_F_TSO_ECN;
-		if (!(netdev->wanted_features & NETIF_F_GRO))
-			netdev->features &= ~NETIF_F_GRO;
-		if (!(netdev->wanted_features & NETIF_F_GSO))
-			netdev->features &= ~NETIF_F_GSO;
-	}
-
-	adapter->vsi.id = adapter->vsi_res->vsi_id;
-
-	adapter->vsi.back = adapter;
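-	/* vector 0 is reserved for the admin queue; traffic vectors start at 1 */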
-	adapter->vsi.base_vector = 1;
-	adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
-	vsi->netdev = adapter->netdev;
-	vsi->qs_handle = adapter->vsi_res->qset_handle;
-	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
-		adapter->rss_key_size = vfres->rss_key_size;
-		adapter->rss_lut_size = vfres->rss_lut_size;
-	} else {
-		adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE;
-		adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE;
-	}
-
-	return 0;
-}
-
-/**
- * i40evf_init_task - worker thread to perform delayed initialization
- * @work: pointer to work_struct containing our data
- *
- * This task completes the work that was begun in probe. Due to the nature
- * of VF-PF communications, we may need to wait tens of milliseconds to get
- * responses back from the PF. Rather than busy-wait in probe and bog down the
- * whole system, we'll do it in a task so we can sleep.
- * This task only runs during driver init. Once we've established
- * communications with the PF driver and set up our netdev, the watchdog
- * takes over.
- **/
-static void i40evf_init_task(struct work_struct *work)
-{
-	struct i40evf_adapter *adapter = container_of(work,
-						      struct i40evf_adapter,
-						      init_task.work);
-	struct net_device *netdev = adapter->netdev;
-	struct i40e_hw *hw = &adapter->hw;
-	struct pci_dev *pdev = adapter->pdev;
-	int err, bufsz;
-
-	switch (adapter->state) {
-	case __I40EVF_STARTUP:
-		/* driver loaded, probe complete */
-		adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
-		adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
-		err = i40e_set_mac_type(hw);
-		if (err) {
-			dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
-				err);
-			goto err;
-		}
-		err = i40evf_check_reset_complete(hw);
-		if (err) {
-			dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
-				 err);
-			goto err;
-		}
-		hw->aq.num_arq_entries = I40EVF_AQ_LEN;
-		hw->aq.num_asq_entries = I40EVF_AQ_LEN;
-		hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
-		hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
-
-		err = i40evf_init_adminq(hw);
-		if (err) {
-			dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
-				err);
-			goto err;
-		}
-		err = i40evf_send_api_ver(adapter);
-		if (err) {
-			dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
-			i40evf_shutdown_adminq(hw);
-			goto err;
-		}
-		adapter->state = __I40EVF_INIT_VERSION_CHECK;
-		goto restart;
-	case __I40EVF_INIT_VERSION_CHECK:
-		if (!i40evf_asq_done(hw)) {
-			dev_err(&pdev->dev, "Admin queue command never completed\n");
-			i40evf_shutdown_adminq(hw);
-			adapter->state = __I40EVF_STARTUP;
-			goto err;
-		}
-
-		/* aq msg sent, awaiting reply */
-		err = i40evf_verify_api_ver(adapter);
-		if (err) {
-			if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
-				err = i40evf_send_api_ver(adapter);
-			else
-				dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
-					adapter->pf_version.major,
-					adapter->pf_version.minor,
-					VIRTCHNL_VERSION_MAJOR,
-					VIRTCHNL_VERSION_MINOR);
-			goto err;
-		}
-		err = i40evf_send_vf_config_msg(adapter);
-		if (err) {
-			dev_err(&pdev->dev, "Unable to send config request (%d)\n",
-				err);
-			goto err;
-		}
-		adapter->state = __I40EVF_INIT_GET_RESOURCES;
-		goto restart;
-	case __I40EVF_INIT_GET_RESOURCES:
-		/* aq msg sent, awaiting reply */
-		if (!adapter->vf_res) {
-			bufsz = sizeof(struct virtchnl_vf_resource) +
-				(I40E_MAX_VF_VSI *
-				 sizeof(struct virtchnl_vsi_resource));
-			adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
-			if (!adapter->vf_res)
-				goto err;
-		}
-		err = i40evf_get_vf_config(adapter);
-		if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
-			err = i40evf_send_vf_config_msg(adapter);
-			goto err;
-		} else if (err == I40E_ERR_PARAM) {
-			/* We only get ERR_PARAM if the device is in a very bad
-			 * state or if we've been disabled for previous bad
-			 * behavior. Either way, we're done now.
-			 */
-			i40evf_shutdown_adminq(hw);
-			dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
-			return;
-		}
-		if (err) {
-			dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
-				err);
-			goto err_alloc;
-		}
-		adapter->state = __I40EVF_INIT_SW;
-		break;
-	default:
-		goto err_alloc;
-	}
-
-	if (i40evf_process_config(adapter))
-		goto err_alloc;
-	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
-
-	adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
-
-	netdev->netdev_ops = &i40evf_netdev_ops;
-	i40evf_set_ethtool_ops(netdev);
-	netdev->watchdog_timeo = 5 * HZ;
-
-	/* MTU range: 68 - 9710 */
-	netdev->min_mtu = ETH_MIN_MTU;
-	netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
-
-	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
-		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
-			 adapter->hw.mac.addr);
-		eth_hw_addr_random(netdev);
-		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
-	} else {
-		adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF;
-		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
-		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
-	}
-
-	timer_setup(&adapter->watchdog_timer, i40evf_watchdog_timer, 0);
-	mod_timer(&adapter->watchdog_timer, jiffies + 1);
-
-	adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
-	adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
-	err = i40evf_init_interrupt_scheme(adapter);
-	if (err)
-		goto err_sw_init;
-	i40evf_map_rings_to_vectors(adapter);
-	if (adapter->vf_res->vf_cap_flags &
-	    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
-		adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
-
-	err = i40evf_request_misc_irq(adapter);
-	if (err)
-		goto err_sw_init;
-
-	netif_carrier_off(netdev);
-	adapter->link_up = false;
-
-	if (!adapter->netdev_registered) {
-		err = register_netdev(netdev);
-		if (err)
-			goto err_register;
-	}
-
-	adapter->netdev_registered = true;
-
-	netif_tx_stop_all_queues(netdev);
-	if (CLIENT_ALLOWED(adapter)) {
-		err = i40evf_lan_add_device(adapter);
-		if (err)
-			dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
-				 err);
-	}
-
-	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
-	if (netdev->features & NETIF_F_GRO)
-		dev_info(&pdev->dev, "GRO is enabled\n");
-
-	adapter->state = __I40EVF_DOWN;
-	set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
-	i40evf_misc_irq_enable(adapter);
-	wake_up(&adapter->down_waitqueue);
-
-	adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
-	adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
-	if (!adapter->rss_key || !adapter->rss_lut)
-		goto err_mem;
-
-	if (RSS_AQ(adapter)) {
-		adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
-		mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
-	} else {
-		i40evf_init_rss(adapter);
-	}
-	return;
-restart:
-	schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
-	return;
-err_mem:
-	i40evf_free_rss(adapter);
-err_register:
-	i40evf_free_misc_irq(adapter);
-err_sw_init:
-	i40evf_reset_interrupt_capability(adapter);
-err_alloc:
-	kfree(adapter->vf_res);
-	adapter->vf_res = NULL;
-err:
-	/* Things went into the weeds, so try again later */
-	if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
-		dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
-		adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
-		i40evf_shutdown_adminq(hw);
-		adapter->state = __I40EVF_STARTUP;
-		schedule_delayed_work(&adapter->init_task, HZ * 5);
-		return;
-	}
-	schedule_delayed_work(&adapter->init_task, HZ);
-}
-
-/**
- * i40evf_shutdown - Shutdown the device in preparation for a reboot
- * @pdev: pci device structure
- **/
-static void i40evf_shutdown(struct pci_dev *pdev)
-{
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-
-	netif_device_detach(netdev);
-
-	if (netif_running(netdev))
-		i40evf_close(netdev);
-
-	/* Prevent the watchdog from running. */
-	adapter->state = __I40EVF_REMOVE;
-	adapter->aq_required = 0;
-
-#ifdef CONFIG_PM
-	pci_save_state(pdev);
-
-#endif
-	pci_disable_device(pdev);
-}
-
-/**
- * i40evf_probe - Device Initialization Routine
- * @pdev: PCI device information struct
- * @ent: entry in i40evf_pci_tbl
- *
- * Returns 0 on success, negative on failure
- *
- * i40evf_probe initializes an adapter identified by a pci_dev structure.
- * The OS initialization, configuring of the adapter private structure,
- * and a hardware reset occur.
- **/
-static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-	struct net_device *netdev;
-	struct i40evf_adapter *adapter = NULL;
-	struct i40e_hw *hw = NULL;
-	int err;
-
-	err = pci_enable_device(pdev);
-	if (err)
-		return err;
-
-	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-	if (err) {
-		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-		if (err) {
-			dev_err(&pdev->dev,
-				"DMA configuration failed: 0x%x\n", err);
-			goto err_dma;
-		}
-	}
-
-	err = pci_request_regions(pdev, i40evf_driver_name);
-	if (err) {
-		dev_err(&pdev->dev,
-			"pci_request_regions failed 0x%x\n", err);
-		goto err_pci_reg;
-	}
-
-	pci_enable_pcie_error_reporting(pdev);
-
-	pci_set_master(pdev);
-
-	netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter),
-				   I40EVF_MAX_REQ_QUEUES);
-	if (!netdev) {
-		err = -ENOMEM;
-		goto err_alloc_etherdev;
-	}
-
-	SET_NETDEV_DEV(netdev, &pdev->dev);
-
-	pci_set_drvdata(pdev, netdev);
-	adapter = netdev_priv(netdev);
-
-	adapter->netdev = netdev;
-	adapter->pdev = pdev;
-
-	hw = &adapter->hw;
-	hw->back = adapter;
-
-	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
-	adapter->state = __I40EVF_STARTUP;
-
-	/* Call save state here because it relies on the adapter struct. */
-	pci_save_state(pdev);
-
-	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
-			      pci_resource_len(pdev, 0));
-	if (!hw->hw_addr) {
-		err = -EIO;
-		goto err_ioremap;
-	}
-	hw->vendor_id = pdev->vendor;
-	hw->device_id = pdev->device;
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
-	hw->subsystem_vendor_id = pdev->subsystem_vendor;
-	hw->subsystem_device_id = pdev->subsystem_device;
-	hw->bus.device = PCI_SLOT(pdev->devfn);
-	hw->bus.func = PCI_FUNC(pdev->devfn);
-	hw->bus.bus_id = pdev->bus->number;
-
-	/* set up the locks for the AQ, do this only once in probe
-	 * and destroy them only once in remove
-	 */
-	mutex_init(&hw->aq.asq_mutex);
-	mutex_init(&hw->aq.arq_mutex);
-
-	spin_lock_init(&adapter->mac_vlan_list_lock);
-	spin_lock_init(&adapter->cloud_filter_list_lock);
-
-	INIT_LIST_HEAD(&adapter->mac_filter_list);
-	INIT_LIST_HEAD(&adapter->vlan_filter_list);
-	INIT_LIST_HEAD(&adapter->cloud_filter_list);
-
-	INIT_WORK(&adapter->reset_task, i40evf_reset_task);
-	INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
-	INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
-	INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task);
-	INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
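-	/* stagger the first init_task run by 5 ms per PCI function number */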
-	schedule_delayed_work(&adapter->init_task,
-			      msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
-
-	/* Setup the wait queue for indicating transition to down status */
-	init_waitqueue_head(&adapter->down_waitqueue);
-
-	return 0;
-
-err_ioremap:
-	free_netdev(netdev);
-err_alloc_etherdev:
-	pci_disable_pcie_error_reporting(pdev);
-	pci_release_regions(pdev);
-err_pci_reg:
-err_dma:
-	pci_disable_device(pdev);
-	return err;
-}
-
-#ifdef CONFIG_PM
-/**
- * i40evf_suspend - Power management suspend routine
- * @pdev: PCI device information struct
- * @state: unused
- *
- * Called when the system (VM) is entering sleep/suspend.
- **/
-static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
-{
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	int retval = 0;
-
-	netif_device_detach(netdev);
-
-	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
-				&adapter->crit_section))
-		usleep_range(500, 1000);
-
-	if (netif_running(netdev)) {
-		rtnl_lock();
-		i40evf_down(adapter);
-		rtnl_unlock();
-	}
-	i40evf_free_misc_irq(adapter);
-	i40evf_reset_interrupt_capability(adapter);
-
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
-
-	retval = pci_save_state(pdev);
-	if (retval)
-		return retval;
-
-	pci_disable_device(pdev);
-
-	return 0;
-}
-
-/**
- * i40evf_resume - Power management resume routine
- * @pdev: PCI device information struct
- *
- * Called when the system (VM) is resumed from sleep/suspend.
- **/
-static int i40evf_resume(struct pci_dev *pdev)
-{
-	struct i40evf_adapter *adapter = pci_get_drvdata(pdev);
-	struct net_device *netdev = adapter->netdev;
-	u32 err;
-
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-	/* pci_restore_state clears dev->state_saved so call
-	 * pci_save_state to restore it.
-	 */
-	pci_save_state(pdev);
-
-	err = pci_enable_device_mem(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
-		return err;
-	}
-	pci_set_master(pdev);
-
-	rtnl_lock();
-	err = i40evf_set_interrupt_capability(adapter);
-	if (err) {
-		rtnl_unlock();
-		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
-		return err;
-	}
-	err = i40evf_request_misc_irq(adapter);
-	rtnl_unlock();
-	if (err) {
-		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
-		return err;
-	}
-
-	schedule_work(&adapter->reset_task);
-
-	netif_device_attach(netdev);
-
-	return err;
-}
-
-#endif /* CONFIG_PM */
-/**
- * i40evf_remove - Device Removal Routine
- * @pdev: PCI device information struct
- *
- * i40evf_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device.  This could be caused by a
- * Hot-Plug event, or because the driver is going to be removed from
- * memory.
- **/
-static void i40evf_remove(struct pci_dev *pdev)
-{
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	struct i40evf_vlan_filter *vlf, *vlftmp;
-	struct i40evf_mac_filter *f, *ftmp;
-	struct i40evf_cloud_filter *cf, *cftmp;
-	struct i40e_hw *hw = &adapter->hw;
-	int err;
-	/* Indicate we are in remove and not to run reset_task */
-	set_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section);
-	cancel_delayed_work_sync(&adapter->init_task);
-	cancel_work_sync(&adapter->reset_task);
-	cancel_delayed_work_sync(&adapter->client_task);
-	if (adapter->netdev_registered) {
-		unregister_netdev(netdev);
-		adapter->netdev_registered = false;
-	}
-	if (CLIENT_ALLOWED(adapter)) {
-		err = i40evf_lan_del_device(adapter);
-		if (err)
-			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
-				 err);
-	}
-
-	/* Shut down all the garbage mashers on the detention level */
-	adapter->state = __I40EVF_REMOVE;
-	adapter->aq_required = 0;
-	adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
-	i40evf_request_reset(adapter);
-	msleep(50);
-	/* If the FW isn't responding, kick it once, but only once. */
-	if (!i40evf_asq_done(hw)) {
-		i40evf_request_reset(adapter);
-		msleep(50);
-	}
-	i40evf_free_all_tx_resources(adapter);
-	i40evf_free_all_rx_resources(adapter);
-	i40evf_misc_irq_disable(adapter);
-	i40evf_free_misc_irq(adapter);
-	i40evf_reset_interrupt_capability(adapter);
-	i40evf_free_q_vectors(adapter);
-
-	if (adapter->watchdog_timer.function)
-		del_timer_sync(&adapter->watchdog_timer);
-
-	cancel_work_sync(&adapter->adminq_task);
-
-	i40evf_free_rss(adapter);
-
-	if (hw->aq.asq.count)
-		i40evf_shutdown_adminq(hw);
-
-	/* destroy the locks only once, here */
-	mutex_destroy(&hw->aq.arq_mutex);
-	mutex_destroy(&hw->aq.asq_mutex);
-
-	iounmap(hw->hw_addr);
-	pci_release_regions(pdev);
-	i40evf_free_all_tx_resources(adapter);
-	i40evf_free_all_rx_resources(adapter);
-	i40evf_free_queues(adapter);
-	kfree(adapter->vf_res);
-	spin_lock_bh(&adapter->mac_vlan_list_lock);
-	/* If we got removed before an up/down sequence, we've got a filter
-	 * hanging out there that we need to get rid of.
-	 */
-	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
-		list_del(&f->list);
-		kfree(f);
-	}
-	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
-				 list) {
-		list_del(&vlf->list);
-		kfree(vlf);
-	}
-
-	spin_unlock_bh(&adapter->mac_vlan_list_lock);
-
-	spin_lock_bh(&adapter->cloud_filter_list_lock);
-	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
-		list_del(&cf->list);
-		kfree(cf);
-	}
-	spin_unlock_bh(&adapter->cloud_filter_list_lock);
-
-	free_netdev(netdev);
-
-	pci_disable_pcie_error_reporting(pdev);
-
-	pci_disable_device(pdev);
-}
-
-static struct pci_driver i40evf_driver = {
-	.name     = i40evf_driver_name,
-	.id_table = i40evf_pci_tbl,
-	.probe    = i40evf_probe,
-	.remove   = i40evf_remove,
-#ifdef CONFIG_PM
-	.suspend  = i40evf_suspend,
-	.resume   = i40evf_resume,
-#endif
-	.shutdown = i40evf_shutdown,
-};
-
-/**
- * i40evf_init_module - Driver Registration Routine
- *
- * i40evf_init_module is the first routine called when the driver is
- * loaded. All it does is register with the PCI subsystem.
- **/
-static int __init i40evf_init_module(void)
-{
-	int ret;
-
-	pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
-		i40evf_driver_version);
-
-	pr_info("%s\n", i40evf_copyright);
-
-	i40evf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
-				    i40evf_driver_name);
-	if (!i40evf_wq) {
-		pr_err("%s: Failed to create workqueue\n", i40evf_driver_name);
-		return -ENOMEM;
-	}
-	ret = pci_register_driver(&i40evf_driver);
-	return ret;
-}
-
-module_init(i40evf_init_module);
-
-/**
- * i40evf_exit_module - Driver Exit Cleanup Routine
- *
- * i40evf_exit_module is called just before the driver is removed
- * from memory.
- **/
-static void __exit i40evf_exit_module(void)
-{
-	pci_unregister_driver(&i40evf_driver);
-	destroy_workqueue(i40evf_wq);
-}
-
-module_exit(i40evf_exit_module);
-
-/* i40evf_main.c */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
deleted file mode 100644
index 94dabc9d89f7..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ /dev/null
@@ -1,1458 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#include "i40evf.h"
-#include "i40e_prototype.h"
-#include "i40evf_client.h"
-
-/* busy wait delay in msec */
-#define I40EVF_BUSY_WAIT_DELAY 10
-#define I40EVF_BUSY_WAIT_COUNT 50
-
-/**
- * i40evf_send_pf_msg
- * @adapter: adapter structure
- * @op: virtual channel opcode
- * @msg: pointer to message buffer
- * @len: message length
- *
- * Send message to PF and print status if failure.
- **/
-static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
-			      enum virtchnl_ops op, u8 *msg, u16 len)
-{
-	struct i40e_hw *hw = &adapter->hw;
-	i40e_status err;
-
-	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
-		return 0; /* nothing to see here, move along */
-
-	err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
-	if (err)
-		dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
-			op, i40evf_stat_str(hw, err),
-			i40evf_aq_str(hw, hw->aq.asq_last_status));
-	return err;
-}
-
-/**
- * i40evf_send_api_ver
- * @adapter: adapter structure
- *
- * Send API version admin queue message to the PF. The reply is not checked
- * in this function. Returns 0 if the message was successfully
- * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
- **/
-int i40evf_send_api_ver(struct i40evf_adapter *adapter)
-{
-	struct virtchnl_version_info vvi;
-
-	vvi.major = VIRTCHNL_VERSION_MAJOR;
-	vvi.minor = VIRTCHNL_VERSION_MINOR;
-
-	return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
-				  sizeof(vvi));
-}
-
-/**
- * i40evf_verify_api_ver
- * @adapter: adapter structure
- *
- * Compare API versions with the PF. Must be called after admin queue is
- * initialized. Returns 0 if API versions match, -EIO if they do not,
- * I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
- * from the firmware are propagated.
- **/
-int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
-{
-	struct virtchnl_version_info *pf_vvi;
-	struct i40e_hw *hw = &adapter->hw;
-	struct i40e_arq_event_info event;
-	enum virtchnl_ops op;
-	i40e_status err;
-
-	event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
-	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
-	if (!event.msg_buf) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	while (1) {
-		err = i40evf_clean_arq_element(hw, &event, NULL);
-		/* When the AQ is empty, i40evf_clean_arq_element will return
-		 * nonzero and this loop will terminate.
-		 */
-		if (err)
-			goto out_alloc;
-		op =
-		    (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
-		if (op == VIRTCHNL_OP_VERSION)
-			break;
-	}
-
-
-	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
-	if (err)
-		goto out_alloc;
-
-	if (op != VIRTCHNL_OP_VERSION) {
-		dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
-			op);
-		err = -EIO;
-		goto out_alloc;
-	}
-
-	pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
-	adapter->pf_version = *pf_vvi;
-
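-	/* fail if the PF reports a newer API version than we support */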
-	if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
-	    ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
-	     (pf_vvi->minor > VIRTCHNL_VERSION_MINOR)))
-		err = -EIO;
-
-out_alloc:
-	kfree(event.msg_buf);
-out:
-	return err;
-}
-
-/**
- * i40evf_send_vf_config_msg
- * @adapter: adapter structure
- *
- * Send VF configuration request admin queue message to the PF. The reply
- * is not checked in this function. Returns 0 if the message was
- * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
- **/
-int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
-{
-	u32 caps;
-
-	caps = VIRTCHNL_VF_OFFLOAD_L2 |
-	       VIRTCHNL_VF_OFFLOAD_RSS_PF |
-	       VIRTCHNL_VF_OFFLOAD_RSS_AQ |
-	       VIRTCHNL_VF_OFFLOAD_RSS_REG |
-	       VIRTCHNL_VF_OFFLOAD_VLAN |
-	       VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
-	       VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
-	       VIRTCHNL_VF_OFFLOAD_ENCAP |
-	       VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
-	       VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
-	       VIRTCHNL_VF_OFFLOAD_ADQ;
-
-	adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
-	if (PF_IS_V11(adapter))
-		return i40evf_send_pf_msg(adapter,
-					  VIRTCHNL_OP_GET_VF_RESOURCES,
-					  (u8 *)&caps, sizeof(caps));
-	else
-		return i40evf_send_pf_msg(adapter,
-					  VIRTCHNL_OP_GET_VF_RESOURCES,
-					  NULL, 0);
-}
-
-/**
- * i40evf_validate_num_queues
- * @adapter: adapter structure
- *
- * Validate that the number of queues the PF has sent in
- * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle.
- **/
-static void i40evf_validate_num_queues(struct i40evf_adapter *adapter)
-{
-	if (adapter->vf_res->num_queue_pairs > I40EVF_MAX_REQ_QUEUES) {
-		struct virtchnl_vsi_resource *vsi_res;
-		int i;
-
-		dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n",
-			 adapter->vf_res->num_queue_pairs,
-			 I40EVF_MAX_REQ_QUEUES);
-		dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n",
-			 I40EVF_MAX_REQ_QUEUES);
-		adapter->vf_res->num_queue_pairs = I40EVF_MAX_REQ_QUEUES;
-		for (i = 0; i < adapter->vf_res->num_vsis; i++) {
-			vsi_res = &adapter->vf_res->vsi_res[i];
-			vsi_res->num_queue_pairs = I40EVF_MAX_REQ_QUEUES;
-		}
-	}
-}
-
-/**
- * i40evf_get_vf_config
- * @adapter: private adapter structure
- *
- * Get VF configuration from PF and populate hw structure. Must be called after
- * admin queue is initialized. Busy waits until response is received from PF,
- * with maximum timeout. Response from PF is returned in the buffer for further
- * processing by the caller.
- **/
-int i40evf_get_vf_config(struct i40evf_adapter *adapter)
-{
-	struct i40e_hw *hw = &adapter->hw;
-	struct i40e_arq_event_info event;
-	enum virtchnl_ops op;
-	i40e_status err;
-	u16 len;
-
-	len =  sizeof(struct virtchnl_vf_resource) +
-		I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
-	event.buf_len = len;
-	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
-	if (!event.msg_buf) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	while (1) {
-		/* When the AQ is empty, i40evf_clean_arq_element will return
-		 * nonzero and this loop will terminate.
-		 */
-		err = i40evf_clean_arq_element(hw, &event, NULL);
-		if (err)
-			goto out_alloc;
-		op =
-		    (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
-		if (op == VIRTCHNL_OP_GET_VF_RESOURCES)
-			break;
-	}
-
-	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
-	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
-
-	/* some PFs send more queues than we should have so validate that
-	 * we aren't getting too many queues
-	 */
-	if (!err)
-		i40evf_validate_num_queues(adapter);
-	i40e_vf_parse_hw_config(hw, adapter->vf_res);
-out_alloc:
-	kfree(event.msg_buf);
-out:
-	return err;
-}
-
-/**
- * i40evf_configure_queues
- * @adapter: adapter structure
- *
- * Request that the PF set up our (previously allocated) queues.
- **/
-void i40evf_configure_queues(struct i40evf_adapter *adapter)
-{
-	struct virtchnl_vsi_queue_config_info *vqci;
-	struct virtchnl_queue_pair_info *vqpi;
-	int pairs = adapter->num_active_queues;
-	int i, len, max_frame = I40E_MAX_RXBUFFER;
-
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
-			adapter->current_op);
-		return;
-	}
-	adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
-	len = sizeof(struct virtchnl_vsi_queue_config_info) +
-		       (sizeof(struct virtchnl_queue_pair_info) * pairs);
-	vqci = kzalloc(len, GFP_KERNEL);
-	if (!vqci)
-		return;
-
-	/* Limit maximum frame size when jumbo frames is not enabled */
-	if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX) &&
-	    (adapter->netdev->mtu <= ETH_DATA_LEN))
-		max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
-
-	vqci->vsi_id = adapter->vsi_res->vsi_id;
-	vqci->num_queue_pairs = pairs;
-	vqpi = vqci->qpair;
-	/* Size check is not needed here - HW max is 16 queue pairs, and we
-	 * can fit info for 31 of them into the AQ buffer before it overflows.
-	 */
-	for (i = 0; i < pairs; i++) {
-		vqpi->txq.vsi_id = vqci->vsi_id;
-		vqpi->txq.queue_id = i;
-		vqpi->txq.ring_len = adapter->tx_rings[i].count;
-		vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
-		vqpi->rxq.vsi_id = vqci->vsi_id;
-		vqpi->rxq.queue_id = i;
-		vqpi->rxq.ring_len = adapter->rx_rings[i].count;
-		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
-		vqpi->rxq.max_pkt_size = max_frame;
-		vqpi->rxq.databuffer_size =
-			ALIGN(adapter->rx_rings[i].rx_buf_len,
-			      BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
-		vqpi++;
-	}
-
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
-	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
-			   (u8 *)vqci, len);
-	kfree(vqci);
-}
-
-/**
- * i40evf_enable_queues
- * @adapter: adapter structure
- *
- * Request that the PF enable all of our queues.
- **/
-void i40evf_enable_queues(struct i40evf_adapter *adapter)
-{
-	struct virtchnl_queue_select vqs;
-
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
-			adapter->current_op);
-		return;
-	}
-	adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
-	vqs.vsi_id = adapter->vsi_res->vsi_id;
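-	/* build a bitmap with one bit set per active queue pair */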
-	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
-	vqs.rx_queues = vqs.tx_queues;
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
-	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
-			   (u8 *)&vqs, sizeof(vqs));
-}
-
-/**
- * i40evf_disable_queues
- * @adapter: adapter structure
- *
- * Request that the PF disable all of our queues.
- **/
-void i40evf_disable_queues(struct i40evf_adapter *adapter)
-{
-	struct virtchnl_queue_select vqs;
-
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
-			adapter->current_op);
-		return;
-	}
-	adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
-	vqs.vsi_id = adapter->vsi_res->vsi_id;
-	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
-	vqs.rx_queues = vqs.tx_queues;
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
-	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
-			   (u8 *)&vqs, sizeof(vqs));
-}
-
-/**
- * i40evf_map_queues
- * @adapter: adapter structure
- *
- * Request that the PF map queues to interrupt vectors. Misc causes, including
- * admin queue, are always mapped to vector 0.
- **/
-void i40evf_map_queues(struct i40evf_adapter *adapter)
-{
-	struct virtchnl_irq_map_info *vimi;
-	struct virtchnl_vector_map *vecmap;
-	int v_idx, q_vectors, len;
-	struct i40e_q_vector *q_vector;
-
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
-			adapter->current_op);
-		return;
-	}
-	adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
-
-	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
-
-	len = sizeof(struct virtchnl_irq_map_info) +
-	      (adapter->num_msix_vectors *
-		sizeof(struct virtchnl_vector_map));
-	vimi = kzalloc(len, GFP_KERNEL);
-	if (!vimi)
-		return;
-
-	vimi->num_vectors = adapter->num_msix_vectors;
-	/* Queue vectors first */
-	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
-		q_vector = &adapter->q_vectors[v_idx];
-		vecmap = &vimi->vecmap[v_idx];
-
-		vecmap->vsi_id = adapter->vsi_res->vsi_id;
-		vecmap->vector_id = v_idx + NONQ_VECS;
-		vecmap->txq_map = q_vector->ring_mask;
-		vecmap->rxq_map = q_vector->ring_mask;
-		vecmap->rxitr_idx = I40E_RX_ITR;
-		vecmap->txitr_idx = I40E_TX_ITR;
-	}
-	/* Misc vector last - this is only for AdminQ messages */
-	vecmap = &vimi->vecmap[v_idx];
-	vecmap->vsi_id = adapter->vsi_res->vsi_id;
-	vecmap->vector_id = 0;
-	vecmap->txq_map = 0;
-	vecmap->rxq_map = 0;
-
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
-	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
-			   (u8 *)vimi, len);
-	kfree(vimi);
-}
-
-/**
- * i40evf_request_queues
- * @adapter: adapter structure
- * @num: number of requested queues
- *
- * We get a default number of queues from the PF.  This enables us to request a
- * different number.  Returns 0 on success, negative on failure
- **/
-int i40evf_request_queues(struct i40evf_adapter *adapter, int num)
-{
-	struct virtchnl_vf_res_request vfres;
-
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot request queues, command %d pending\n",
-			adapter->current_op);
-		return -EBUSY;
-	}
-
-	vfres.num_queue_pairs = num;
-
-	adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES;
-	adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
-	return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES,
-				  (u8 *)&vfres, sizeof(vfres));
-}
-
-/**
- * i40evf_add_ether_addrs
- * @adapter: adapter structure
- *
- * Request that the PF add one or more addresses to our filters.
- **/
-void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
-{
-	struct virtchnl_ether_addr_list *veal;
-	int len, i = 0, count = 0;
-	struct i40evf_mac_filter *f;
-	bool more = false;
-
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
-			adapter->current_op);
-		return;
-	}
-
-	spin_lock_bh(&adapter->mac_vlan_list_lock);
-
-	list_for_each_entry(f, &adapter->mac_filter_list, list) {
-		if (f->add)
-			count++;
-	}
-	if (!count) {
-		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
-		spin_unlock_bh(&adapter->mac_vlan_list_lock);
-		return;
-	}
-	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
-
-	len = sizeof(struct virtchnl_ether_addr_list) +
-	      (count * sizeof(struct virtchnl_ether_addr));
-	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
-		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
-		count = (I40EVF_MAX_AQ_BUF_SIZE -
-			 sizeof(struct virtchnl_ether_addr_list)) /
-			sizeof(struct virtchnl_ether_addr);
-		len = sizeof(struct virtchnl_ether_addr_list) +
-		      (count * sizeof(struct virtchnl_ether_addr));
-		more = true;
-	}
-
-	veal = kzalloc(len, GFP_ATOMIC);
-	if (!veal) {
-		spin_unlock_bh(&adapter->mac_vlan_list_lock);
-		return;
-	}
-
-	veal->vsi_id = adapter->vsi_res->vsi_id;
-	veal->num_elements = count;
-	list_for_each_entry(f, &adapter->mac_filter_list, list) {
-		if (f->add) {
-			ether_addr_copy(veal->list[i].addr, f->macaddr);
-			i++;
-			f->add = false;
-			if (i == count)
-				break;
-		}
-	}
-	if (!more)
-		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
-
-	spin_unlock_bh(&adapter->mac_vlan_list_lock);
-
-	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR,
-			   (u8 *)veal, len);
-	kfree(veal);
-}
-
-/**
- * i40evf_del_ether_addrs
- * @adapter: adapter structure
- *
- * Request that the PF remove one or more addresses from our filters.
- **/
-void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
-{
-	struct virtchnl_ether_addr_list *veal;
-	struct i40evf_mac_filter *f, *ftmp;
-	int len, i = 0, count = 0;
-	bool more = false;
-
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
-			adapter->current_op);
-		return;
-	}
-
-	spin_lock_bh(&adapter->mac_vlan_list_lock);
-
-	list_for_each_entry(f, &adapter->mac_filter_list, list) {
-		if (f->remove)
-			count++;
-	}
-	if (!count) {
-		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
-		spin_unlock_bh(&adapter->mac_vlan_list_lock);
-		return;
-	}
-	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
-
-	len = sizeof(struct virtchnl_ether_addr_list) +
-	      (count * sizeof(struct virtchnl_ether_addr));
-	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
-		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
-		count = (I40EVF_MAX_AQ_BUF_SIZE -
-			 sizeof(struct virtchnl_ether_addr_list)) /
-			sizeof(struct virtchnl_ether_addr);
-		len = sizeof(struct virtchnl_ether_addr_list) +
-		      (count * sizeof(struct virtchnl_ether_addr));
-		more = true;
-	}
-	veal = kzalloc(len, GFP_ATOMIC);
-	if (!veal) {
-		spin_unlock_bh(&adapter->mac_vlan_list_lock);
-		return;
-	}
-
-	veal->vsi_id = adapter->vsi_res->vsi_id;
-	veal->num_elements = count;
-	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
-		if (f->remove) {
-			ether_addr_copy(veal->list[i].addr, f->macaddr);
-			i++;
-			list_del(&f->list);
-			kfree(f);
-			if (i == count)
-				break;
-		}
-	}
-	if (!more)
-		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
-
-	spin_unlock_bh(&adapter->mac_vlan_list_lock);
-
-	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR,
-			   (u8 *)veal, len);
-	kfree(veal);
-}
-
-/**
- * i40evf_add_vlans
- * @adapter: adapter structure
- *
- * Request that the PF add one or more VLAN filters to our VSI.
- **/
-void i40evf_add_vlans(struct i40evf_adapter *adapter)
-{
-	struct virtchnl_vlan_filter_list *vvfl;
-	int len, i = 0, count = 0;
-	struct i40evf_vlan_filter *f;
-	bool more = false;
-
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
-			adapter->current_op);
-		return;
-	}
-
-	spin_lock_bh(&adapter->mac_vlan_list_lock);
-
-	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
-		if (f->add)
-			count++;
-	}
-	if (!count) {
-		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
-		spin_unlock_bh(&adapter->mac_vlan_list_lock);
-		return;
-	}
-	adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
-
-	len = sizeof(struct virtchnl_vlan_filter_list) +
-	      (count * sizeof(u16));
-	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
-		dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
-		count = (I40EVF_MAX_AQ_BUF_SIZE -
-			 sizeof(struct virtchnl_vlan_filter_list)) /
-			sizeof(u16);
-		len = sizeof(struct virtchnl_vlan_filter_list) +
-		      (count * sizeof(u16));
-		more = true;
-	}
-	vvfl = kzalloc(len, GFP_ATOMIC);
-	if (!vvfl) {
-		spin_unlock_bh(&adapter->mac_vlan_list_lock);
-		return;
-	}
-
-	vvfl->vsi_id = adapter->vsi_res->vsi_id;
-	vvfl->num_elements = count;
-	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
-		if (f->add) {
-			vvfl->vlan_id[i] = f->vlan;
-			i++;
-			f->add = false;
-			if (i == count)
-				break;
-		}
-	}
-	if (!more)
-		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
-
-	spin_unlock_bh(&adapter->mac_vlan_list_lock);
-
-	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
-	kfree(vvfl);
-}
-
-/**
- * i40evf_del_vlans
- * @adapter: adapter structure
- *
- * Request that the PF remove one or more VLAN filters from our VSI.
- **/
-void i40evf_del_vlans(struct i40evf_adapter *adapter)
-{
-	struct virtchnl_vlan_filter_list *vvfl;
-	struct i40evf_vlan_filter *f, *ftmp;
-	int len, i = 0, count = 0;
-	bool more = false;
-
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
-			adapter->current_op);
-		return;
-	}
-
-	spin_lock_bh(&adapter->mac_vlan_list_lock);
-
-	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
-		if (f->remove)
-			count++;
-	}
-	if (!count) {
-		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
-		spin_unlock_bh(&adapter->mac_vlan_list_lock);
-		return;
-	}
-	adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
-
-	len = sizeof(struct virtchnl_vlan_filter_list) +
-	      (count * sizeof(u16));
-	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
-		dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
-		count = (I40EVF_MAX_AQ_BUF_SIZE -
-			 sizeof(struct virtchnl_vlan_filter_list)) /
-			sizeof(u16);
-		len = sizeof(struct virtchnl_vlan_filter_list) +
-		      (count * sizeof(u16));
-		more = true;
-	}
-	vvfl = kzalloc(len, GFP_ATOMIC);
-	if (!vvfl) {
-		spin_unlock_bh(&adapter->mac_vlan_list_lock);
-		return;
-	}
-
-	vvfl->vsi_id = adapter->vsi_res->vsi_id;
-	vvfl->num_elements = count;
-	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
-		if (f->remove) {
-			vvfl->vlan_id[i] = f->vlan;
-			i++;
-			list_del(&f->list);
-			kfree(f);
-			if (i == count)
-				break;
-		}
-	}
-	if (!more)
-		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
-
-	spin_unlock_bh(&adapter->mac_vlan_list_lock);
-
-	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
-	kfree(vvfl);
-}
-
-/**
- * i40evf_set_promiscuous
- * @adapter: adapter structure
- * @flags: bitmask to control unicast/multicast promiscuous.
- *
- * Request that the PF enable promiscuous mode for our VSI.
- **/
-void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
-{
-	struct virtchnl_promisc_info vpi;
-	int promisc_all;
-
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
-			adapter->current_op);
-		return;
-	}
-
-	promisc_all = FLAG_VF_UNICAST_PROMISC |
-		      FLAG_VF_MULTICAST_PROMISC;
-	if ((flags & promisc_all) == promisc_all) {
-		adapter->flags |= I40EVF_FLAG_PROMISC_ON;
-		adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC;
-		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
-	}
-
-	if (flags & FLAG_VF_MULTICAST_PROMISC) {
-		adapter->flags |= I40EVF_FLAG_ALLMULTI_ON;
-		adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
-		dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
-	}
-
-	if (!flags) {
-		adapter->flags &= ~(I40EVF_FLAG_PROMISC_ON |
-				    I40EVF_FLAG_ALLMULTI_ON);
-		adapter->aq_required &= ~(I40EVF_FLAG_AQ_RELEASE_PROMISC |
-					  I40EVF_FLAG_AQ_RELEASE_ALLMULTI);
-		dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
-	}
-
-	adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
-	vpi.vsi_id = adapter->vsi_res->vsi_id;
-	vpi.flags = flags;
-	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
-			   (u8 *)&vpi, sizeof(vpi));
-}
-
-/**
- * i40evf_request_stats
- * @adapter: adapter structure
- *
- * Request VSI statistics from PF.
- **/
-void i40evf_request_stats(struct i40evf_adapter *adapter)
-{
-	struct virtchnl_queue_select vqs;
-
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* no error message, this isn't crucial */
-		return;
-	}
-	adapter->current_op = VIRTCHNL_OP_GET_STATS;
-	vqs.vsi_id = adapter->vsi_res->vsi_id;
-	/* queue maps are ignored for this message - only the vsi is used */
-	if (i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS,
-			       (u8 *)&vqs, sizeof(vqs)))
-		/* if the request failed, don't lock out others */
-		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
-}
-
-/**
- * i40evf_get_hena
- * @adapter: adapter structure
- *
- * Request hash enable capabilities from PF
- **/
-void i40evf_get_hena(struct i40evf_adapter *adapter)
-{
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
-			adapter->current_op);
-		return;
-	}
-	adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA;
-	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
-			   NULL, 0);
-}
-
-/**
- * i40evf_set_hena
- * @adapter: adapter structure
- *
- * Request the PF to set our RSS hash capabilities
- **/
-void i40evf_set_hena(struct i40evf_adapter *adapter)
-{
-	struct virtchnl_rss_hena vrh;
-
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
-			adapter->current_op);
-		return;
-	}
-	vrh.hena = adapter->hena;
-	adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA;
-	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA,
-			   (u8 *)&vrh, sizeof(vrh));
-}
-
-/**
- * i40evf_set_rss_key
- * @adapter: adapter structure
- *
- * Request the PF to set our RSS hash key
- **/
-void i40evf_set_rss_key(struct i40evf_adapter *adapter)
-{
-	struct virtchnl_rss_key *vrk;
-	int len;
-
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
-			adapter->current_op);
-		return;
-	}
-	len = sizeof(struct virtchnl_rss_key) +
-	      (adapter->rss_key_size * sizeof(u8)) - 1;
-	vrk = kzalloc(len, GFP_KERNEL);
-	if (!vrk)
-		return;
-	vrk->vsi_id = adapter->vsi.id;
-	vrk->key_len = adapter->rss_key_size;
-	memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
-
-	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY;
-	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY,
-			   (u8 *)vrk, len);
-	kfree(vrk);
-}
-
-/**
- * i40evf_set_rss_lut
- * @adapter: adapter structure
- *
- * Request the PF to set our RSS lookup table
- **/
-void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
-{
-	struct virtchnl_rss_lut *vrl;
-	int len;
-
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
-			adapter->current_op);
-		return;
-	}
-	len = sizeof(struct virtchnl_rss_lut) +
-	      (adapter->rss_lut_size * sizeof(u8)) - 1;
-	vrl = kzalloc(len, GFP_KERNEL);
-	if (!vrl)
-		return;
-	vrl->vsi_id = adapter->vsi.id;
-	vrl->lut_entries = adapter->rss_lut_size;
-	memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
-	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT;
-	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT,
-			   (u8 *)vrl, len);
-	kfree(vrl);
-}
-
-/**
- * i40evf_enable_vlan_stripping
- * @adapter: adapter structure
- *
- * Request VLAN header stripping to be enabled
- **/
-void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter)
-{
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n",
-			adapter->current_op);
-		return;
-	}
-	adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
-	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
-			   NULL, 0);
-}
-
-/**
- * i40evf_disable_vlan_stripping
- * @adapter: adapter structure
- *
- * Request VLAN header stripping to be disabled
- **/
-void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter)
-{
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n",
-			adapter->current_op);
-		return;
-	}
-	adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
-	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
-			   NULL, 0);
-}
-
-/**
- * i40evf_print_link_message - print link up or down
- * @adapter: adapter structure
- *
- * Log a message telling the world of our wondrous link status
- */
-static void i40evf_print_link_message(struct i40evf_adapter *adapter)
-{
-	struct net_device *netdev = adapter->netdev;
-	char *speed = "Unknown ";
-
-	if (!adapter->link_up) {
-		netdev_info(netdev, "NIC Link is Down\n");
-		return;
-	}
-
-	switch (adapter->link_speed) {
-	case I40E_LINK_SPEED_40GB:
-		speed = "40 G";
-		break;
-	case I40E_LINK_SPEED_25GB:
-		speed = "25 G";
-		break;
-	case I40E_LINK_SPEED_20GB:
-		speed = "20 G";
-		break;
-	case I40E_LINK_SPEED_10GB:
-		speed = "10 G";
-		break;
-	case I40E_LINK_SPEED_1GB:
-		speed = "1000 M";
-		break;
-	case I40E_LINK_SPEED_100MB:
-		speed = "100 M";
-		break;
-	default:
-		break;
-	}
-
-	netdev_info(netdev, "NIC Link is Up %sbps Full Duplex\n", speed);
-}
-
-/**
- * i40evf_enable_channels
- * @adapter: adapter structure
- *
- * Request that the PF enable channels as specified by
- * the user via tc tool.
- **/
-void i40evf_enable_channels(struct i40evf_adapter *adapter)
-{
-	struct virtchnl_tc_info *vti = NULL;
-	u16 len;
-	int i;
-
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
-			adapter->current_op);
-		return;
-	}
-
-	len = (adapter->num_tc * sizeof(struct virtchnl_channel_info)) +
-	       sizeof(struct virtchnl_tc_info);
-
-	vti = kzalloc(len, GFP_KERNEL);
-	if (!vti)
-		return;
-	vti->num_tc = adapter->num_tc;
-	for (i = 0; i < vti->num_tc; i++) {
-		vti->list[i].count = adapter->ch_config.ch_info[i].count;
-		vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
-		vti->list[i].pad = 0;
-		vti->list[i].max_tx_rate =
-				adapter->ch_config.ch_info[i].max_tx_rate;
-	}
-
-	adapter->ch_config.state = __I40EVF_TC_RUNNING;
-	adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
-	adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_CHANNELS;
-	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS,
-			   (u8 *)vti, len);
-	kfree(vti);
-}
-
-/**
- * i40evf_disable_channels
- * @adapter: adapter structure
- *
- * Request that the PF disable channels that are configured
- **/
-void i40evf_disable_channels(struct i40evf_adapter *adapter)
-{
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
-			adapter->current_op);
-		return;
-	}
-
-	adapter->ch_config.state = __I40EVF_TC_INVALID;
-	adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
-	adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_CHANNELS;
-	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS,
-			   NULL, 0);
-}
-
-/**
- * i40evf_print_cloud_filter
- * @adapter: adapter structure
- * @f: cloud filter to print
- *
- * Print the cloud filter
- **/
-static void i40evf_print_cloud_filter(struct i40evf_adapter *adapter,
-				      struct virtchnl_filter *f)
-{
-	switch (f->flow_type) {
-	case VIRTCHNL_TCP_V4_FLOW:
-		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
-			 &f->data.tcp_spec.dst_mac,
-			 &f->data.tcp_spec.src_mac,
-			 ntohs(f->data.tcp_spec.vlan_id),
-			 &f->data.tcp_spec.dst_ip[0],
-			 &f->data.tcp_spec.src_ip[0],
-			 ntohs(f->data.tcp_spec.dst_port),
-			 ntohs(f->data.tcp_spec.src_port));
-		break;
-	case VIRTCHNL_TCP_V6_FLOW:
-		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
-			 &f->data.tcp_spec.dst_mac,
-			 &f->data.tcp_spec.src_mac,
-			 ntohs(f->data.tcp_spec.vlan_id),
-			 &f->data.tcp_spec.dst_ip,
-			 &f->data.tcp_spec.src_ip,
-			 ntohs(f->data.tcp_spec.dst_port),
-			 ntohs(f->data.tcp_spec.src_port));
-		break;
-	}
-}
-
-/**
- * i40evf_add_cloud_filter
- * @adapter: adapter structure
- *
- * Request that the PF add cloud filters as specified
- * by the user via tc tool.
- **/
-void i40evf_add_cloud_filter(struct i40evf_adapter *adapter)
-{
-	struct i40evf_cloud_filter *cf;
-	struct virtchnl_filter *f;
-	int len = 0, count = 0;
-
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n",
-			adapter->current_op);
-		return;
-	}
-	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
-		if (cf->add) {
-			count++;
-			break;
-		}
-	}
-	if (!count) {
-		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
-		return;
-	}
-	adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;
-
-	len = sizeof(struct virtchnl_filter);
-	f = kzalloc(len, GFP_KERNEL);
-	if (!f)
-		return;
-
-	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
-		if (cf->add) {
-			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
-			cf->add = false;
-			cf->state = __I40EVF_CF_ADD_PENDING;
-			i40evf_send_pf_msg(adapter,
-					   VIRTCHNL_OP_ADD_CLOUD_FILTER,
-					   (u8 *)f, len);
-		}
-	}
-	kfree(f);
-}
-
-/**
- * i40evf_del_cloud_filter
- * @adapter: adapter structure
- *
- * Request that the PF delete cloud filters as specified
- * by the user via tc tool.
- **/
-void i40evf_del_cloud_filter(struct i40evf_adapter *adapter)
-{
-	struct i40evf_cloud_filter *cf, *cftmp;
-	struct virtchnl_filter *f;
-	int len = 0, count = 0;
-
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n",
-			adapter->current_op);
-		return;
-	}
-	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
-		if (cf->del) {
-			count++;
-			break;
-		}
-	}
-	if (!count) {
-		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
-		return;
-	}
-	adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER;
-
-	len = sizeof(struct virtchnl_filter);
-	f = kzalloc(len, GFP_KERNEL);
-	if (!f)
-		return;
-
-	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
-		if (cf->del) {
-			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
-			cf->del = false;
-			cf->state = __I40EVF_CF_DEL_PENDING;
-			i40evf_send_pf_msg(adapter,
-					   VIRTCHNL_OP_DEL_CLOUD_FILTER,
-					   (u8 *)f, len);
-		}
-	}
-	kfree(f);
-}
-
-/**
- * i40evf_request_reset
- * @adapter: adapter structure
- *
- * Request that the PF reset this VF. No response is expected.
- **/
-void i40evf_request_reset(struct i40evf_adapter *adapter)
-{
-	/* Don't check CURRENT_OP - this is always higher priority */
-	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
-	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
-}
-
-/**
- * i40evf_virtchnl_completion
- * @adapter: adapter structure
- * @v_opcode: opcode sent by PF
- * @v_retval: retval sent by PF
- * @msg: message sent by PF
- * @msglen: message length
- *
- * Asynchronous completion function for admin queue messages. Rather than busy
- * wait, we fire off our requests and assume that no errors will be returned.
- * This function handles the reply messages.
- **/
-void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
-				enum virtchnl_ops v_opcode,
-				i40e_status v_retval,
-				u8 *msg, u16 msglen)
-{
-	struct net_device *netdev = adapter->netdev;
-
-	if (v_opcode == VIRTCHNL_OP_EVENT) {
-		struct virtchnl_pf_event *vpe =
-			(struct virtchnl_pf_event *)msg;
-		bool link_up = vpe->event_data.link_event.link_status;
-		switch (vpe->event) {
-		case VIRTCHNL_EVENT_LINK_CHANGE:
-			adapter->link_speed =
-				vpe->event_data.link_event.link_speed;
-
-			/* we've already got the right link status, bail */
-			if (adapter->link_up == link_up)
-				break;
-
-			if (link_up) {
-				/* If we get a link up message and start queues
-				 * before our queues are configured it will
-				 * trigger a TX hang. In that case, just ignore
-				 * the link status message; we'll get another one
-				 * after we enable queues and are actually
-				 * prepared to send traffic.
-				 */
-				if (adapter->state != __I40EVF_RUNNING)
-					break;
-
-				/* For ADq enabled VF, we reconfigure VSIs and
-				 * re-allocate queues. Hence wait till all
-				 * queues are enabled.
-				 */
-				if (adapter->flags &
-				    I40EVF_FLAG_QUEUES_DISABLED)
-					break;
-			}
-
-			adapter->link_up = link_up;
-			if (link_up) {
-				netif_tx_start_all_queues(netdev);
-				netif_carrier_on(netdev);
-			} else {
-				netif_tx_stop_all_queues(netdev);
-				netif_carrier_off(netdev);
-			}
-			i40evf_print_link_message(adapter);
-			break;
-		case VIRTCHNL_EVENT_RESET_IMPENDING:
-			dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n");
-			if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
-				adapter->flags |= I40EVF_FLAG_RESET_PENDING;
-				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
-				schedule_work(&adapter->reset_task);
-			}
-			break;
-		default:
-			dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
-				vpe->event);
-			break;
-		}
-		return;
-	}
-	if (v_retval) {
-		switch (v_opcode) {
-		case VIRTCHNL_OP_ADD_VLAN:
-			dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
-				i40evf_stat_str(&adapter->hw, v_retval));
-			break;
-		case VIRTCHNL_OP_ADD_ETH_ADDR:
-			dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
-				i40evf_stat_str(&adapter->hw, v_retval));
-			break;
-		case VIRTCHNL_OP_DEL_VLAN:
-			dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
-				i40evf_stat_str(&adapter->hw, v_retval));
-			break;
-		case VIRTCHNL_OP_DEL_ETH_ADDR:
-			dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
-				i40evf_stat_str(&adapter->hw, v_retval));
-			break;
-		case VIRTCHNL_OP_ENABLE_CHANNELS:
-			dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n",
-				i40evf_stat_str(&adapter->hw, v_retval));
-			adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
-			adapter->ch_config.state = __I40EVF_TC_INVALID;
-			netdev_reset_tc(netdev);
-			netif_tx_start_all_queues(netdev);
-			break;
-		case VIRTCHNL_OP_DISABLE_CHANNELS:
-			dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n",
-				i40evf_stat_str(&adapter->hw, v_retval));
-			adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
-			adapter->ch_config.state = __I40EVF_TC_RUNNING;
-			netif_tx_start_all_queues(netdev);
-			break;
-		case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
-			struct i40evf_cloud_filter *cf, *cftmp;
-
-			list_for_each_entry_safe(cf, cftmp,
-						 &adapter->cloud_filter_list,
-						 list) {
-				if (cf->state == __I40EVF_CF_ADD_PENDING) {
-					cf->state = __I40EVF_CF_INVALID;
-					dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n",
-						 i40evf_stat_str(&adapter->hw,
-								 v_retval));
-					i40evf_print_cloud_filter(adapter,
-								  &cf->f);
-					list_del(&cf->list);
-					kfree(cf);
-					adapter->num_cloud_filters--;
-				}
-			}
-			}
-			break;
-		case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
-			struct i40evf_cloud_filter *cf;
-
-			list_for_each_entry(cf, &adapter->cloud_filter_list,
-					    list) {
-				if (cf->state == __I40EVF_CF_DEL_PENDING) {
-					cf->state = __I40EVF_CF_ACTIVE;
-					dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n",
-						 i40evf_stat_str(&adapter->hw,
-								 v_retval));
-					i40evf_print_cloud_filter(adapter,
-								  &cf->f);
-				}
-			}
-			}
-			break;
-		default:
-			dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
-				v_retval,
-				i40evf_stat_str(&adapter->hw, v_retval),
-				v_opcode);
-		}
-	}
-	switch (v_opcode) {
-	case VIRTCHNL_OP_GET_STATS: {
-		struct i40e_eth_stats *stats =
-			(struct i40e_eth_stats *)msg;
-		netdev->stats.rx_packets = stats->rx_unicast +
-					   stats->rx_multicast +
-					   stats->rx_broadcast;
-		netdev->stats.tx_packets = stats->tx_unicast +
-					   stats->tx_multicast +
-					   stats->tx_broadcast;
-		netdev->stats.rx_bytes = stats->rx_bytes;
-		netdev->stats.tx_bytes = stats->tx_bytes;
-		netdev->stats.tx_errors = stats->tx_errors;
-		netdev->stats.rx_dropped = stats->rx_discards;
-		netdev->stats.tx_dropped = stats->tx_discards;
-		adapter->current_stats = *stats;
-		}
-		break;
-	case VIRTCHNL_OP_GET_VF_RESOURCES: {
-		u16 len = sizeof(struct virtchnl_vf_resource) +
-			  I40E_MAX_VF_VSI *
-			  sizeof(struct virtchnl_vsi_resource);
-		memcpy(adapter->vf_res, msg, min(msglen, len));
-		i40evf_validate_num_queues(adapter);
-		i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
-		/* restore current mac address */
-		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
-		i40evf_process_config(adapter);
-		}
-		break;
-	case VIRTCHNL_OP_ENABLE_QUEUES:
-		/* enable transmits */
-		i40evf_irq_enable(adapter, true);
-		adapter->flags &= ~I40EVF_FLAG_QUEUES_DISABLED;
-		break;
-	case VIRTCHNL_OP_DISABLE_QUEUES:
-		i40evf_free_all_tx_resources(adapter);
-		i40evf_free_all_rx_resources(adapter);
-		if (adapter->state == __I40EVF_DOWN_PENDING) {
-			adapter->state = __I40EVF_DOWN;
-			wake_up(&adapter->down_waitqueue);
-		}
-		break;
-	case VIRTCHNL_OP_VERSION:
-	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
-		/* Don't display an error if we get these out of sequence.
-		 * If the firmware needed to get kicked, we'll get these and
-		 * it's no problem.
-		 */
-		if (v_opcode != adapter->current_op)
-			return;
-		break;
-	case VIRTCHNL_OP_IWARP:
-		/* Gobble zero-length replies from the PF. They indicate that
-		 * a previous message was received OK, and the client doesn't
-		 * care about that.
-		 */
-		if (msglen && CLIENT_ENABLED(adapter))
-			i40evf_notify_client_message(&adapter->vsi,
-						     msg, msglen);
-		break;
-
-	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
-		adapter->client_pending &=
-				~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
-		break;
-	case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
-		struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
-		if (msglen == sizeof(*vrh))
-			adapter->hena = vrh->hena;
-		else
-			dev_warn(&adapter->pdev->dev,
-				 "Invalid message %d from PF\n", v_opcode);
-		}
-		break;
-	case VIRTCHNL_OP_REQUEST_QUEUES: {
-		struct virtchnl_vf_res_request *vfres =
-			(struct virtchnl_vf_res_request *)msg;
-		if (vfres->num_queue_pairs != adapter->num_req_queues) {
-			dev_info(&adapter->pdev->dev,
-				 "Requested %d queues, PF can support %d\n",
-				 adapter->num_req_queues,
-				 vfres->num_queue_pairs);
-			adapter->num_req_queues = 0;
-			adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
-		}
-		}
-		break;
-	case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
-		struct i40evf_cloud_filter *cf;
-
-		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
-			if (cf->state == __I40EVF_CF_ADD_PENDING)
-				cf->state = __I40EVF_CF_ACTIVE;
-		}
-		}
-		break;
-	case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
-		struct i40evf_cloud_filter *cf, *cftmp;
-
-		list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
-					 list) {
-			if (cf->state == __I40EVF_CF_DEL_PENDING) {
-				cf->state = __I40EVF_CF_INVALID;
-				list_del(&cf->list);
-				kfree(cf);
-				adapter->num_cloud_filters--;
-			}
-		}
-		}
-		break;
-	default:
-		if (adapter->current_op && (v_opcode != adapter->current_op))
-			dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
-				 adapter->current_op, v_opcode);
-		break;
-	} /* switch v_opcode */
-	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
-}
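
Every request helper removed above follows the same virtchnl convention: only one
command may be outstanding at a time, so each function bails out if
adapter->current_op is not VIRTCHNL_OP_UNKNOWN, and i40evf_virtchnl_completion()
resets it once the PF has replied (i40evf_request_reset() is the one deliberate
exception, since a reset outranks whatever is pending). A minimal stand-alone
sketch of that gate, using made-up names (vf_state, send_request,
handle_completion) rather than driver symbols:

#include <stdbool.h>
#include <stdio.h>

/* illustrative stand-in for enum virtchnl_ops */
enum op { OP_UNKNOWN = 0, OP_ENABLE_QUEUES = 1 };

/* illustrative stand-in for struct i40evf_adapter */
struct vf_state {
	enum op current_op;	/* mirrors adapter->current_op */
};

/* refuse a new request while another command is still pending */
static bool send_request(struct vf_state *vf, enum op op)
{
	if (vf->current_op != OP_UNKNOWN) {
		printf("cannot send %d, command %d pending\n",
		       op, vf->current_op);
		return false;
	}
	vf->current_op = op;
	/* the real driver would post the message to the PF here */
	return true;
}

/* the PF's reply arrives later; clearing current_op re-opens the gate */
static void handle_completion(struct vf_state *vf)
{
	vf->current_op = OP_UNKNOWN;
}

int main(void)
{
	struct vf_state vf = { .current_op = OP_UNKNOWN };

	send_request(&vf, OP_ENABLE_QUEUES);	/* accepted */
	send_request(&vf, OP_ENABLE_QUEUES);	/* refused: still pending */
	handle_completion(&vf);
	send_request(&vf, OP_ENABLE_QUEUES);	/* accepted again */
	return 0;
}
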
diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile
new file mode 100644
index 000000000000..1b050d9d5f49
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2013 - 2018 Intel Corporation.
+#
+# Makefile for the Intel(R) Ethernet Adaptive Virtual Function (iavf)
+# driver
+#
+#
+
+ccflags-y += -I$(src)
+subdir-ccflags-y += -I$(src)
+
+obj-$(CONFIG_IAVF) += iavf.o
+
+iavf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \
+	      i40e_txrx.o i40e_common.o i40e_adminq.o i40evf_client.o
diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq.c b/drivers/net/ethernet/intel/iavf/i40e_adminq.c
new file mode 100644
index 000000000000..32e0e2d9cdc5
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40e_adminq.c
@@ -0,0 +1,940 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include "i40e_status.h"
+#include "i40e_type.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+
+/**
+ *  i40e_adminq_init_regs - Initialize AdminQ registers
+ *  @hw: pointer to the hardware structure
+ *
+ *  This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+static void i40e_adminq_init_regs(struct i40e_hw *hw)
+{
+	/* set head and tail registers in our local struct */
+	if (i40e_is_vf(hw)) {
+		hw->aq.asq.tail = I40E_VF_ATQT1;
+		hw->aq.asq.head = I40E_VF_ATQH1;
+		hw->aq.asq.len  = I40E_VF_ATQLEN1;
+		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
+		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
+		hw->aq.arq.tail = I40E_VF_ARQT1;
+		hw->aq.arq.head = I40E_VF_ARQH1;
+		hw->aq.arq.len  = I40E_VF_ARQLEN1;
+		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
+		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
+	}
+}
+
+/**
+ *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ *  @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+{
+	i40e_status ret_code;
+
+	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+					 i40e_mem_atq_ring,
+					 (hw->aq.num_asq_entries *
+					 sizeof(struct i40e_aq_desc)),
+					 I40E_ADMINQ_DESC_ALIGNMENT);
+	if (ret_code)
+		return ret_code;
+
+	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+					  (hw->aq.num_asq_entries *
+					  sizeof(struct i40e_asq_cmd_details)));
+	if (ret_code) {
+		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+		return ret_code;
+	}
+
+	return ret_code;
+}
+
+/**
+ *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ *  @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+{
+	i40e_status ret_code;
+
+	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+					 i40e_mem_arq_ring,
+					 (hw->aq.num_arq_entries *
+					 sizeof(struct i40e_aq_desc)),
+					 I40E_ADMINQ_DESC_ALIGNMENT);
+
+	return ret_code;
+}
+
+/**
+ *  i40e_free_adminq_asq - Free Admin Queue send rings
+ *  @hw: pointer to the hardware structure
+ *
+ *  This assumes the posted send buffers have already been cleaned
+ *  and de-allocated
+ **/
+static void i40e_free_adminq_asq(struct i40e_hw *hw)
+{
+	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+}
+
+/**
+ *  i40e_free_adminq_arq - Free Admin Queue receive rings
+ *  @hw: pointer to the hardware structure
+ *
+ *  This assumes the posted receive buffers have already been cleaned
+ *  and de-allocated
+ **/
+static void i40e_free_adminq_arq(struct i40e_hw *hw)
+{
+	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+}
+
+/**
+ *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ *  @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
+{
+	i40e_status ret_code;
+	struct i40e_aq_desc *desc;
+	struct i40e_dma_mem *bi;
+	int i;
+
+	/* We'll be allocating the buffer info memory first, then we can
+	 * allocate the mapped buffers for the event processing
+	 */
+
+	/* buffer_info structures do not need alignment */
+	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
+	if (ret_code)
+		goto alloc_arq_bufs;
+	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
+
+	/* allocate the mapped buffers */
+	for (i = 0; i < hw->aq.num_arq_entries; i++) {
+		bi = &hw->aq.arq.r.arq_bi[i];
+		ret_code = i40e_allocate_dma_mem(hw, bi,
+						 i40e_mem_arq_buf,
+						 hw->aq.arq_buf_size,
+						 I40E_ADMINQ_DESC_ALIGNMENT);
+		if (ret_code)
+			goto unwind_alloc_arq_bufs;
+
+		/* now configure the descriptors for use */
+		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
+
+		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+		desc->opcode = 0;
+		/* This is in accordance with Admin queue design, there is no
+		 * register for buffer size configuration
+		 */
+		desc->datalen = cpu_to_le16((u16)bi->size);
+		desc->retval = 0;
+		desc->cookie_high = 0;
+		desc->cookie_low = 0;
+		desc->params.external.addr_high =
+			cpu_to_le32(upper_32_bits(bi->pa));
+		desc->params.external.addr_low =
+			cpu_to_le32(lower_32_bits(bi->pa));
+		desc->params.external.param0 = 0;
+		desc->params.external.param1 = 0;
+	}
+
+alloc_arq_bufs:
+	return ret_code;
+
+unwind_alloc_arq_bufs:
+	/* don't try to free the one that failed... */
+	i--;
+	for (; i >= 0; i--)
+		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+
+	return ret_code;
+}
+
+/**
+ *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ *  @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
+{
+	i40e_status ret_code;
+	struct i40e_dma_mem *bi;
+	int i;
+
+	/* No mapped memory needed yet, just the buffer info structures */
+	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
+	if (ret_code)
+		goto alloc_asq_bufs;
+	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
+
+	/* allocate the mapped buffers */
+	for (i = 0; i < hw->aq.num_asq_entries; i++) {
+		bi = &hw->aq.asq.r.asq_bi[i];
+		ret_code = i40e_allocate_dma_mem(hw, bi,
+						 i40e_mem_asq_buf,
+						 hw->aq.asq_buf_size,
+						 I40E_ADMINQ_DESC_ALIGNMENT);
+		if (ret_code)
+			goto unwind_alloc_asq_bufs;
+	}
+alloc_asq_bufs:
+	return ret_code;
+
+unwind_alloc_asq_bufs:
+	/* don't try to free the one that failed... */
+	i--;
+	for (; i >= 0; i--)
+		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
+	return ret_code;
+}
+
+/**
+ *  i40e_free_arq_bufs - Free receive queue buffer info elements
+ *  @hw: pointer to the hardware structure
+ **/
+static void i40e_free_arq_bufs(struct i40e_hw *hw)
+{
+	int i;
+
+	/* free descriptors */
+	for (i = 0; i < hw->aq.num_arq_entries; i++)
+		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+	/* free the descriptor memory */
+	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+	/* free the dma header */
+	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+}
+
+/**
+ *  i40e_free_asq_bufs - Free send queue buffer info elements
+ *  @hw: pointer to the hardware structure
+ **/
+static void i40e_free_asq_bufs(struct i40e_hw *hw)
+{
+	int i;
+
+	/* only unmap if the address is non-NULL */
+	for (i = 0; i < hw->aq.num_asq_entries; i++)
+		if (hw->aq.asq.r.asq_bi[i].pa)
+			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+	/* free the buffer info list */
+	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+	/* free the descriptor memory */
+	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+	/* free the dma header */
+	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+}
+
+/**
+ *  i40e_config_asq_regs - configure ASQ registers
+ *  @hw: pointer to the hardware structure
+ *
+ *  Configure base address and length registers for the transmit queue
+ **/
+static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
+{
+	i40e_status ret_code = 0;
+	u32 reg = 0;
+
+	/* Clear Head and Tail */
+	wr32(hw, hw->aq.asq.head, 0);
+	wr32(hw, hw->aq.asq.tail, 0);
+
+	/* set starting point */
+	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+				  I40E_VF_ATQLEN1_ATQENABLE_MASK));
+	wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
+	wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
+
+	/* Check one register to verify that config was applied */
+	reg = rd32(hw, hw->aq.asq.bal);
+	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
+		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+	return ret_code;
+}
+
+/**
+ *  i40e_config_arq_regs - ARQ register configuration
+ *  @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event) queue
+ **/
+static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
+{
+	i40e_status ret_code = 0;
+	u32 reg = 0;
+
+	/* Clear Head and Tail */
+	wr32(hw, hw->aq.arq.head, 0);
+	wr32(hw, hw->aq.arq.tail, 0);
+
+	/* set starting point */
+	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+				  I40E_VF_ARQLEN1_ARQENABLE_MASK));
+	wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
+	wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
+
+	/* Update tail in the HW to post pre-allocated buffers */
+	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+	/* Check one register to verify that config was applied */
+	reg = rd32(hw, hw->aq.arq.bal);
+	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
+		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+	return ret_code;
+}
+
+/**
+ *  i40e_init_asq - main initialization routine for ASQ
+ *  @hw: pointer to the hardware structure
+ *
+ *  This is the main initialization routine for the Admin Send Queue
+ *  Prior to calling this function, drivers *MUST* set the following fields
+ *  in the hw->aq structure:
+ *     - hw->aq.num_asq_entries
+ *     - hw->aq.asq_buf_size
+ *
+ *  Do *NOT* hold the lock when calling this as the memory allocation routines
+ *  called are not going to be atomic context safe
+ **/
+static i40e_status i40e_init_asq(struct i40e_hw *hw)
+{
+	i40e_status ret_code = 0;
+
+	if (hw->aq.asq.count > 0) {
+		/* queue already initialized */
+		ret_code = I40E_ERR_NOT_READY;
+		goto init_adminq_exit;
+	}
+
+	/* verify input for valid configuration */
+	if ((hw->aq.num_asq_entries == 0) ||
+	    (hw->aq.asq_buf_size == 0)) {
+		ret_code = I40E_ERR_CONFIG;
+		goto init_adminq_exit;
+	}
+
+	hw->aq.asq.next_to_use = 0;
+	hw->aq.asq.next_to_clean = 0;
+
+	/* allocate the ring memory */
+	ret_code = i40e_alloc_adminq_asq_ring(hw);
+	if (ret_code)
+		goto init_adminq_exit;
+
+	/* allocate buffers in the rings */
+	ret_code = i40e_alloc_asq_bufs(hw);
+	if (ret_code)
+		goto init_adminq_free_rings;
+
+	/* initialize base registers */
+	ret_code = i40e_config_asq_regs(hw);
+	if (ret_code)
+		goto init_adminq_free_rings;
+
+	/* success! */
+	hw->aq.asq.count = hw->aq.num_asq_entries;
+	goto init_adminq_exit;
+
+init_adminq_free_rings:
+	i40e_free_adminq_asq(hw);
+
+init_adminq_exit:
+	return ret_code;
+}
+
+/**
+ *  i40e_init_arq - initialize ARQ
+ *  @hw: pointer to the hardware structure
+ *
+ *  The main initialization routine for the Admin Receive (Event) Queue.
+ *  Prior to calling this function, drivers *MUST* set the following fields
+ *  in the hw->aq structure:
+ *     - hw->aq.num_arq_entries
+ *     - hw->aq.arq_buf_size
+ *
+ *  Do *NOT* hold the lock when calling this as the memory allocation routines
+ *  called are not going to be atomic context safe
+ **/
+static i40e_status i40e_init_arq(struct i40e_hw *hw)
+{
+	i40e_status ret_code = 0;
+
+	if (hw->aq.arq.count > 0) {
+		/* queue already initialized */
+		ret_code = I40E_ERR_NOT_READY;
+		goto init_adminq_exit;
+	}
+
+	/* verify input for valid configuration */
+	if ((hw->aq.num_arq_entries == 0) ||
+	    (hw->aq.arq_buf_size == 0)) {
+		ret_code = I40E_ERR_CONFIG;
+		goto init_adminq_exit;
+	}
+
+	hw->aq.arq.next_to_use = 0;
+	hw->aq.arq.next_to_clean = 0;
+
+	/* allocate the ring memory */
+	ret_code = i40e_alloc_adminq_arq_ring(hw);
+	if (ret_code)
+		goto init_adminq_exit;
+
+	/* allocate buffers in the rings */
+	ret_code = i40e_alloc_arq_bufs(hw);
+	if (ret_code)
+		goto init_adminq_free_rings;
+
+	/* initialize base registers */
+	ret_code = i40e_config_arq_regs(hw);
+	if (ret_code)
+		goto init_adminq_free_rings;
+
+	/* success! */
+	hw->aq.arq.count = hw->aq.num_arq_entries;
+	goto init_adminq_exit;
+
+init_adminq_free_rings:
+	i40e_free_adminq_arq(hw);
+
+init_adminq_exit:
+	return ret_code;
+}
+
+/**
+ *  i40e_shutdown_asq - shutdown the ASQ
+ *  @hw: pointer to the hardware structure
+ *
+ *  The main shutdown routine for the Admin Send Queue
+ **/
+static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
+{
+	i40e_status ret_code = 0;
+
+	mutex_lock(&hw->aq.asq_mutex);
+
+	if (hw->aq.asq.count == 0) {
+		ret_code = I40E_ERR_NOT_READY;
+		goto shutdown_asq_out;
+	}
+
+	/* Stop firmware AdminQ processing */
+	wr32(hw, hw->aq.asq.head, 0);
+	wr32(hw, hw->aq.asq.tail, 0);
+	wr32(hw, hw->aq.asq.len, 0);
+	wr32(hw, hw->aq.asq.bal, 0);
+	wr32(hw, hw->aq.asq.bah, 0);
+
+	hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+	/* free ring buffers */
+	i40e_free_asq_bufs(hw);
+
+shutdown_asq_out:
+	mutex_unlock(&hw->aq.asq_mutex);
+	return ret_code;
+}
+
+/**
+ *  i40e_shutdown_arq - shutdown ARQ
+ *  @hw: pointer to the hardware structure
+ *
+ *  The main shutdown routine for the Admin Receive Queue
+ **/
+static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
+{
+	i40e_status ret_code = 0;
+
+	mutex_lock(&hw->aq.arq_mutex);
+
+	if (hw->aq.arq.count == 0) {
+		ret_code = I40E_ERR_NOT_READY;
+		goto shutdown_arq_out;
+	}
+
+	/* Stop firmware AdminQ processing */
+	wr32(hw, hw->aq.arq.head, 0);
+	wr32(hw, hw->aq.arq.tail, 0);
+	wr32(hw, hw->aq.arq.len, 0);
+	wr32(hw, hw->aq.arq.bal, 0);
+	wr32(hw, hw->aq.arq.bah, 0);
+
+	hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+	/* free ring buffers */
+	i40e_free_arq_bufs(hw);
+
+shutdown_arq_out:
+	mutex_unlock(&hw->aq.arq_mutex);
+	return ret_code;
+}
+
+/**
+ *  i40evf_init_adminq - main initialization routine for Admin Queue
+ *  @hw: pointer to the hardware structure
+ *
+ *  Prior to calling this function, drivers *MUST* set the following fields
+ *  in the hw->aq structure:
+ *     - hw->aq.num_asq_entries
+ *     - hw->aq.num_arq_entries
+ *     - hw->aq.arq_buf_size
+ *     - hw->aq.asq_buf_size
+ **/
+i40e_status i40evf_init_adminq(struct i40e_hw *hw)
+{
+	i40e_status ret_code;
+
+	/* verify input for valid configuration */
+	if ((hw->aq.num_arq_entries == 0) ||
+	    (hw->aq.num_asq_entries == 0) ||
+	    (hw->aq.arq_buf_size == 0) ||
+	    (hw->aq.asq_buf_size == 0)) {
+		ret_code = I40E_ERR_CONFIG;
+		goto init_adminq_exit;
+	}
+
+	/* Set up register offsets */
+	i40e_adminq_init_regs(hw);
+
+	/* setup ASQ command write back timeout */
+	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
+
+	/* allocate the ASQ */
+	ret_code = i40e_init_asq(hw);
+	if (ret_code)
+		goto init_adminq_destroy_locks;
+
+	/* allocate the ARQ */
+	ret_code = i40e_init_arq(hw);
+	if (ret_code)
+		goto init_adminq_free_asq;
+
+	/* success! */
+	goto init_adminq_exit;
+
+init_adminq_free_asq:
+	i40e_shutdown_asq(hw);
+init_adminq_destroy_locks:
+
+init_adminq_exit:
+	return ret_code;
+}
+
+/**
+ *  i40evf_shutdown_adminq - shutdown routine for the Admin Queue
+ *  @hw: pointer to the hardware structure
+ **/
+i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
+{
+	i40e_status ret_code = 0;
+
+	if (i40evf_check_asq_alive(hw))
+		i40evf_aq_queue_shutdown(hw, true);
+
+	i40e_shutdown_asq(hw);
+	i40e_shutdown_arq(hw);
+
+	return ret_code;
+}
+
+/**
+ *  i40e_clean_asq - cleans Admin send queue
+ *  @hw: pointer to the hardware structure
+ *
+ *  returns the number of free desc
+ **/
+static u16 i40e_clean_asq(struct i40e_hw *hw)
+{
+	struct i40e_adminq_ring *asq = &(hw->aq.asq);
+	struct i40e_asq_cmd_details *details;
+	u16 ntc = asq->next_to_clean;
+	struct i40e_aq_desc desc_cb;
+	struct i40e_aq_desc *desc;
+
+	desc = I40E_ADMINQ_DESC(*asq, ntc);
+	details = I40E_ADMINQ_DETAILS(*asq, ntc);
+	while (rd32(hw, hw->aq.asq.head) != ntc) {
+		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+
+		if (details->callback) {
+			I40E_ADMINQ_CALLBACK cb_func =
+					(I40E_ADMINQ_CALLBACK)details->callback;
+			desc_cb = *desc;
+			cb_func(hw, &desc_cb);
+		}
+		memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+		memset((void *)details, 0,
+		       sizeof(struct i40e_asq_cmd_details));
+		ntc++;
+		if (ntc == asq->count)
+			ntc = 0;
+		desc = I40E_ADMINQ_DESC(*asq, ntc);
+		details = I40E_ADMINQ_DETAILS(*asq, ntc);
+	}
+
+	asq->next_to_clean = ntc;
+
+	return I40E_DESC_UNUSED(asq);
+}
+
+/**
+ *  i40evf_asq_done - check if FW has processed the Admin Send Queue
+ *  @hw: pointer to the hw struct
+ *
+ *  Returns true if the firmware has processed all descriptors on the
+ *  admin send queue. Returns false if there are still requests pending.
+ **/
+bool i40evf_asq_done(struct i40e_hw *hw)
+{
+	/* AQ designers suggest use of head for better
+	 * timing reliability than DD bit
+	 */
+	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+
+}
+
+/**
+ *  i40evf_asq_send_command - send command to Admin Queue
+ *  @hw: pointer to the hw struct
+ *  @desc: prefilled descriptor describing the command (non DMA mem)
+ *  @buff: buffer to use for indirect commands
+ *  @buff_size: size of buffer for indirect commands
+ *  @cmd_details: pointer to command details structure
+ *
+ *  This is the main send command driver routine for the Admin Queue send
+ *  queue.  It runs the queue, cleans the queue, etc
+ **/
+i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
+				struct i40e_aq_desc *desc,
+				void *buff, /* can be NULL */
+				u16  buff_size,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	i40e_status status = 0;
+	struct i40e_dma_mem *dma_buff = NULL;
+	struct i40e_asq_cmd_details *details;
+	struct i40e_aq_desc *desc_on_ring;
+	bool cmd_completed = false;
+	u16  retval = 0;
+	u32  val = 0;
+
+	mutex_lock(&hw->aq.asq_mutex);
+
+	if (hw->aq.asq.count == 0) {
+		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: Admin queue not initialized.\n");
+		status = I40E_ERR_QUEUE_EMPTY;
+		goto asq_send_command_error;
+	}
+
+	hw->aq.asq_last_status = I40E_AQ_RC_OK;
+
+	val = rd32(hw, hw->aq.asq.head);
+	if (val >= hw->aq.num_asq_entries) {
+		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: head overrun at %d\n", val);
+		status = I40E_ERR_QUEUE_EMPTY;
+		goto asq_send_command_error;
+	}
+
+	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+	if (cmd_details) {
+		*details = *cmd_details;
+
+		/* If the cmd_details are defined copy the cookie.  The
+		 * cpu_to_le32 is not needed here because the data is ignored
+		 * by the FW, only used by the driver
+		 */
+		if (details->cookie) {
+			desc->cookie_high =
+				cpu_to_le32(upper_32_bits(details->cookie));
+			desc->cookie_low =
+				cpu_to_le32(lower_32_bits(details->cookie));
+		}
+	} else {
+		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
+	}
+
+	/* clear requested flags and then set additional flags if defined */
+	desc->flags &= ~cpu_to_le16(details->flags_dis);
+	desc->flags |= cpu_to_le16(details->flags_ena);
+
+	if (buff_size > hw->aq.asq_buf_size) {
+		i40e_debug(hw,
+			   I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: Invalid buffer size: %d.\n",
+			   buff_size);
+		status = I40E_ERR_INVALID_SIZE;
+		goto asq_send_command_error;
+	}
+
+	if (details->postpone && !details->async) {
+		i40e_debug(hw,
+			   I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: Async flag not set along with postpone flag");
+		status = I40E_ERR_PARAM;
+		goto asq_send_command_error;
+	}
+
+	/* call clean and check queue available function to reclaim the
+	 * descriptors that were processed by FW, the function returns the
+	 * number of desc available
+	 */
+	/* the clean function called here could be called in a separate thread
+	 * in case of asynchronous completions
+	 */
+	if (i40e_clean_asq(hw) == 0) {
+		i40e_debug(hw,
+			   I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: Error queue is full.\n");
+		status = I40E_ERR_ADMIN_QUEUE_FULL;
+		goto asq_send_command_error;
+	}
+
+	/* initialize the temp desc pointer with the right desc */
+	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+	/* if the desc is available copy the temp desc to the right place */
+	*desc_on_ring = *desc;
+
+	/* if buff is not NULL assume indirect command */
+	if (buff != NULL) {
+		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+		/* copy the user buff into the respective DMA buff */
+		memcpy(dma_buff->va, buff, buff_size);
+		desc_on_ring->datalen = cpu_to_le16(buff_size);
+
+		/* Update the address values in the desc with the pa value
+		 * for respective buffer
+		 */
+		desc_on_ring->params.external.addr_high =
+				cpu_to_le32(upper_32_bits(dma_buff->pa));
+		desc_on_ring->params.external.addr_low =
+				cpu_to_le32(lower_32_bits(dma_buff->pa));
+	}
+
+	/* bump the tail */
+	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
+	i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
+			buff, buff_size);
+	(hw->aq.asq.next_to_use)++;
+	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+		hw->aq.asq.next_to_use = 0;
+	if (!details->postpone)
+		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+	/* if cmd_details are not defined or async flag is not set,
+	 * we need to wait for desc write back
+	 */
+	if (!details->async && !details->postpone) {
+		u32 total_delay = 0;
+
+		do {
+			/* AQ designers suggest use of head for better
+			 * timing reliability than DD bit
+			 */
+			if (i40evf_asq_done(hw))
+				break;
+			udelay(50);
+			total_delay += 50;
+		} while (total_delay < hw->aq.asq_cmd_timeout);
+	}
+
+	/* if ready, copy the desc back to temp */
+	if (i40evf_asq_done(hw)) {
+		*desc = *desc_on_ring;
+		if (buff != NULL)
+			memcpy(buff, dma_buff->va, buff_size);
+		retval = le16_to_cpu(desc->retval);
+		if (retval != 0) {
+			i40e_debug(hw,
+				   I40E_DEBUG_AQ_MESSAGE,
+				   "AQTX: Command completed with error 0x%X.\n",
+				   retval);
+
+			/* strip off FW internal code */
+			retval &= 0xff;
+		}
+		cmd_completed = true;
+		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+			status = 0;
+		else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
+			status = I40E_ERR_NOT_READY;
+		else
+			status = I40E_ERR_ADMIN_QUEUE_ERROR;
+		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+	}
+
+	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+		   "AQTX: desc and buffer writeback:\n");
+	i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff,
+			buff_size);
+
+	/* save writeback aq if requested */
+	if (details->wb_desc)
+		*details->wb_desc = *desc_on_ring;
+
+	/* update the error if time out occurred */
+	if ((!cmd_completed) &&
+	    (!details->async && !details->postpone)) {
+		if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
+			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+				   "AQTX: AQ Critical error.\n");
+			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+		} else {
+			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+				   "AQTX: Writeback timeout.\n");
+			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+		}
+	}
+
+asq_send_command_error:
+	mutex_unlock(&hw->aq.asq_mutex);
+	return status;
+}
+
+/**
+ *  i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function
+ *  @desc:     pointer to the temp descriptor (non DMA mem)
+ *  @opcode:   the opcode can be used to decide which flags to turn off or on
+ *
+ *  Fill the desc with default values
+ **/
+void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+				       u16 opcode)
+{
+	/* zero out the desc */
+	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+	desc->opcode = cpu_to_le16(opcode);
+	desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
+}
+
+/**
+ *  i40evf_clean_arq_element
+ *  @hw: pointer to the hw struct
+ *  @e: event info from the receive descriptor, includes any buffers
+ *  @pending: number of events that could be left to process
+ *
+ *  This function cleans one Admin Receive Queue element and returns
+ *  the contents through e.  It can also return how many events are
+ *  left to process through 'pending'
+ **/
+i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
+					     struct i40e_arq_event_info *e,
+					     u16 *pending)
+{
+	i40e_status ret_code = 0;
+	u16 ntc = hw->aq.arq.next_to_clean;
+	struct i40e_aq_desc *desc;
+	struct i40e_dma_mem *bi;
+	u16 desc_idx;
+	u16 datalen;
+	u16 flags;
+	u16 ntu;
+
+	/* pre-clean the event info */
+	memset(&e->desc, 0, sizeof(e->desc));
+
+	/* take the lock before we start messing with the ring */
+	mutex_lock(&hw->aq.arq_mutex);
+
+	if (hw->aq.arq.count == 0) {
+		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+			   "AQRX: Admin queue not initialized.\n");
+		ret_code = I40E_ERR_QUEUE_EMPTY;
+		goto clean_arq_element_err;
+	}
+
+	/* set next_to_use to head */
+	ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
+	if (ntu == ntc) {
+		/* nothing to do - shouldn't need to update ring's values */
+		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+		goto clean_arq_element_out;
+	}
+
+	/* now clean the next descriptor */
+	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
+	desc_idx = ntc;
+
+	hw->aq.arq_last_status =
+		(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
+	flags = le16_to_cpu(desc->flags);
+	if (flags & I40E_AQ_FLAG_ERR) {
+		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+		i40e_debug(hw,
+			   I40E_DEBUG_AQ_MESSAGE,
+			   "AQRX: Event received with error 0x%X.\n",
+			   hw->aq.arq_last_status);
+	}
+
+	e->desc = *desc;
+	datalen = le16_to_cpu(desc->datalen);
+	e->msg_len = min(datalen, e->buf_len);
+	if (e->msg_buf != NULL && (e->msg_len != 0))
+		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
+		       e->msg_len);
+
+	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+	i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
+			hw->aq.arq_buf_size);
+
+	/* Restore the original datalen and buffer address in the desc,
+	 * FW updates datalen to indicate the event message
+	 * size
+	 */
+	bi = &hw->aq.arq.r.arq_bi[ntc];
+	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+
+	desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+	desc->datalen = cpu_to_le16((u16)bi->size);
+	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+
+	/* set tail = the last cleaned desc index. */
+	wr32(hw, hw->aq.arq.tail, ntc);
+	/* ntc is updated to tail + 1 */
+	ntc++;
+	if (ntc == hw->aq.num_arq_entries)
+		ntc = 0;
+	hw->aq.arq.next_to_clean = ntc;
+	hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+	/* Set pending if needed, unlock and return */
+	if (pending != NULL)
+		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+
+clean_arq_element_err:
+	mutex_unlock(&hw->aq.arq_mutex);
+
+	return ret_code;
+}
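As a usage sketch (the function name and the caller-owned buffer are hypothetical), the receive side is normally drained in a loop until 'pending' reaches zero:

static void example_drain_arq(struct i40e_hw *hw, u8 *buf, u16 buf_size)
{
	struct i40e_arq_event_info event = {};
	u16 pending = 0;

	event.buf_len = buf_size;
	event.msg_buf = buf;
	do {
		if (i40evf_clean_arq_element(hw, &event, &pending))
			break;	/* no work left, or the queue errored */
		/* event.desc and event.msg_len now describe one message */
	} while (pending);
}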
diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq.h b/drivers/net/ethernet/intel/iavf/i40e_adminq.h
new file mode 100644
index 000000000000..1f264b9b6805
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40e_adminq.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_ADMINQ_H_
+#define _I40E_ADMINQ_H_
+
+#include "i40e_osdep.h"
+#include "i40e_status.h"
+#include "i40e_adminq_cmd.h"
+
+#define I40E_ADMINQ_DESC(R, i)   \
+	(&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+
+#define I40E_ADMINQ_DESC_ALIGNMENT 4096
+
+struct i40e_adminq_ring {
+	struct i40e_virt_mem dma_head;	/* space for dma structures */
+	struct i40e_dma_mem desc_buf;	/* descriptor ring memory */
+	struct i40e_virt_mem cmd_buf;	/* command buffer memory */
+
+	union {
+		struct i40e_dma_mem *asq_bi;
+		struct i40e_dma_mem *arq_bi;
+	} r;
+
+	u16 count;		/* Number of descriptors */
+	u16 rx_buf_len;		/* Admin Receive Queue buffer length */
+
+	/* used for interrupt processing */
+	u16 next_to_use;
+	u16 next_to_clean;
+
+	/* used for queue tracking */
+	u32 head;
+	u32 tail;
+	u32 len;
+	u32 bah;
+	u32 bal;
+};
+
+/* ASQ transaction details */
+struct i40e_asq_cmd_details {
+	void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
+	u64 cookie;
+	u16 flags_ena;
+	u16 flags_dis;
+	bool async;
+	bool postpone;
+	struct i40e_aq_desc *wb_desc;
+};
+
+#define I40E_ADMINQ_DETAILS(R, i)   \
+	(&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
+
+/* ARQ event information */
+struct i40e_arq_event_info {
+	struct i40e_aq_desc desc;
+	u16 msg_len;
+	u16 buf_len;
+	u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct i40e_adminq_info {
+	struct i40e_adminq_ring arq;    /* receive queue */
+	struct i40e_adminq_ring asq;    /* send queue */
+	u32 asq_cmd_timeout;            /* send queue cmd write back timeout */
+	u16 num_arq_entries;            /* receive queue depth */
+	u16 num_asq_entries;            /* send queue depth */
+	u16 arq_buf_size;               /* receive queue buffer size */
+	u16 asq_buf_size;               /* send queue buffer size */
+	u16 fw_maj_ver;                 /* firmware major version */
+	u16 fw_min_ver;                 /* firmware minor version */
+	u32 fw_build;                   /* firmware build number */
+	u16 api_maj_ver;                /* api major version */
+	u16 api_min_ver;                /* api minor version */
+
+	struct mutex asq_mutex; /* Send queue lock */
+	struct mutex arq_mutex; /* Receive queue lock */
+
+	/* last status values on send and receive queues */
+	enum i40e_admin_queue_err asq_last_status;
+	enum i40e_admin_queue_err arq_last_status;
+};
+
+/**
+ * i40e_aq_rc_to_posix - convert errors to user-land codes
+ * @aq_ret: AdminQ handler error code; can override aq_rc
+ * @aq_rc: AdminQ firmware error code to convert
+ **/
+static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
+{
+	int aq_to_posix[] = {
+		0,           /* I40E_AQ_RC_OK */
+		-EPERM,      /* I40E_AQ_RC_EPERM */
+		-ENOENT,     /* I40E_AQ_RC_ENOENT */
+		-ESRCH,      /* I40E_AQ_RC_ESRCH */
+		-EINTR,      /* I40E_AQ_RC_EINTR */
+		-EIO,        /* I40E_AQ_RC_EIO */
+		-ENXIO,      /* I40E_AQ_RC_ENXIO */
+		-E2BIG,      /* I40E_AQ_RC_E2BIG */
+		-EAGAIN,     /* I40E_AQ_RC_EAGAIN */
+		-ENOMEM,     /* I40E_AQ_RC_ENOMEM */
+		-EACCES,     /* I40E_AQ_RC_EACCES */
+		-EFAULT,     /* I40E_AQ_RC_EFAULT */
+		-EBUSY,      /* I40E_AQ_RC_EBUSY */
+		-EEXIST,     /* I40E_AQ_RC_EEXIST */
+		-EINVAL,     /* I40E_AQ_RC_EINVAL */
+		-ENOTTY,     /* I40E_AQ_RC_ENOTTY */
+		-ENOSPC,     /* I40E_AQ_RC_ENOSPC */
+		-ENOSYS,     /* I40E_AQ_RC_ENOSYS */
+		-ERANGE,     /* I40E_AQ_RC_ERANGE */
+		-EPIPE,      /* I40E_AQ_RC_EFLUSHED */
+		-ESPIPE,     /* I40E_AQ_RC_BAD_ADDR */
+		-EROFS,      /* I40E_AQ_RC_EMODE */
+		-EFBIG,      /* I40E_AQ_RC_EFBIG */
+	};
+
+	/* aq_rc is invalid if AQ timed out */
+	if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+		return -EAGAIN;
+
+	if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
+		return -ERANGE;
+
+	return aq_to_posix[aq_rc];
+}
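A usage sketch (assuming a .c caller where struct i40e_hw is complete and aq_ret holds the status returned by i40evf_asq_send_command()):

/* translate an AdminQ result into a POSIX error for upper layers */
int err = i40e_aq_rc_to_posix(aq_ret, hw->aq.asq_last_status);

/* a writeback timeout always yields -EAGAIN; otherwise the firmware
 * return code decides the value, e.g. I40E_AQ_RC_EBUSY -> -EBUSY
 */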
+
+/* general information */
+#define I40E_AQ_LARGE_BUF	512
+#define I40E_ASQ_CMD_TIMEOUT	250000  /* usecs */
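For reference, i40evf_asq_send_command() polls i40evf_asq_done() in 50 usec steps, so a timeout of 250000 usec allows 250000 / 50 = 5000 polls, roughly a quarter of a second of waiting before a writeback timeout is reported (assuming asq_cmd_timeout is initialized from this define).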
+
+void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+				       u16 opcode);
+
+#endif /* _I40E_ADMINQ_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h
new file mode 100644
index 000000000000..493bdc5331f7
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h
@@ -0,0 +1,528 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_ADMINQ_CMD_H_
+#define _I40E_ADMINQ_CMD_H_
+
+/* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define I40E_FW_API_VERSION_MAJOR	0x0001
+#define I40E_FW_API_VERSION_MINOR_X722	0x0005
+#define I40E_FW_API_VERSION_MINOR_X710	0x0007
+
+#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
+					I40E_FW_API_VERSION_MINOR_X710 : \
+					I40E_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs  */
+#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+
+struct i40e_aq_desc {
+	__le16 flags;
+	__le16 opcode;
+	__le16 datalen;
+	__le16 retval;
+	__le32 cookie_high;
+	__le32 cookie_low;
+	union {
+		struct {
+			__le32 param0;
+			__le32 param1;
+			__le32 param2;
+			__le32 param3;
+		} internal;
+		struct {
+			__le32 param0;
+			__le32 param1;
+			__le32 addr_high;
+			__le32 addr_low;
+		} external;
+		u8 raw[16];
+	} params;
+};
+
+/* Flags sub-structure
+ * |0  |1  |2  |3  |4  |5  |6  |7  |8  |9  |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * *  RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define I40E_AQ_FLAG_DD_SHIFT	0
+#define I40E_AQ_FLAG_CMP_SHIFT	1
+#define I40E_AQ_FLAG_ERR_SHIFT	2
+#define I40E_AQ_FLAG_VFE_SHIFT	3
+#define I40E_AQ_FLAG_LB_SHIFT	9
+#define I40E_AQ_FLAG_RD_SHIFT	10
+#define I40E_AQ_FLAG_VFC_SHIFT	11
+#define I40E_AQ_FLAG_BUF_SHIFT	12
+#define I40E_AQ_FLAG_SI_SHIFT	13
+#define I40E_AQ_FLAG_EI_SHIFT	14
+#define I40E_AQ_FLAG_FE_SHIFT	15
+
+#define I40E_AQ_FLAG_DD		BIT(I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
+#define I40E_AQ_FLAG_CMP	BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
+#define I40E_AQ_FLAG_ERR	BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
+#define I40E_AQ_FLAG_VFE	BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
+#define I40E_AQ_FLAG_LB		BIT(I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
+#define I40E_AQ_FLAG_RD		BIT(I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
+#define I40E_AQ_FLAG_VFC	BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
+#define I40E_AQ_FLAG_BUF	BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI		BIT(I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
+#define I40E_AQ_FLAG_EI		BIT(I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
+#define I40E_AQ_FLAG_FE		BIT(I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
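As a reading aid (a sketch, not part of the patch), the flags field is kept in little-endian form, so callers combine and test these bits through cpu_to_le16()/le16_to_cpu():

/* mark a descriptor as carrying an indirect buffer supplied by the driver */
desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));

/* after completion, check whether firmware flagged an error */
if (le16_to_cpu(desc->flags) & I40E_AQ_FLAG_ERR)
	return I40E_ERR_ADMIN_QUEUE_ERROR;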
+
+/* error codes */
+enum i40e_admin_queue_err {
+	I40E_AQ_RC_OK		= 0,  /* success */
+	I40E_AQ_RC_EPERM	= 1,  /* Operation not permitted */
+	I40E_AQ_RC_ENOENT	= 2,  /* No such element */
+	I40E_AQ_RC_ESRCH	= 3,  /* Bad opcode */
+	I40E_AQ_RC_EINTR	= 4,  /* operation interrupted */
+	I40E_AQ_RC_EIO		= 5,  /* I/O error */
+	I40E_AQ_RC_ENXIO	= 6,  /* No such resource */
+	I40E_AQ_RC_E2BIG	= 7,  /* Arg too long */
+	I40E_AQ_RC_EAGAIN	= 8,  /* Try again */
+	I40E_AQ_RC_ENOMEM	= 9,  /* Out of memory */
+	I40E_AQ_RC_EACCES	= 10, /* Permission denied */
+	I40E_AQ_RC_EFAULT	= 11, /* Bad address */
+	I40E_AQ_RC_EBUSY	= 12, /* Device or resource busy */
+	I40E_AQ_RC_EEXIST	= 13, /* object already exists */
+	I40E_AQ_RC_EINVAL	= 14, /* Invalid argument */
+	I40E_AQ_RC_ENOTTY	= 15, /* Not a typewriter */
+	I40E_AQ_RC_ENOSPC	= 16, /* No space left or alloc failure */
+	I40E_AQ_RC_ENOSYS	= 17, /* Function not implemented */
+	I40E_AQ_RC_ERANGE	= 18, /* Parameter out of range */
+	I40E_AQ_RC_EFLUSHED	= 19, /* Cmd flushed due to prev cmd error */
+	I40E_AQ_RC_BAD_ADDR	= 20, /* Descriptor contains a bad pointer */
+	I40E_AQ_RC_EMODE	= 21, /* Op not allowed in current dev mode */
+	I40E_AQ_RC_EFBIG	= 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum i40e_admin_queue_opc {
+	/* aq commands */
+	i40e_aqc_opc_get_version	= 0x0001,
+	i40e_aqc_opc_driver_version	= 0x0002,
+	i40e_aqc_opc_queue_shutdown	= 0x0003,
+	i40e_aqc_opc_set_pf_context	= 0x0004,
+
+	/* resource ownership */
+	i40e_aqc_opc_request_resource	= 0x0008,
+	i40e_aqc_opc_release_resource	= 0x0009,
+
+	i40e_aqc_opc_list_func_capabilities	= 0x000A,
+	i40e_aqc_opc_list_dev_capabilities	= 0x000B,
+
+	/* Proxy commands */
+	i40e_aqc_opc_set_proxy_config		= 0x0104,
+	i40e_aqc_opc_set_ns_proxy_table_entry	= 0x0105,
+
+	/* LAA */
+	i40e_aqc_opc_mac_address_read	= 0x0107,
+	i40e_aqc_opc_mac_address_write	= 0x0108,
+
+	/* PXE */
+	i40e_aqc_opc_clear_pxe_mode	= 0x0110,
+
+	/* WoL commands */
+	i40e_aqc_opc_set_wol_filter	= 0x0120,
+	i40e_aqc_opc_get_wake_reason	= 0x0121,
+
+	/* internal switch commands */
+	i40e_aqc_opc_get_switch_config		= 0x0200,
+	i40e_aqc_opc_add_statistics		= 0x0201,
+	i40e_aqc_opc_remove_statistics		= 0x0202,
+	i40e_aqc_opc_set_port_parameters	= 0x0203,
+	i40e_aqc_opc_get_switch_resource_alloc	= 0x0204,
+	i40e_aqc_opc_set_switch_config		= 0x0205,
+	i40e_aqc_opc_rx_ctl_reg_read		= 0x0206,
+	i40e_aqc_opc_rx_ctl_reg_write		= 0x0207,
+
+	i40e_aqc_opc_add_vsi			= 0x0210,
+	i40e_aqc_opc_update_vsi_parameters	= 0x0211,
+	i40e_aqc_opc_get_vsi_parameters		= 0x0212,
+
+	i40e_aqc_opc_add_pv			= 0x0220,
+	i40e_aqc_opc_update_pv_parameters	= 0x0221,
+	i40e_aqc_opc_get_pv_parameters		= 0x0222,
+
+	i40e_aqc_opc_add_veb			= 0x0230,
+	i40e_aqc_opc_update_veb_parameters	= 0x0231,
+	i40e_aqc_opc_get_veb_parameters		= 0x0232,
+
+	i40e_aqc_opc_delete_element		= 0x0243,
+
+	i40e_aqc_opc_add_macvlan		= 0x0250,
+	i40e_aqc_opc_remove_macvlan		= 0x0251,
+	i40e_aqc_opc_add_vlan			= 0x0252,
+	i40e_aqc_opc_remove_vlan		= 0x0253,
+	i40e_aqc_opc_set_vsi_promiscuous_modes	= 0x0254,
+	i40e_aqc_opc_add_tag			= 0x0255,
+	i40e_aqc_opc_remove_tag			= 0x0256,
+	i40e_aqc_opc_add_multicast_etag		= 0x0257,
+	i40e_aqc_opc_remove_multicast_etag	= 0x0258,
+	i40e_aqc_opc_update_tag			= 0x0259,
+	i40e_aqc_opc_add_control_packet_filter	= 0x025A,
+	i40e_aqc_opc_remove_control_packet_filter	= 0x025B,
+	i40e_aqc_opc_add_cloud_filters		= 0x025C,
+	i40e_aqc_opc_remove_cloud_filters	= 0x025D,
+	i40e_aqc_opc_clear_wol_switch_filters	= 0x025E,
+
+	i40e_aqc_opc_add_mirror_rule	= 0x0260,
+	i40e_aqc_opc_delete_mirror_rule	= 0x0261,
+
+	/* Dynamic Device Personalization */
+	i40e_aqc_opc_write_personalization_profile	= 0x0270,
+	i40e_aqc_opc_get_personalization_profile_list	= 0x0271,
+
+	/* DCB commands */
+	i40e_aqc_opc_dcb_ignore_pfc	= 0x0301,
+	i40e_aqc_opc_dcb_updated	= 0x0302,
+	i40e_aqc_opc_set_dcb_parameters = 0x0303,
+
+	/* TX scheduler */
+	i40e_aqc_opc_configure_vsi_bw_limit		= 0x0400,
+	i40e_aqc_opc_configure_vsi_ets_sla_bw_limit	= 0x0406,
+	i40e_aqc_opc_configure_vsi_tc_bw		= 0x0407,
+	i40e_aqc_opc_query_vsi_bw_config		= 0x0408,
+	i40e_aqc_opc_query_vsi_ets_sla_config		= 0x040A,
+	i40e_aqc_opc_configure_switching_comp_bw_limit	= 0x0410,
+
+	i40e_aqc_opc_enable_switching_comp_ets			= 0x0413,
+	i40e_aqc_opc_modify_switching_comp_ets			= 0x0414,
+	i40e_aqc_opc_disable_switching_comp_ets			= 0x0415,
+	i40e_aqc_opc_configure_switching_comp_ets_bw_limit	= 0x0416,
+	i40e_aqc_opc_configure_switching_comp_bw_config		= 0x0417,
+	i40e_aqc_opc_query_switching_comp_ets_config		= 0x0418,
+	i40e_aqc_opc_query_port_ets_config			= 0x0419,
+	i40e_aqc_opc_query_switching_comp_bw_config		= 0x041A,
+	i40e_aqc_opc_suspend_port_tx				= 0x041B,
+	i40e_aqc_opc_resume_port_tx				= 0x041C,
+	i40e_aqc_opc_configure_partition_bw			= 0x041D,
+	/* hmc */
+	i40e_aqc_opc_query_hmc_resource_profile	= 0x0500,
+	i40e_aqc_opc_set_hmc_resource_profile	= 0x0501,
+
+	/* phy commands*/
+	i40e_aqc_opc_get_phy_abilities		= 0x0600,
+	i40e_aqc_opc_set_phy_config		= 0x0601,
+	i40e_aqc_opc_set_mac_config		= 0x0603,
+	i40e_aqc_opc_set_link_restart_an	= 0x0605,
+	i40e_aqc_opc_get_link_status		= 0x0607,
+	i40e_aqc_opc_set_phy_int_mask		= 0x0613,
+	i40e_aqc_opc_get_local_advt_reg		= 0x0614,
+	i40e_aqc_opc_set_local_advt_reg		= 0x0615,
+	i40e_aqc_opc_get_partner_advt		= 0x0616,
+	i40e_aqc_opc_set_lb_modes		= 0x0618,
+	i40e_aqc_opc_get_phy_wol_caps		= 0x0621,
+	i40e_aqc_opc_set_phy_debug		= 0x0622,
+	i40e_aqc_opc_upload_ext_phy_fm		= 0x0625,
+	i40e_aqc_opc_run_phy_activity		= 0x0626,
+	i40e_aqc_opc_set_phy_register		= 0x0628,
+	i40e_aqc_opc_get_phy_register		= 0x0629,
+
+	/* NVM commands */
+	i40e_aqc_opc_nvm_read			= 0x0701,
+	i40e_aqc_opc_nvm_erase			= 0x0702,
+	i40e_aqc_opc_nvm_update			= 0x0703,
+	i40e_aqc_opc_nvm_config_read		= 0x0704,
+	i40e_aqc_opc_nvm_config_write		= 0x0705,
+	i40e_aqc_opc_oem_post_update		= 0x0720,
+	i40e_aqc_opc_thermal_sensor		= 0x0721,
+
+	/* virtualization commands */
+	i40e_aqc_opc_send_msg_to_pf		= 0x0801,
+	i40e_aqc_opc_send_msg_to_vf		= 0x0802,
+	i40e_aqc_opc_send_msg_to_peer		= 0x0803,
+
+	/* alternate structure */
+	i40e_aqc_opc_alternate_write		= 0x0900,
+	i40e_aqc_opc_alternate_write_indirect	= 0x0901,
+	i40e_aqc_opc_alternate_read		= 0x0902,
+	i40e_aqc_opc_alternate_read_indirect	= 0x0903,
+	i40e_aqc_opc_alternate_write_done	= 0x0904,
+	i40e_aqc_opc_alternate_set_mode		= 0x0905,
+	i40e_aqc_opc_alternate_clear_port	= 0x0906,
+
+	/* LLDP commands */
+	i40e_aqc_opc_lldp_get_mib	= 0x0A00,
+	i40e_aqc_opc_lldp_update_mib	= 0x0A01,
+	i40e_aqc_opc_lldp_add_tlv	= 0x0A02,
+	i40e_aqc_opc_lldp_update_tlv	= 0x0A03,
+	i40e_aqc_opc_lldp_delete_tlv	= 0x0A04,
+	i40e_aqc_opc_lldp_stop		= 0x0A05,
+	i40e_aqc_opc_lldp_start		= 0x0A06,
+
+	/* Tunnel commands */
+	i40e_aqc_opc_add_udp_tunnel	= 0x0B00,
+	i40e_aqc_opc_del_udp_tunnel	= 0x0B01,
+	i40e_aqc_opc_set_rss_key	= 0x0B02,
+	i40e_aqc_opc_set_rss_lut	= 0x0B03,
+	i40e_aqc_opc_get_rss_key	= 0x0B04,
+	i40e_aqc_opc_get_rss_lut	= 0x0B05,
+
+	/* Async Events */
+	i40e_aqc_opc_event_lan_overflow		= 0x1001,
+
+	/* OEM commands */
+	i40e_aqc_opc_oem_parameter_change	= 0xFE00,
+	i40e_aqc_opc_oem_device_status_change	= 0xFE01,
+	i40e_aqc_opc_oem_ocsd_initialize	= 0xFE02,
+	i40e_aqc_opc_oem_ocbb_initialize	= 0xFE03,
+
+	/* debug commands */
+	i40e_aqc_opc_debug_read_reg		= 0xFF03,
+	i40e_aqc_opc_debug_write_reg		= 0xFF04,
+	i40e_aqc_opc_debug_modify_reg		= 0xFF07,
+	i40e_aqc_opc_debug_dump_internals	= 0xFF08,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
+	{ i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define I40E_CHECK_CMD_LENGTH(X)	I40E_CHECK_STRUCT_LEN(16, X)
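For illustration, the check used a few lines below, I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown), expands to roughly:

enum i40e_static_assert_enum_i40e_aqc_queue_shutdown {
	i40e_static_assert_i40e_aqc_queue_shutdown =
		(16) / ((sizeof(struct i40e_aqc_queue_shutdown) == (16)) ? 1 : 0)
};

so a size mismatch becomes a constant division by zero and the build fails at that line.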
+
+/* Queue Shutdown (direct 0x0003) */
+struct i40e_aqc_queue_shutdown {
+	__le32	driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING	0x1
+	u8	reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+
+struct i40e_aqc_vsi_properties_data {
+	/* first 96 bytes are written by SW */
+	__le16	valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID		0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID		0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID		0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID		0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID	0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID	0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID	0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID	0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID		0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID		0x0200
+	/* switch section */
+	__le16	switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT		0x0000
+#define I40E_AQ_VSI_SW_ID_MASK		(0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG	0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB	0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB	0x4000
+	u8	sw_reserved[2];
+	/* security section */
+	u8	sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD	0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK	0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK	0x04
+	u8	sec_reserved;
+	/* VLAN section */
+	__le16	pvid; /* VLANS include priority bits */
+	__le16	fcoe_pvid;
+	u8	port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT	0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK	(0x03 << \
+					 I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED	0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED	0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL	0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID	0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT	0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK	(0x3 << \
+					 I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH	0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP	0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR	0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING	0x18
+	u8	pvlan_reserved[3];
+	/* ingress egress up sections */
+	__le32	ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT	0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT	3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT	6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT	9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT	12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT	15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT	18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT	21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+	__le32	egress_table;   /* same defines as for ingress table */
+	/* cascaded PV section */
+	__le16	cas_pv_tag;
+	u8	cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT		0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK		(0x03 << \
+						 I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE		0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE		0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY		0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG		0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE		0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG	0x40
+	u8	cas_pv_reserved;
+	/* queue mapping section */
+	__le16	mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG	0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG	0x1
+	__le16	queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT		0x0
+#define I40E_AQ_VSI_QUEUE_MASK		(0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+	__le16	tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT	0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK	(0x1FF << \
+					 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT	9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK	(0x7 << \
+					 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+	/* queueing option section */
+	u8	queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA	0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA	0x08
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA	0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA	0x20
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF	0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI	0x40
+	u8	queueing_opt_reserved[3];
+	/* scheduler section */
+	u8	up_enable_bits;
+	u8	sched_reserved;
+	/* outer up section */
+	__le32	outer_up_table; /* same structure and defines as ingress tbl */
+	u8	cmd_reserved[8];
+	/* last 32 bytes are written by FW */
+	__le16	qs_handle[8];
+#define I40E_AQ_VSI_QS_HANDLE_INVALID	0xFFFF
+	__le16	stat_counter_idx;
+	__le16	sched_id;
+	u8	resp_reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_veb_parameters_completion {
+	__le16	seid;
+	__le16	switch_id;
+	__le16	veb_flags; /* only the first/last flags from 0x0230 is valid */
+	__le16	statistic_index;
+	__le16	vebs_used;
+	__le16	vebs_free;
+	u8	reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
+
+#define I40E_LINK_SPEED_100MB_SHIFT	0x1
+#define I40E_LINK_SPEED_1000MB_SHIFT	0x2
+#define I40E_LINK_SPEED_10GB_SHIFT	0x3
+#define I40E_LINK_SPEED_40GB_SHIFT	0x4
+#define I40E_LINK_SPEED_20GB_SHIFT	0x5
+#define I40E_LINK_SPEED_25GB_SHIFT	0x6
+
+enum i40e_aq_link_speed {
+	I40E_LINK_SPEED_UNKNOWN	= 0,
+	I40E_LINK_SPEED_100MB	= BIT(I40E_LINK_SPEED_100MB_SHIFT),
+	I40E_LINK_SPEED_1GB	= BIT(I40E_LINK_SPEED_1000MB_SHIFT),
+	I40E_LINK_SPEED_10GB	= BIT(I40E_LINK_SPEED_10GB_SHIFT),
+	I40E_LINK_SPEED_40GB	= BIT(I40E_LINK_SPEED_40GB_SHIFT),
+	I40E_LINK_SPEED_20GB	= BIT(I40E_LINK_SPEED_20GB_SHIFT),
+	I40E_LINK_SPEED_25GB	= BIT(I40E_LINK_SPEED_25GB_SHIFT),
+};
+
+/* Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+	__le32	id;
+	u8	reserved[4];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID		BIT(15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT	0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK	(0x3FF << \
+					I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+	__le16	vsi_id;
+	u8	reserved[6];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+	u8 standard_rss_key[0x28];
+	u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct  i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID		BIT(15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT	0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK	(0x3FF << \
+					I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+	__le16	vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT	0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \
+				BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI	0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF	1
+	__le16	flags;
+	u8	reserved[4];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+#endif /* _I40E_ADMINQ_CMD_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/i40e_alloc.h b/drivers/net/ethernet/intel/iavf/i40e_alloc.h
new file mode 100644
index 000000000000..cb8689222c8b
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40e_alloc.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_ALLOC_H_
+#define _I40E_ALLOC_H_
+
+struct i40e_hw;
+
+/* Memory allocation types */
+enum i40e_memory_type {
+	i40e_mem_arq_buf = 0,		/* ARQ indirect command buffer */
+	i40e_mem_asq_buf = 1,
+	i40e_mem_atq_buf = 2,		/* ATQ indirect command buffer */
+	i40e_mem_arq_ring = 3,		/* ARQ descriptor ring */
+	i40e_mem_atq_ring = 4,		/* ATQ descriptor ring */
+	i40e_mem_pd = 5,		/* Page Descriptor */
+	i40e_mem_bp = 6,		/* Backing Page - 4KB */
+	i40e_mem_bp_jumbo = 7,		/* Backing Page - > 4KB */
+	i40e_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
+					    struct i40e_dma_mem *mem,
+					    enum i40e_memory_type type,
+					    u64 size, u32 alignment);
+i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
+					struct i40e_dma_mem *mem);
+i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
+					     struct i40e_virt_mem *mem,
+					     u32 size);
+i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
+					 struct i40e_virt_mem *mem);
+
+#endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/i40e_common.c b/drivers/net/ethernet/intel/iavf/i40e_common.c
new file mode 100644
index 000000000000..f34091d96f49
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40e_common.c
@@ -0,0 +1,982 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include "i40e_type.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include <linux/avf/virtchnl.h>
+
+/**
+ * i40e_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+{
+	i40e_status status = 0;
+
+	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
+		switch (hw->device_id) {
+		case I40E_DEV_ID_SFP_XL710:
+		case I40E_DEV_ID_QEMU:
+		case I40E_DEV_ID_KX_B:
+		case I40E_DEV_ID_KX_C:
+		case I40E_DEV_ID_QSFP_A:
+		case I40E_DEV_ID_QSFP_B:
+		case I40E_DEV_ID_QSFP_C:
+		case I40E_DEV_ID_10G_BASE_T:
+		case I40E_DEV_ID_10G_BASE_T4:
+		case I40E_DEV_ID_20G_KR2:
+		case I40E_DEV_ID_20G_KR2_A:
+		case I40E_DEV_ID_25G_B:
+		case I40E_DEV_ID_25G_SFP28:
+			hw->mac.type = I40E_MAC_XL710;
+			break;
+		case I40E_DEV_ID_SFP_X722:
+		case I40E_DEV_ID_1G_BASE_T_X722:
+		case I40E_DEV_ID_10G_BASE_T_X722:
+		case I40E_DEV_ID_SFP_I_X722:
+			hw->mac.type = I40E_MAC_X722;
+			break;
+		case I40E_DEV_ID_X722_VF:
+			hw->mac.type = I40E_MAC_X722_VF;
+			break;
+		case I40E_DEV_ID_VF:
+		case I40E_DEV_ID_VF_HV:
+		case I40E_DEV_ID_ADAPTIVE_VF:
+			hw->mac.type = I40E_MAC_VF;
+			break;
+		default:
+			hw->mac.type = I40E_MAC_GENERIC;
+			break;
+		}
+	} else {
+		status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+	}
+
+	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
+		  hw->mac.type, status);
+	return status;
+}
+
+/**
+ * i40evf_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+	switch (aq_err) {
+	case I40E_AQ_RC_OK:
+		return "OK";
+	case I40E_AQ_RC_EPERM:
+		return "I40E_AQ_RC_EPERM";
+	case I40E_AQ_RC_ENOENT:
+		return "I40E_AQ_RC_ENOENT";
+	case I40E_AQ_RC_ESRCH:
+		return "I40E_AQ_RC_ESRCH";
+	case I40E_AQ_RC_EINTR:
+		return "I40E_AQ_RC_EINTR";
+	case I40E_AQ_RC_EIO:
+		return "I40E_AQ_RC_EIO";
+	case I40E_AQ_RC_ENXIO:
+		return "I40E_AQ_RC_ENXIO";
+	case I40E_AQ_RC_E2BIG:
+		return "I40E_AQ_RC_E2BIG";
+	case I40E_AQ_RC_EAGAIN:
+		return "I40E_AQ_RC_EAGAIN";
+	case I40E_AQ_RC_ENOMEM:
+		return "I40E_AQ_RC_ENOMEM";
+	case I40E_AQ_RC_EACCES:
+		return "I40E_AQ_RC_EACCES";
+	case I40E_AQ_RC_EFAULT:
+		return "I40E_AQ_RC_EFAULT";
+	case I40E_AQ_RC_EBUSY:
+		return "I40E_AQ_RC_EBUSY";
+	case I40E_AQ_RC_EEXIST:
+		return "I40E_AQ_RC_EEXIST";
+	case I40E_AQ_RC_EINVAL:
+		return "I40E_AQ_RC_EINVAL";
+	case I40E_AQ_RC_ENOTTY:
+		return "I40E_AQ_RC_ENOTTY";
+	case I40E_AQ_RC_ENOSPC:
+		return "I40E_AQ_RC_ENOSPC";
+	case I40E_AQ_RC_ENOSYS:
+		return "I40E_AQ_RC_ENOSYS";
+	case I40E_AQ_RC_ERANGE:
+		return "I40E_AQ_RC_ERANGE";
+	case I40E_AQ_RC_EFLUSHED:
+		return "I40E_AQ_RC_EFLUSHED";
+	case I40E_AQ_RC_BAD_ADDR:
+		return "I40E_AQ_RC_BAD_ADDR";
+	case I40E_AQ_RC_EMODE:
+		return "I40E_AQ_RC_EMODE";
+	case I40E_AQ_RC_EFBIG:
+		return "I40E_AQ_RC_EFBIG";
+	}
+
+	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+	return hw->err_str;
+}
+
+/**
+ * i40evf_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+{
+	switch (stat_err) {
+	case 0:
+		return "OK";
+	case I40E_ERR_NVM:
+		return "I40E_ERR_NVM";
+	case I40E_ERR_NVM_CHECKSUM:
+		return "I40E_ERR_NVM_CHECKSUM";
+	case I40E_ERR_PHY:
+		return "I40E_ERR_PHY";
+	case I40E_ERR_CONFIG:
+		return "I40E_ERR_CONFIG";
+	case I40E_ERR_PARAM:
+		return "I40E_ERR_PARAM";
+	case I40E_ERR_MAC_TYPE:
+		return "I40E_ERR_MAC_TYPE";
+	case I40E_ERR_UNKNOWN_PHY:
+		return "I40E_ERR_UNKNOWN_PHY";
+	case I40E_ERR_LINK_SETUP:
+		return "I40E_ERR_LINK_SETUP";
+	case I40E_ERR_ADAPTER_STOPPED:
+		return "I40E_ERR_ADAPTER_STOPPED";
+	case I40E_ERR_INVALID_MAC_ADDR:
+		return "I40E_ERR_INVALID_MAC_ADDR";
+	case I40E_ERR_DEVICE_NOT_SUPPORTED:
+		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+	case I40E_ERR_MASTER_REQUESTS_PENDING:
+		return "I40E_ERR_MASTER_REQUESTS_PENDING";
+	case I40E_ERR_INVALID_LINK_SETTINGS:
+		return "I40E_ERR_INVALID_LINK_SETTINGS";
+	case I40E_ERR_AUTONEG_NOT_COMPLETE:
+		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+	case I40E_ERR_RESET_FAILED:
+		return "I40E_ERR_RESET_FAILED";
+	case I40E_ERR_SWFW_SYNC:
+		return "I40E_ERR_SWFW_SYNC";
+	case I40E_ERR_NO_AVAILABLE_VSI:
+		return "I40E_ERR_NO_AVAILABLE_VSI";
+	case I40E_ERR_NO_MEMORY:
+		return "I40E_ERR_NO_MEMORY";
+	case I40E_ERR_BAD_PTR:
+		return "I40E_ERR_BAD_PTR";
+	case I40E_ERR_RING_FULL:
+		return "I40E_ERR_RING_FULL";
+	case I40E_ERR_INVALID_PD_ID:
+		return "I40E_ERR_INVALID_PD_ID";
+	case I40E_ERR_INVALID_QP_ID:
+		return "I40E_ERR_INVALID_QP_ID";
+	case I40E_ERR_INVALID_CQ_ID:
+		return "I40E_ERR_INVALID_CQ_ID";
+	case I40E_ERR_INVALID_CEQ_ID:
+		return "I40E_ERR_INVALID_CEQ_ID";
+	case I40E_ERR_INVALID_AEQ_ID:
+		return "I40E_ERR_INVALID_AEQ_ID";
+	case I40E_ERR_INVALID_SIZE:
+		return "I40E_ERR_INVALID_SIZE";
+	case I40E_ERR_INVALID_ARP_INDEX:
+		return "I40E_ERR_INVALID_ARP_INDEX";
+	case I40E_ERR_INVALID_FPM_FUNC_ID:
+		return "I40E_ERR_INVALID_FPM_FUNC_ID";
+	case I40E_ERR_QP_INVALID_MSG_SIZE:
+		return "I40E_ERR_QP_INVALID_MSG_SIZE";
+	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+	case I40E_ERR_INVALID_FRAG_COUNT:
+		return "I40E_ERR_INVALID_FRAG_COUNT";
+	case I40E_ERR_QUEUE_EMPTY:
+		return "I40E_ERR_QUEUE_EMPTY";
+	case I40E_ERR_INVALID_ALIGNMENT:
+		return "I40E_ERR_INVALID_ALIGNMENT";
+	case I40E_ERR_FLUSHED_QUEUE:
+		return "I40E_ERR_FLUSHED_QUEUE";
+	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+	case I40E_ERR_INVALID_IMM_DATA_SIZE:
+		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+	case I40E_ERR_TIMEOUT:
+		return "I40E_ERR_TIMEOUT";
+	case I40E_ERR_OPCODE_MISMATCH:
+		return "I40E_ERR_OPCODE_MISMATCH";
+	case I40E_ERR_CQP_COMPL_ERROR:
+		return "I40E_ERR_CQP_COMPL_ERROR";
+	case I40E_ERR_INVALID_VF_ID:
+		return "I40E_ERR_INVALID_VF_ID";
+	case I40E_ERR_INVALID_HMCFN_ID:
+		return "I40E_ERR_INVALID_HMCFN_ID";
+	case I40E_ERR_BACKING_PAGE_ERROR:
+		return "I40E_ERR_BACKING_PAGE_ERROR";
+	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+	case I40E_ERR_INVALID_PBLE_INDEX:
+		return "I40E_ERR_INVALID_PBLE_INDEX";
+	case I40E_ERR_INVALID_SD_INDEX:
+		return "I40E_ERR_INVALID_SD_INDEX";
+	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+	case I40E_ERR_INVALID_SD_TYPE:
+		return "I40E_ERR_INVALID_SD_TYPE";
+	case I40E_ERR_MEMCPY_FAILED:
+		return "I40E_ERR_MEMCPY_FAILED";
+	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+	case I40E_ERR_SRQ_ENABLED:
+		return "I40E_ERR_SRQ_ENABLED";
+	case I40E_ERR_ADMIN_QUEUE_ERROR:
+		return "I40E_ERR_ADMIN_QUEUE_ERROR";
+	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+	case I40E_ERR_BUF_TOO_SHORT:
+		return "I40E_ERR_BUF_TOO_SHORT";
+	case I40E_ERR_ADMIN_QUEUE_FULL:
+		return "I40E_ERR_ADMIN_QUEUE_FULL";
+	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+	case I40E_ERR_BAD_IWARP_CQE:
+		return "I40E_ERR_BAD_IWARP_CQE";
+	case I40E_ERR_NVM_BLANK_MODE:
+		return "I40E_ERR_NVM_BLANK_MODE";
+	case I40E_ERR_NOT_IMPLEMENTED:
+		return "I40E_ERR_NOT_IMPLEMENTED";
+	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+	case I40E_ERR_DIAG_TEST_FAILED:
+		return "I40E_ERR_DIAG_TEST_FAILED";
+	case I40E_ERR_NOT_READY:
+		return "I40E_ERR_NOT_READY";
+	case I40E_NOT_SUPPORTED:
+		return "I40E_NOT_SUPPORTED";
+	case I40E_ERR_FIRMWARE_API_VERSION:
+		return "I40E_ERR_FIRMWARE_API_VERSION";
+	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
+	}
+
+	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+	return hw->err_str;
+}
+
+/**
+ * i40evf_debug_aq
+ * @hw: pointer to the hw struct
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ * @buf_len: max length of buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
+		   void *buffer, u16 buf_len)
+{
+	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+	u8 *buf = (u8 *)buffer;
+
+	if ((!(mask & hw->debug_mask)) || (desc == NULL))
+		return;
+
+	i40e_debug(hw, mask,
+		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+		   le16_to_cpu(aq_desc->opcode),
+		   le16_to_cpu(aq_desc->flags),
+		   le16_to_cpu(aq_desc->datalen),
+		   le16_to_cpu(aq_desc->retval));
+	i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+		   le32_to_cpu(aq_desc->cookie_high),
+		   le32_to_cpu(aq_desc->cookie_low));
+	i40e_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
+		   le32_to_cpu(aq_desc->params.internal.param0),
+		   le32_to_cpu(aq_desc->params.internal.param1));
+	i40e_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
+		   le32_to_cpu(aq_desc->params.external.addr_high),
+		   le32_to_cpu(aq_desc->params.external.addr_low));
+
+	if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+		u16 len = le16_to_cpu(aq_desc->datalen);
+
+		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
+		if (buf_len < len)
+			len = buf_len;
+		/* write the full 16-byte chunks */
+		if (hw->debug_mask & mask) {
+			char prefix[27];
+
+			snprintf(prefix, sizeof(prefix),
+				 "i40evf %02x:%02x.%x: \t0x",
+				 hw->bus.bus_id,
+				 hw->bus.device,
+				 hw->bus.func);
+
+			print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
+				       16, 1, buf, len, false);
+		}
+	}
+}
+
+/**
+ * i40evf_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if Queue is enabled else false.
+ **/
+bool i40evf_check_asq_alive(struct i40e_hw *hw)
+{
+	if (hw->aq.asq.len)
+		return !!(rd32(hw, hw->aq.asq.len) &
+			  I40E_VF_ATQLEN1_ATQENABLE_MASK);
+	else
+		return false;
+}
+
+/**
+ * i40evf_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
+					     bool unloading)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_queue_shutdown *cmd =
+		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+	i40e_status status;
+
+	i40evf_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_queue_shutdown);
+
+	if (unloading)
+		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+	status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+	return status;
+}
+
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set RSS look up table
+ **/
+static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+					   u16 vsi_id, bool pf_lut,
+					   u8 *lut, u16 lut_size,
+					   bool set)
+{
+	i40e_status status;
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_set_rss_lut *cmd_resp =
+		   (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+	if (set)
+		i40evf_fill_default_direct_cmd_desc(&desc,
+						    i40e_aqc_opc_set_rss_lut);
+	else
+		i40evf_fill_default_direct_cmd_desc(&desc,
+						    i40e_aqc_opc_get_rss_lut);
+
+	/* Indirect command */
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+	cmd_resp->vsi_id =
+			cpu_to_le16((u16)((vsi_id <<
+					  I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+					  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+	if (pf_lut)
+		cmd_resp->flags |= cpu_to_le16((u16)
+					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+	else
+		cmd_resp->flags |= cpu_to_le16((u16)
+					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+	status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+	return status;
+}
+
+/**
+ * i40evf_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+				  bool pf_lut, u8 *lut, u16 lut_size)
+{
+	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+				       false);
+}
+
+/**
+ * i40evf_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+				  bool pf_lut, u8 *lut, u16 lut_size)
+{
+	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * Internal function to get or set the RSS key per VSI
+ **/
+static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+				      u16 vsi_id,
+				      struct i40e_aqc_get_set_rss_key_data *key,
+				      bool set)
+{
+	i40e_status status;
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_set_rss_key *cmd_resp =
+			(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+	if (set)
+		i40evf_fill_default_direct_cmd_desc(&desc,
+						    i40e_aqc_opc_set_rss_key);
+	else
+		i40evf_fill_default_direct_cmd_desc(&desc,
+						    i40e_aqc_opc_get_rss_key);
+
+	/* Indirect command */
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+	cmd_resp->vsi_id =
+			cpu_to_le16((u16)((vsi_id <<
+					  I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+					  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+
+	status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL);
+
+	return status;
+}
+
+/**
+ * i40evf_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * get the RSS key per VSI
+ **/
+i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
+				  u16 vsi_id,
+				  struct i40e_aqc_get_set_rss_key_data *key)
+{
+	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * i40evf_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
+				  u16 vsi_id,
+				  struct i40e_aqc_get_set_rss_key_data *key)
+{
+	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
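A hedged sketch of how these wrappers fit together when programming RSS for one VSI (the function name and the caller-provided key/lut are hypothetical):

static i40e_status example_config_rss(struct i40e_hw *hw, u16 vsi_id,
				      struct i40e_aqc_get_set_rss_key_data *key,
				      u8 *lut, u16 lut_size)
{
	i40e_status status;

	status = i40evf_aq_set_rss_key(hw, vsi_id, key);
	if (status)
		return status;

	/* false selects the per-VSI lookup table rather than the PF-wide one */
	return i40evf_aq_set_rss_lut(hw, vsi_id, false, lut, lut_size);
}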
+
+/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT i40evf_ptype_lookup[ptype].known
+ * THEN
+ *      Packet is unknown
+ * ELSE IF i40evf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
+ *      Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ *      Use the enum i40e_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
+
+/* macro to make the table lines short */
+#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+	{	PTYPE, \
+		1, \
+		I40E_RX_PTYPE_OUTER_##OUTER_IP, \
+		I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+		I40E_RX_PTYPE_##OUTER_FRAG, \
+		I40E_RX_PTYPE_TUNNEL_##T, \
+		I40E_RX_PTYPE_TUNNEL_END_##TE, \
+		I40E_RX_PTYPE_##TEF, \
+		I40E_RX_PTYPE_INNER_PROT_##I, \
+		I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
+		{ PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros makes the table fit but are terse */
+#define I40E_RX_PTYPE_NOF		I40E_RX_PTYPE_NOT_FRAG
+#define I40E_RX_PTYPE_FRG		I40E_RX_PTYPE_FRAG
+#define I40E_RX_PTYPE_INNER_PROT_TS	I40E_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = {
+	/* L2 Packet types */
+	I40E_PTT_UNUSED_ENTRY(0),
+	I40E_PTT(1,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+	I40E_PTT(2,  L2, NONE, NOF, NONE, NONE, NOF, TS,   PAY2),
+	I40E_PTT(3,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+	I40E_PTT_UNUSED_ENTRY(4),
+	I40E_PTT_UNUSED_ENTRY(5),
+	I40E_PTT(6,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+	I40E_PTT(7,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+	I40E_PTT_UNUSED_ENTRY(8),
+	I40E_PTT_UNUSED_ENTRY(9),
+	I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+	I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+	I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+	/* Non Tunneled IPv4 */
+	I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(25),
+	I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP,  PAY4),
+	I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+	I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+	/* IPv4 --> IPv4 */
+	I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+	I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+	I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(32),
+	I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
+	I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+	I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+	/* IPv4 --> IPv6 */
+	I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+	I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+	I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(39),
+	I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
+	I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+	I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+	/* IPv4 --> GRE/NAT */
+	I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+	/* IPv4 --> GRE/NAT --> IPv4 */
+	I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+	I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+	I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(47),
+	I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
+	I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+	I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+	/* IPv4 --> GRE/NAT --> IPv6 */
+	I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+	I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+	I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(54),
+	I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
+	I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+	I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+	/* IPv4 --> GRE/NAT --> MAC */
+	I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+	/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+	I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+	I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+	I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(62),
+	I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
+	I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+	I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+	/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+	I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+	I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+	I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(69),
+	I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
+	I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+	I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+	/* IPv4 --> GRE/NAT --> MAC/VLAN */
+	I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+	/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+	I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+	I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+	I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(77),
+	I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
+	I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+	I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+	/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+	I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+	I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+	I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(84),
+	I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
+	I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+	I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+	/* Non Tunneled IPv6 */
+	I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY3),
+	I40E_PTT_UNUSED_ENTRY(91),
+	I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP,  PAY4),
+	I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+	I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+	/* IPv6 --> IPv4 */
+	I40E_PTT(95,  IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+	I40E_PTT(96,  IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+	I40E_PTT(97,  IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(98),
+	I40E_PTT(99,  IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
+	I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+	I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+	/* IPv6 --> IPv6 */
+	I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+	I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+	I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(105),
+	I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
+	I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+	I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+	/* IPv6 --> GRE/NAT */
+	I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+	/* IPv6 --> GRE/NAT -> IPv4 */
+	I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+	I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+	I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(113),
+	I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
+	I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+	I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+	/* IPv6 --> GRE/NAT -> IPv6 */
+	I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+	I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+	I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(120),
+	I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
+	I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+	I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+	/* IPv6 --> GRE/NAT -> MAC */
+	I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+	/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+	I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+	I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+	I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(128),
+	I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
+	I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+	I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+	/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+	I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+	I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+	I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(135),
+	I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
+	I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+	I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+	/* IPv6 --> GRE/NAT -> MAC/VLAN */
+	I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+	I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+	I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+	I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(143),
+	I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
+	I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+	I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+	I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+	I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+	I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(150),
+	I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
+	I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+	I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+	/* unused entries */
+	I40E_PTT_UNUSED_ENTRY(154),
+	I40E_PTT_UNUSED_ENTRY(155),
+	I40E_PTT_UNUSED_ENTRY(156),
+	I40E_PTT_UNUSED_ENTRY(157),
+	I40E_PTT_UNUSED_ENTRY(158),
+	I40E_PTT_UNUSED_ENTRY(159),
+
+	I40E_PTT_UNUSED_ENTRY(160),
+	I40E_PTT_UNUSED_ENTRY(161),
+	I40E_PTT_UNUSED_ENTRY(162),
+	I40E_PTT_UNUSED_ENTRY(163),
+	I40E_PTT_UNUSED_ENTRY(164),
+	I40E_PTT_UNUSED_ENTRY(165),
+	I40E_PTT_UNUSED_ENTRY(166),
+	I40E_PTT_UNUSED_ENTRY(167),
+	I40E_PTT_UNUSED_ENTRY(168),
+	I40E_PTT_UNUSED_ENTRY(169),
+
+	I40E_PTT_UNUSED_ENTRY(170),
+	I40E_PTT_UNUSED_ENTRY(171),
+	I40E_PTT_UNUSED_ENTRY(172),
+	I40E_PTT_UNUSED_ENTRY(173),
+	I40E_PTT_UNUSED_ENTRY(174),
+	I40E_PTT_UNUSED_ENTRY(175),
+	I40E_PTT_UNUSED_ENTRY(176),
+	I40E_PTT_UNUSED_ENTRY(177),
+	I40E_PTT_UNUSED_ENTRY(178),
+	I40E_PTT_UNUSED_ENTRY(179),
+
+	I40E_PTT_UNUSED_ENTRY(180),
+	I40E_PTT_UNUSED_ENTRY(181),
+	I40E_PTT_UNUSED_ENTRY(182),
+	I40E_PTT_UNUSED_ENTRY(183),
+	I40E_PTT_UNUSED_ENTRY(184),
+	I40E_PTT_UNUSED_ENTRY(185),
+	I40E_PTT_UNUSED_ENTRY(186),
+	I40E_PTT_UNUSED_ENTRY(187),
+	I40E_PTT_UNUSED_ENTRY(188),
+	I40E_PTT_UNUSED_ENTRY(189),
+
+	I40E_PTT_UNUSED_ENTRY(190),
+	I40E_PTT_UNUSED_ENTRY(191),
+	I40E_PTT_UNUSED_ENTRY(192),
+	I40E_PTT_UNUSED_ENTRY(193),
+	I40E_PTT_UNUSED_ENTRY(194),
+	I40E_PTT_UNUSED_ENTRY(195),
+	I40E_PTT_UNUSED_ENTRY(196),
+	I40E_PTT_UNUSED_ENTRY(197),
+	I40E_PTT_UNUSED_ENTRY(198),
+	I40E_PTT_UNUSED_ENTRY(199),
+
+	I40E_PTT_UNUSED_ENTRY(200),
+	I40E_PTT_UNUSED_ENTRY(201),
+	I40E_PTT_UNUSED_ENTRY(202),
+	I40E_PTT_UNUSED_ENTRY(203),
+	I40E_PTT_UNUSED_ENTRY(204),
+	I40E_PTT_UNUSED_ENTRY(205),
+	I40E_PTT_UNUSED_ENTRY(206),
+	I40E_PTT_UNUSED_ENTRY(207),
+	I40E_PTT_UNUSED_ENTRY(208),
+	I40E_PTT_UNUSED_ENTRY(209),
+
+	I40E_PTT_UNUSED_ENTRY(210),
+	I40E_PTT_UNUSED_ENTRY(211),
+	I40E_PTT_UNUSED_ENTRY(212),
+	I40E_PTT_UNUSED_ENTRY(213),
+	I40E_PTT_UNUSED_ENTRY(214),
+	I40E_PTT_UNUSED_ENTRY(215),
+	I40E_PTT_UNUSED_ENTRY(216),
+	I40E_PTT_UNUSED_ENTRY(217),
+	I40E_PTT_UNUSED_ENTRY(218),
+	I40E_PTT_UNUSED_ENTRY(219),
+
+	I40E_PTT_UNUSED_ENTRY(220),
+	I40E_PTT_UNUSED_ENTRY(221),
+	I40E_PTT_UNUSED_ENTRY(222),
+	I40E_PTT_UNUSED_ENTRY(223),
+	I40E_PTT_UNUSED_ENTRY(224),
+	I40E_PTT_UNUSED_ENTRY(225),
+	I40E_PTT_UNUSED_ENTRY(226),
+	I40E_PTT_UNUSED_ENTRY(227),
+	I40E_PTT_UNUSED_ENTRY(228),
+	I40E_PTT_UNUSED_ENTRY(229),
+
+	I40E_PTT_UNUSED_ENTRY(230),
+	I40E_PTT_UNUSED_ENTRY(231),
+	I40E_PTT_UNUSED_ENTRY(232),
+	I40E_PTT_UNUSED_ENTRY(233),
+	I40E_PTT_UNUSED_ENTRY(234),
+	I40E_PTT_UNUSED_ENTRY(235),
+	I40E_PTT_UNUSED_ENTRY(236),
+	I40E_PTT_UNUSED_ENTRY(237),
+	I40E_PTT_UNUSED_ENTRY(238),
+	I40E_PTT_UNUSED_ENTRY(239),
+
+	I40E_PTT_UNUSED_ENTRY(240),
+	I40E_PTT_UNUSED_ENTRY(241),
+	I40E_PTT_UNUSED_ENTRY(242),
+	I40E_PTT_UNUSED_ENTRY(243),
+	I40E_PTT_UNUSED_ENTRY(244),
+	I40E_PTT_UNUSED_ENTRY(245),
+	I40E_PTT_UNUSED_ENTRY(246),
+	I40E_PTT_UNUSED_ENTRY(247),
+	I40E_PTT_UNUSED_ENTRY(248),
+	I40E_PTT_UNUSED_ENTRY(249),
+
+	I40E_PTT_UNUSED_ENTRY(250),
+	I40E_PTT_UNUSED_ENTRY(251),
+	I40E_PTT_UNUSED_ENTRY(252),
+	I40E_PTT_UNUSED_ENTRY(253),
+	I40E_PTT_UNUSED_ENTRY(254),
+	I40E_PTT_UNUSED_ENTRY(255)
+};
+
+/**
+ * i40e_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send message to PF driver using admin queue. By default, this message
+ * is sent asynchronously, i.e. i40evf_asq_send_command() does not wait for
+ * completion before returning.
+ **/
+i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+				enum virtchnl_ops v_opcode,
+				i40e_status v_retval,
+				u8 *msg, u16 msglen,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_asq_cmd_details details;
+	i40e_status status;
+
+	i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
+	desc.cookie_high = cpu_to_le32(v_opcode);
+	desc.cookie_low = cpu_to_le32(v_retval);
+	if (msglen) {
+		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF
+						| I40E_AQ_FLAG_RD));
+		if (msglen > I40E_AQ_LARGE_BUF)
+			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+		desc.datalen = cpu_to_le16(msglen);
+	}
+	if (!cmd_details) {
+		memset(&details, 0, sizeof(details));
+		details.async = true;
+		cmd_details = &details;
+	}
+	status = i40evf_asq_send_command(hw, &desc, msg, msglen, cmd_details);
+	return status;
+}
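+
+/* Illustrative use (a sketch, assuming the virtchnl definitions from
+ * <linux/avf/virtchnl.h>): the VF's capability request travels over the
+ * same path, e.g.
+ *
+ *	u32 caps = VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF;
+ *
+ *	i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_GET_VF_RESOURCES, 0,
+ *			       (u8 *)&caps, sizeof(caps), NULL);
+ */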
+
+/**
+ * i40e_vf_parse_hw_config
+ * @hw: pointer to the hardware structure
+ * @msg: pointer to the virtual channel VF resource structure
+ *
+ * Given a VF resource message from the PF, populate the hw struct
+ * with appropriate information.
+ **/
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+			     struct virtchnl_vf_resource *msg)
+{
+	struct virtchnl_vsi_resource *vsi_res;
+	int i;
+
+	vsi_res = &msg->vsi_res[0];
+
+	hw->dev_caps.num_vsis = msg->num_vsis;
+	hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
+	hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
+	hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
+	hw->dev_caps.dcb = msg->vf_cap_flags &
+			   VIRTCHNL_VF_OFFLOAD_L2;
+	hw->dev_caps.fcoe = 0;
+	for (i = 0; i < msg->num_vsis; i++) {
+		if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
+			ether_addr_copy(hw->mac.perm_addr,
+					vsi_res->default_mac_addr);
+			ether_addr_copy(hw->mac.addr,
+					vsi_res->default_mac_addr);
+		}
+		vsi_res++;
+	}
+}
+
+/**
+ * i40e_vf_reset
+ * @hw: pointer to the hardware structure
+ *
+ * Send a VF_RESET message to the PF. Does not wait for response from PF
+ * as none will be forthcoming. Immediately after calling this function,
+ * the admin queue should be shut down and (optionally) reinitialized.
+ **/
+i40e_status i40e_vf_reset(struct i40e_hw *hw)
+{
+	return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
+				      0, NULL, 0, NULL);
+}
diff --git a/drivers/net/ethernet/intel/iavf/i40e_devids.h b/drivers/net/ethernet/intel/iavf/i40e_devids.h
new file mode 100644
index 000000000000..f300bf271824
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40e_devids.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_DEVIDS_H_
+#define _I40E_DEVIDS_H_
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710		0x1572
+#define I40E_DEV_ID_QEMU		0x1574
+#define I40E_DEV_ID_KX_B		0x1580
+#define I40E_DEV_ID_KX_C		0x1581
+#define I40E_DEV_ID_QSFP_A		0x1583
+#define I40E_DEV_ID_QSFP_B		0x1584
+#define I40E_DEV_ID_QSFP_C		0x1585
+#define I40E_DEV_ID_10G_BASE_T		0x1586
+#define I40E_DEV_ID_20G_KR2		0x1587
+#define I40E_DEV_ID_20G_KR2_A		0x1588
+#define I40E_DEV_ID_10G_BASE_T4		0x1589
+#define I40E_DEV_ID_25G_B		0x158A
+#define I40E_DEV_ID_25G_SFP28		0x158B
+#define I40E_DEV_ID_VF			0x154C
+#define I40E_DEV_ID_VF_HV		0x1571
+#define I40E_DEV_ID_ADAPTIVE_VF		0x1889
+#define I40E_DEV_ID_SFP_X722		0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722	0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722	0x37D2
+#define I40E_DEV_ID_SFP_I_X722		0x37D3
+#define I40E_DEV_ID_X722_VF		0x37CD
+
+#define i40e_is_40G_device(d)		((d) == I40E_DEV_ID_QSFP_A  || \
+					 (d) == I40E_DEV_ID_QSFP_B  || \
+					 (d) == I40E_DEV_ID_QSFP_C)
+
+#endif /* _I40E_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/i40e_osdep.h b/drivers/net/ethernet/intel/iavf/i40e_osdep.h
new file mode 100644
index 000000000000..3ddddb46455b
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40e_osdep.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_OSDEP_H_
+#define _I40E_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/tcp.h>
+#include <linux/pci.h>
+
+/* get readq/writeq support for 32 bit kernels, use the low-first version */
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+/* This file is the glue between the shared code and
+ * the actual OS primitives.
+ */
+
+#define hw_dbg(hw, S, A...)	do {} while (0)
+
+#define wr32(a, reg, value)	writel((value), ((a)->hw_addr + (reg)))
+#define rd32(a, reg)		readl((a)->hw_addr + (reg))
+
+#define wr64(a, reg, value)	writeq((value), ((a)->hw_addr + (reg)))
+#define rd64(a, reg)		readq((a)->hw_addr + (reg))
+#define i40e_flush(a)		readl((a)->hw_addr + I40E_VFGEN_RSTAT)
+
+/* memory allocation tracking */
+struct i40e_dma_mem {
+	void *va;
+	dma_addr_t pa;
+	u32 size;
+};
+
+#define i40e_allocate_dma_mem(h, m, unused, s, a) \
+	i40evf_allocate_dma_mem_d(h, m, s, a)
+#define i40e_free_dma_mem(h, m) i40evf_free_dma_mem_d(h, m)
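+
+/* Illustrative use (sketch): shared adminq code allocates a descriptor ring
+ * roughly as below; the third argument (a memory-type tag such as
+ * i40e_mem_arq_ring from i40e_alloc.h) is dropped by this wrapper.
+ *
+ *	struct i40e_dma_mem ring;
+ *
+ *	if (i40e_allocate_dma_mem(hw, &ring, i40e_mem_arq_ring, size, 4096))
+ *		return I40E_ERR_NO_MEMORY;
+ */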
+
+struct i40e_virt_mem {
+	void *va;
+	u32 size;
+};
+#define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s)
+#define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m)
+
+#define i40e_debug(h, m, s, ...)  i40evf_debug_d(h, m, s, ##__VA_ARGS__)
+extern void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
+	__attribute__ ((format(gnu_printf, 3, 4)));
+
+typedef enum i40e_status_code i40e_status;
+#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/i40e_prototype.h b/drivers/net/ethernet/intel/iavf/i40e_prototype.h
new file mode 100644
index 000000000000..ef7f74489bfc
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40e_prototype.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_PROTOTYPE_H_
+#define _I40E_PROTOTYPE_H_
+
+#include "i40e_type.h"
+#include "i40e_alloc.h"
+#include <linux/avf/virtchnl.h>
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures.  These exist
+ * mostly because they are needed even before init has
+ * happened, and they assist in the early SW and FW setup.
+ */
+
+/* adminq functions */
+i40e_status i40evf_init_adminq(struct i40e_hw *hw);
+i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw);
+void i40e_adminq_init_ring_data(struct i40e_hw *hw);
+i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
+					     struct i40e_arq_event_info *e,
+					     u16 *events_pending);
+i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
+				struct i40e_aq_desc *desc,
+				void *buff, /* can be NULL */
+				u16  buff_size,
+				struct i40e_asq_cmd_details *cmd_details);
+bool i40evf_asq_done(struct i40e_hw *hw);
+
+/* debug function for adminq */
+void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
+		     void *desc, void *buffer, u16 buf_len);
+
+void i40e_idle_aq(struct i40e_hw *hw);
+void i40evf_resume_aq(struct i40e_hw *hw);
+bool i40evf_check_asq_alive(struct i40e_hw *hw);
+i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+
+i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+				  bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+				  bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
+				  u16 seid,
+				  struct i40e_aqc_get_set_rss_key_data *key);
+i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
+				  u16 seid,
+				  struct i40e_aqc_get_set_rss_key_data *key);
+
+i40e_status i40e_set_mac_type(struct i40e_hw *hw);
+
+extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[];
+
+static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+	return i40evf_ptype_lookup[ptype];
+}
+
+/* i40e_common for VF drivers */
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+			     struct virtchnl_vf_resource *msg);
+i40e_status i40e_vf_reset(struct i40e_hw *hw);
+i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+				   enum virtchnl_ops v_opcode,
+				   i40e_status v_retval, u8 *msg, u16 msglen,
+				   struct i40e_asq_cmd_details *cmd_details);
+#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/i40e_register.h b/drivers/net/ethernet/intel/iavf/i40e_register.h
new file mode 100644
index 000000000000..20b464ac1542
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40e_register.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_REGISTER_H_
+#define _I40E_REGISTER_H_
+
+#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
+#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
+#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
+#define I40E_VF_ARQH1_ARQH_SHIFT 0
+#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
+#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
+#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
+#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
+#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
+#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
+#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
+#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
+#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
+#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */
+#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
+#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
+#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_VFQF_HKEY_MAX_INDEX 12
+#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_VFQF_HLUT_MAX_INDEX 15
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
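+
+/* Example (sketch): the VF reset state is read out of I40E_VFGEN_RSTAT with
+ * the shift/mask pairs above:
+ *
+ *	u32 val = rd32(hw, I40E_VFGEN_RSTAT);
+ *	u32 state = (val & I40E_VFGEN_RSTAT_VFR_STATE_MASK) >>
+ *		    I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
+ */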
+#endif /* _I40E_REGISTER_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/i40e_status.h b/drivers/net/ethernet/intel/iavf/i40e_status.h
new file mode 100644
index 000000000000..77be0702d07c
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40e_status.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_STATUS_H_
+#define _I40E_STATUS_H_
+
+/* Error Codes */
+enum i40e_status_code {
+	I40E_SUCCESS				= 0,
+	I40E_ERR_NVM				= -1,
+	I40E_ERR_NVM_CHECKSUM			= -2,
+	I40E_ERR_PHY				= -3,
+	I40E_ERR_CONFIG				= -4,
+	I40E_ERR_PARAM				= -5,
+	I40E_ERR_MAC_TYPE			= -6,
+	I40E_ERR_UNKNOWN_PHY			= -7,
+	I40E_ERR_LINK_SETUP			= -8,
+	I40E_ERR_ADAPTER_STOPPED		= -9,
+	I40E_ERR_INVALID_MAC_ADDR		= -10,
+	I40E_ERR_DEVICE_NOT_SUPPORTED		= -11,
+	I40E_ERR_MASTER_REQUESTS_PENDING	= -12,
+	I40E_ERR_INVALID_LINK_SETTINGS		= -13,
+	I40E_ERR_AUTONEG_NOT_COMPLETE		= -14,
+	I40E_ERR_RESET_FAILED			= -15,
+	I40E_ERR_SWFW_SYNC			= -16,
+	I40E_ERR_NO_AVAILABLE_VSI		= -17,
+	I40E_ERR_NO_MEMORY			= -18,
+	I40E_ERR_BAD_PTR			= -19,
+	I40E_ERR_RING_FULL			= -20,
+	I40E_ERR_INVALID_PD_ID			= -21,
+	I40E_ERR_INVALID_QP_ID			= -22,
+	I40E_ERR_INVALID_CQ_ID			= -23,
+	I40E_ERR_INVALID_CEQ_ID			= -24,
+	I40E_ERR_INVALID_AEQ_ID			= -25,
+	I40E_ERR_INVALID_SIZE			= -26,
+	I40E_ERR_INVALID_ARP_INDEX		= -27,
+	I40E_ERR_INVALID_FPM_FUNC_ID		= -28,
+	I40E_ERR_QP_INVALID_MSG_SIZE		= -29,
+	I40E_ERR_QP_TOOMANY_WRS_POSTED		= -30,
+	I40E_ERR_INVALID_FRAG_COUNT		= -31,
+	I40E_ERR_QUEUE_EMPTY			= -32,
+	I40E_ERR_INVALID_ALIGNMENT		= -33,
+	I40E_ERR_FLUSHED_QUEUE			= -34,
+	I40E_ERR_INVALID_PUSH_PAGE_INDEX	= -35,
+	I40E_ERR_INVALID_IMM_DATA_SIZE		= -36,
+	I40E_ERR_TIMEOUT			= -37,
+	I40E_ERR_OPCODE_MISMATCH		= -38,
+	I40E_ERR_CQP_COMPL_ERROR		= -39,
+	I40E_ERR_INVALID_VF_ID			= -40,
+	I40E_ERR_INVALID_HMCFN_ID		= -41,
+	I40E_ERR_BACKING_PAGE_ERROR		= -42,
+	I40E_ERR_NO_PBLCHUNKS_AVAILABLE		= -43,
+	I40E_ERR_INVALID_PBLE_INDEX		= -44,
+	I40E_ERR_INVALID_SD_INDEX		= -45,
+	I40E_ERR_INVALID_PAGE_DESC_INDEX	= -46,
+	I40E_ERR_INVALID_SD_TYPE		= -47,
+	I40E_ERR_MEMCPY_FAILED			= -48,
+	I40E_ERR_INVALID_HMC_OBJ_INDEX		= -49,
+	I40E_ERR_INVALID_HMC_OBJ_COUNT		= -50,
+	I40E_ERR_INVALID_SRQ_ARM_LIMIT		= -51,
+	I40E_ERR_SRQ_ENABLED			= -52,
+	I40E_ERR_ADMIN_QUEUE_ERROR		= -53,
+	I40E_ERR_ADMIN_QUEUE_TIMEOUT		= -54,
+	I40E_ERR_BUF_TOO_SHORT			= -55,
+	I40E_ERR_ADMIN_QUEUE_FULL		= -56,
+	I40E_ERR_ADMIN_QUEUE_NO_WORK		= -57,
+	I40E_ERR_BAD_IWARP_CQE			= -58,
+	I40E_ERR_NVM_BLANK_MODE			= -59,
+	I40E_ERR_NOT_IMPLEMENTED		= -60,
+	I40E_ERR_PE_DOORBELL_NOT_ENABLED	= -61,
+	I40E_ERR_DIAG_TEST_FAILED		= -62,
+	I40E_ERR_NOT_READY			= -63,
+	I40E_NOT_SUPPORTED			= -64,
+	I40E_ERR_FIRMWARE_API_VERSION		= -65,
+	I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR	= -66,
+};
+
+#endif /* _I40E_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/i40e_trace.h b/drivers/net/ethernet/intel/iavf/i40e_trace.h
new file mode 100644
index 000000000000..d7a4e68820a8
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40e_trace.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+/* Modeled on trace-events-sample.h */
+
+/* The trace subsystem name for i40evf will be "i40evf".
+ *
+ * This file is named i40e_trace.h.
+ *
+ * Since this include file's name is different from the trace
+ * subsystem name, we'll have to define TRACE_INCLUDE_FILE at the end
+ * of this file.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i40evf
+
+/* See trace-events-sample.h for a detailed description of why this
+ * guard clause is different from most normal include files.
+ */
+#if !defined(_I40E_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _I40E_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+/**
+ * i40e_trace() macro enables shared code to refer to trace points
+ * like:
+ *
+ * trace_i40e{,vf}_example(args...)
+ *
+ * ... as:
+ *
+ * i40e_trace(example, args...)
+ *
+ * ... to resolve to the PF or VF version of the tracepoint without
+ * ifdefs, and to allow tracepoints to be disabled entirely at build
+ * time.
+ *
+ * Tracepoints should always be referred to in the driver via this
+ * macro.
+ *
+ * Similarly, i40e_trace_enabled(trace_name) wraps references to
+ * trace_i40e{,vf}_<trace_name>_enabled() functions.
+ */
+#define _I40E_TRACE_NAME(trace_name) (trace_ ## i40evf ## _ ## trace_name)
+#define I40E_TRACE_NAME(trace_name) _I40E_TRACE_NAME(trace_name)
+
+#define i40e_trace(trace_name, args...) I40E_TRACE_NAME(trace_name)(args)
+
+#define i40e_trace_enabled(trace_name) I40E_TRACE_NAME(trace_name##_enabled)()
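+
+/* For example, in shared code
+ *
+ *	i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
+ *
+ * expands here to trace_i40evf_clean_rx_irq(rx_ring, rx_desc, skb), and to
+ * the trace_i40e_* variant when the same code is built into the PF driver.
+ */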
+
+/* Events common to PF and VF. Corresponding versions will be defined
+ * for both, named trace_i40e_* and trace_i40evf_*. The i40e_trace()
+ * macro above will select the right trace point name for the driver
+ * being built from shared code.
+ */
+
+/* Events related to a vsi & ring */
+DECLARE_EVENT_CLASS(
+	i40evf_tx_template,
+
+	TP_PROTO(struct i40e_ring *ring,
+		 struct i40e_tx_desc *desc,
+		 struct i40e_tx_buffer *buf),
+
+	TP_ARGS(ring, desc, buf),
+
+	/* The convention here is to make the first fields in the
+	 * TP_STRUCT match the TP_PROTO exactly. This enables the use
+	 * of the args struct generated by the tplist tool (from the
+	 * bcc-tools package) to be used for those fields. Accessing
+	 * fields other than the tracepoint args will require the
+	 * tplist output to be adjusted.
+	 */
+	TP_STRUCT__entry(
+		__field(void*, ring)
+		__field(void*, desc)
+		__field(void*, buf)
+		__string(devname, ring->netdev->name)
+	),
+
+	TP_fast_assign(
+		__entry->ring = ring;
+		__entry->desc = desc;
+		__entry->buf = buf;
+		__assign_str(devname, ring->netdev->name);
+	),
+
+	TP_printk(
+		"netdev: %s ring: %p desc: %p buf %p",
+		__get_str(devname), __entry->ring,
+		__entry->desc, __entry->buf)
+);
+
+DEFINE_EVENT(
+	i40evf_tx_template, i40evf_clean_tx_irq,
+	TP_PROTO(struct i40e_ring *ring,
+		 struct i40e_tx_desc *desc,
+		 struct i40e_tx_buffer *buf),
+
+	TP_ARGS(ring, desc, buf));
+
+DEFINE_EVENT(
+	i40evf_tx_template, i40evf_clean_tx_irq_unmap,
+	TP_PROTO(struct i40e_ring *ring,
+		 struct i40e_tx_desc *desc,
+		 struct i40e_tx_buffer *buf),
+
+	TP_ARGS(ring, desc, buf));
+
+DECLARE_EVENT_CLASS(
+	i40evf_rx_template,
+
+	TP_PROTO(struct i40e_ring *ring,
+		 union i40e_32byte_rx_desc *desc,
+		 struct sk_buff *skb),
+
+	TP_ARGS(ring, desc, skb),
+
+	TP_STRUCT__entry(
+		__field(void*, ring)
+		__field(void*, desc)
+		__field(void*, skb)
+		__string(devname, ring->netdev->name)
+	),
+
+	TP_fast_assign(
+		__entry->ring = ring;
+		__entry->desc = desc;
+		__entry->skb = skb;
+		__assign_str(devname, ring->netdev->name);
+	),
+
+	TP_printk(
+		"netdev: %s ring: %p desc: %p skb %p",
+		__get_str(devname), __entry->ring,
+		__entry->desc, __entry->skb)
+);
+
+DEFINE_EVENT(
+	i40evf_rx_template, i40evf_clean_rx_irq,
+	TP_PROTO(struct i40e_ring *ring,
+		 union i40e_32byte_rx_desc *desc,
+		 struct sk_buff *skb),
+
+	TP_ARGS(ring, desc, skb));
+
+DEFINE_EVENT(
+	i40evf_rx_template, i40evf_clean_rx_irq_rx,
+	TP_PROTO(struct i40e_ring *ring,
+		 union i40e_32byte_rx_desc *desc,
+		 struct sk_buff *skb),
+
+	TP_ARGS(ring, desc, skb));
+
+DECLARE_EVENT_CLASS(
+	i40evf_xmit_template,
+
+	TP_PROTO(struct sk_buff *skb,
+		 struct i40e_ring *ring),
+
+	TP_ARGS(skb, ring),
+
+	TP_STRUCT__entry(
+		__field(void*, skb)
+		__field(void*, ring)
+		__string(devname, ring->netdev->name)
+	),
+
+	TP_fast_assign(
+		__entry->skb = skb;
+		__entry->ring = ring;
+		__assign_str(devname, ring->netdev->name);
+	),
+
+	TP_printk(
+		"netdev: %s skb: %p ring: %p",
+		__get_str(devname), __entry->skb,
+		__entry->ring)
+);
+
+DEFINE_EVENT(
+	i40evf_xmit_template, i40evf_xmit_frame_ring,
+	TP_PROTO(struct sk_buff *skb,
+		 struct i40e_ring *ring),
+
+	TP_ARGS(skb, ring));
+
+DEFINE_EVENT(
+	i40evf_xmit_template, i40evf_xmit_frame_ring_drop,
+	TP_PROTO(struct sk_buff *skb,
+		 struct i40e_ring *ring),
+
+	TP_ARGS(skb, ring));
+
+/* Events unique to the VF. */
+
+#endif /* _I40E_TRACE_H_ */
+/* This must be outside ifdef _I40E_TRACE_H */
+
+/* This trace include file is not located in the .../include/trace
+ * with the kernel tracepoint definitions, because we're a loadable
+ * module.
+ */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE i40e_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/intel/iavf/i40e_txrx.c b/drivers/net/ethernet/intel/iavf/i40e_txrx.c
new file mode 100644
index 000000000000..d4bd06adc145
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40e_txrx.c
@@ -0,0 +1,2513 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include <linux/prefetch.h>
+#include <net/busy_poll.h>
+
+#include "i40evf.h"
+#include "i40e_trace.h"
+#include "i40e_prototype.h"
+
+static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
+				u32 td_tag)
+{
+	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
+			   ((u64)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
+			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+			   ((u64)size  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+			   ((u64)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
+}
+
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
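+
+/* Illustrative use (sketch): the transmit path builds a frame's final data
+ * descriptor roughly as
+ *
+ *	tx_desc->cmd_type_offset_bsz =
+ *		build_ctob(td_cmd | I40E_TXD_CMD, td_offset, size, td_tag);
+ */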
+
+/**
+ * i40e_unmap_and_free_tx_resource - Release a Tx buffer
+ * @ring:      the ring that owns the buffer
+ * @tx_buffer: the buffer to free
+ **/
+static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
+					    struct i40e_tx_buffer *tx_buffer)
+{
+	if (tx_buffer->skb) {
+		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+			kfree(tx_buffer->raw_buf);
+		else
+			dev_kfree_skb_any(tx_buffer->skb);
+		if (dma_unmap_len(tx_buffer, len))
+			dma_unmap_single(ring->dev,
+					 dma_unmap_addr(tx_buffer, dma),
+					 dma_unmap_len(tx_buffer, len),
+					 DMA_TO_DEVICE);
+	} else if (dma_unmap_len(tx_buffer, len)) {
+		dma_unmap_page(ring->dev,
+			       dma_unmap_addr(tx_buffer, dma),
+			       dma_unmap_len(tx_buffer, len),
+			       DMA_TO_DEVICE);
+	}
+
+	tx_buffer->next_to_watch = NULL;
+	tx_buffer->skb = NULL;
+	dma_unmap_len_set(tx_buffer, len, 0);
+	/* tx_buffer must be completely set up in the transmit path */
+}
+
+/**
+ * i40evf_clean_tx_ring - Free all Tx ring buffers
+ * @tx_ring: ring to be cleaned
+ **/
+void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
+{
+	unsigned long bi_size;
+	u16 i;
+
+	/* ring already cleared, nothing to do */
+	if (!tx_ring->tx_bi)
+		return;
+
+	/* Free all the Tx ring sk_buffs */
+	for (i = 0; i < tx_ring->count; i++)
+		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
+
+	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+	memset(tx_ring->tx_bi, 0, bi_size);
+
+	/* Zero out the descriptor ring */
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+
+	if (!tx_ring->netdev)
+		return;
+
+	/* cleanup Tx queue statistics */
+	netdev_tx_reset_queue(txring_txq(tx_ring));
+}
+
+/**
+ * i40evf_free_tx_resources - Free Tx resources per queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
+{
+	i40evf_clean_tx_ring(tx_ring);
+	kfree(tx_ring->tx_bi);
+	tx_ring->tx_bi = NULL;
+
+	if (tx_ring->desc) {
+		dma_free_coherent(tx_ring->dev, tx_ring->size,
+				  tx_ring->desc, tx_ring->dma);
+		tx_ring->desc = NULL;
+	}
+}
+
+/**
+ * i40evf_get_tx_pending - how many Tx descriptors not processed
+ * @ring: the ring of descriptors
+ * @in_sw: is tx_pending being checked in SW or HW
+ *
+ * Since there is no access to the ring head register
+ * in XL710, we need to use our local copies
+ **/
+u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
+{
+	u32 head, tail;
+
+	/* underlying hardware might not allow access and/or always return
+	 * 0 for the head/tail registers so just use the cached values
+	 */
+	head = ring->next_to_clean;
+	tail = ring->next_to_use;
+
+	if (head != tail)
+		return (head < tail) ?
+			tail - head : (tail + ring->count - head);
+
+	return 0;
+}
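+
+/* For example: with a 256-entry ring, head (next_to_clean) = 250 and
+ * tail (next_to_use) = 10, the wrapped count above is 10 + 256 - 250 = 16
+ * descriptors still pending.
+ */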
+
+/**
+ * i40evf_detect_recover_hung - detect and recover hung queues
+ * @vsi: pointer to vsi struct with tx queues
+ *
+ * The VSI's netdev has Tx queues. Check each of those Tx queues for a hang
+ * and, if one is detected, trigger recovery by issuing a SW interrupt.
+ **/
+void i40evf_detect_recover_hung(struct i40e_vsi *vsi)
+{
+	struct i40e_ring *tx_ring = NULL;
+	struct net_device *netdev;
+	unsigned int i;
+	int packets;
+
+	if (!vsi)
+		return;
+
+	if (test_bit(__I40E_VSI_DOWN, vsi->state))
+		return;
+
+	netdev = vsi->netdev;
+	if (!netdev)
+		return;
+
+	if (!netif_carrier_ok(netdev))
+		return;
+
+	for (i = 0; i < vsi->back->num_active_queues; i++) {
+		tx_ring = &vsi->back->tx_rings[i];
+		if (tx_ring && tx_ring->desc) {
+			/* If packet counter has not changed the queue is
+			 * likely stalled, so force an interrupt for this
+			 * queue.
+			 *
+			 * prev_pkt_ctr would be negative if there was no
+			 * pending work.
+			 */
+			packets = tx_ring->stats.packets & INT_MAX;
+			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
+				i40evf_force_wb(vsi, tx_ring->q_vector);
+				continue;
+			}
+
+			/* Memory barrier between read of packet count and call
+			 * to i40evf_get_tx_pending()
+			 */
+			smp_rmb();
+			tx_ring->tx_stats.prev_pkt_ctr =
+			  i40evf_get_tx_pending(tx_ring, true) ? packets : -1;
+		}
+	}
+}
+
+#define WB_STRIDE 4
+
+/**
+ * i40e_clean_tx_irq - Reclaim resources after transmit completes
+ * @vsi: the VSI we care about
+ * @tx_ring: Tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+			      struct i40e_ring *tx_ring, int napi_budget)
+{
+	u16 i = tx_ring->next_to_clean;
+	struct i40e_tx_buffer *tx_buf;
+	struct i40e_tx_desc *tx_desc;
+	unsigned int total_bytes = 0, total_packets = 0;
+	unsigned int budget = vsi->work_limit;
+
+	tx_buf = &tx_ring->tx_bi[i];
+	tx_desc = I40E_TX_DESC(tx_ring, i);
+	i -= tx_ring->count;
+
+	do {
+		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+		/* if next_to_watch is not set then there is no work pending */
+		if (!eop_desc)
+			break;
+
+		/* prevent any other reads prior to eop_desc */
+		smp_rmb();
+
+		i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
+		/* if the descriptor isn't done, no work yet to do */
+		if (!(eop_desc->cmd_type_offset_bsz &
+		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+			break;
+
+		/* clear next_to_watch to prevent false hangs */
+		tx_buf->next_to_watch = NULL;
+
+		/* update the statistics for this packet */
+		total_bytes += tx_buf->bytecount;
+		total_packets += tx_buf->gso_segs;
+
+		/* free the skb */
+		napi_consume_skb(tx_buf->skb, napi_budget);
+
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buf, dma),
+				 dma_unmap_len(tx_buf, len),
+				 DMA_TO_DEVICE);
+
+		/* clear tx_buffer data */
+		tx_buf->skb = NULL;
+		dma_unmap_len_set(tx_buf, len, 0);
+
+		/* unmap remaining buffers */
+		while (tx_desc != eop_desc) {
+			i40e_trace(clean_tx_irq_unmap,
+				   tx_ring, tx_desc, tx_buf);
+
+			tx_buf++;
+			tx_desc++;
+			i++;
+			if (unlikely(!i)) {
+				i -= tx_ring->count;
+				tx_buf = tx_ring->tx_bi;
+				tx_desc = I40E_TX_DESC(tx_ring, 0);
+			}
+
+			/* unmap any remaining paged data */
+			if (dma_unmap_len(tx_buf, len)) {
+				dma_unmap_page(tx_ring->dev,
+					       dma_unmap_addr(tx_buf, dma),
+					       dma_unmap_len(tx_buf, len),
+					       DMA_TO_DEVICE);
+				dma_unmap_len_set(tx_buf, len, 0);
+			}
+		}
+
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buf++;
+		tx_desc++;
+		i++;
+		if (unlikely(!i)) {
+			i -= tx_ring->count;
+			tx_buf = tx_ring->tx_bi;
+			tx_desc = I40E_TX_DESC(tx_ring, 0);
+		}
+
+		prefetch(tx_desc);
+
+		/* update budget accounting */
+		budget--;
+	} while (likely(budget));
+
+	i += tx_ring->count;
+	tx_ring->next_to_clean = i;
+	u64_stats_update_begin(&tx_ring->syncp);
+	tx_ring->stats.bytes += total_bytes;
+	tx_ring->stats.packets += total_packets;
+	u64_stats_update_end(&tx_ring->syncp);
+	tx_ring->q_vector->tx.total_bytes += total_bytes;
+	tx_ring->q_vector->tx.total_packets += total_packets;
+
+	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+		/* check to see if there are < 4 descriptors
+		 * waiting to be written back, then kick the hardware to force
+		 * them to be written back in case we stay in NAPI.
+		 * In this mode on X722 we do not enable interrupts.
+		 */
+		unsigned int j = i40evf_get_tx_pending(tx_ring, false);
+
+		if (budget &&
+		    ((j / WB_STRIDE) == 0) && (j > 0) &&
+		    !test_bit(__I40E_VSI_DOWN, vsi->state) &&
+		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+			tx_ring->arm_wb = true;
+	}
+
+	/* notify netdev of completed buffers */
+	netdev_tx_completed_queue(txring_txq(tx_ring),
+				  total_packets, total_bytes);
+
+#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
+	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+		if (__netif_subqueue_stopped(tx_ring->netdev,
+					     tx_ring->queue_index) &&
+		   !test_bit(__I40E_VSI_DOWN, vsi->state)) {
+			netif_wake_subqueue(tx_ring->netdev,
+					    tx_ring->queue_index);
+			++tx_ring->tx_stats.restart_queue;
+		}
+	}
+
+	return !!budget;
+}
+
+/**
+ * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
+ * @vsi: the VSI we care about
+ * @q_vector: the vector on which to enable writeback
+ *
+ **/
+static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
+				  struct i40e_q_vector *q_vector)
+{
+	u16 flags = q_vector->tx.ring[0].flags;
+	u32 val;
+
+	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
+		return;
+
+	if (q_vector->arm_wb_state)
+		return;
+
+	val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
+	      I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */
+
+	wr32(&vsi->back->hw,
+	     I40E_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
+	q_vector->arm_wb_state = true;
+}
+
+/**
+ * i40evf_force_wb - Issue SW Interrupt so HW does a wb
+ * @vsi: the VSI we care about
+ * @q_vector: the vector on which to force writeback
+ *
+ **/
+void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+{
+	u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+		  I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
+		  I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
+		  I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
+		  /* allow 00 to be written to the index */;
+
+	wr32(&vsi->back->hw,
+	     I40E_VFINT_DYN_CTLN1(q_vector->reg_idx),
+	     val);
+}
+
+static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
+					struct i40e_ring_container *rc)
+{
+	return &q_vector->rx == rc;
+}
+
+static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
+{
+	unsigned int divisor;
+
+	switch (q_vector->adapter->link_speed) {
+	case I40E_LINK_SPEED_40GB:
+		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
+		break;
+	case I40E_LINK_SPEED_25GB:
+	case I40E_LINK_SPEED_20GB:
+		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
+		break;
+	default:
+	case I40E_LINK_SPEED_10GB:
+		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
+		break;
+	case I40E_LINK_SPEED_1GB:
+	case I40E_LINK_SPEED_100MB:
+		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
+		break;
+	}
+
+	return divisor;
+}
+
+/**
+ * i40e_update_itr - update the dynamic ITR value based on statistics
+ * @q_vector: structure containing interrupt and ring information
+ * @rc: structure containing ring performance data
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt.  The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern.  Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ **/
+static void i40e_update_itr(struct i40e_q_vector *q_vector,
+			    struct i40e_ring_container *rc)
+{
+	unsigned int avg_wire_size, packets, bytes, itr;
+	unsigned long next_update = jiffies;
+
+	/* If we don't have any rings just leave ourselves set for maximum
+	 * possible latency so we take ourselves out of the equation.
+	 */
+	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
+		return;
+
+	/* For Rx we want to push the delay up and default to low latency.
+	 * for Tx we want to pull the delay down and default to high latency.
+	 */
+	itr = i40e_container_is_rx(q_vector, rc) ?
+	      I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
+	      I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
+
+	/* If we didn't update within up to 1 - 2 jiffies we can assume
+	 * that either packets are coming in so slow there hasn't been
+	 * any work, or that there is so much work that NAPI is dealing
+	 * with interrupt moderation and we don't need to do anything.
+	 */
+	if (time_after(next_update, rc->next_update))
+		goto clear_counts;
+
+	/* If itr_countdown is set it means we programmed an ITR within
+	 * the last 4 interrupt cycles. This has a side effect of us
+	 * potentially firing an early interrupt. In order to work around
+	 * this we need to throw out any data received for a few
+	 * interrupts following the update.
+	 */
+	if (q_vector->itr_countdown) {
+		itr = rc->target_itr;
+		goto clear_counts;
+	}
+
+	packets = rc->total_packets;
+	bytes = rc->total_bytes;
+
+	if (i40e_container_is_rx(q_vector, rc)) {
+		/* For Rx, if there are 1 to 4 packets and fewer than 9000
+		 * bytes, assume there is insufficient data to use the bulk
+		 * rate limiting approach unless Tx is already in bulk rate
+		 * limiting. We are likely latency driven.
+		 */
+		if (packets && packets < 4 && bytes < 9000 &&
+		    (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
+			itr = I40E_ITR_ADAPTIVE_LATENCY;
+			goto adjust_by_size;
+		}
+	} else if (packets < 4) {
+		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
+		 * bulk mode and we are receiving 4 or fewer packets just
+		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
+		 * that the Rx can relax.
+		 */
+		if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
+		    (q_vector->rx.target_itr & I40E_ITR_MASK) ==
+		     I40E_ITR_ADAPTIVE_MAX_USECS)
+			goto clear_counts;
+	} else if (packets > 32) {
+		/* If we have processed over 32 packets in a single interrupt
+		 * for Tx assume we need to switch over to "bulk" mode.
+		 */
+		rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
+	}
+
+	/* We have no packets to actually measure against. This means
+	 * either one of the other queues on this vector is active or
+	 * we are a Tx queue doing TSO with too high of an interrupt rate.
+	 *
+	 * Between 4 and 56 we can assume that our current interrupt delay
+	 * is only slightly too low. As such we should increase it by a small
+	 * fixed amount.
+	 */
+	if (packets < 56) {
+		itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
+		if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
+			itr &= I40E_ITR_ADAPTIVE_LATENCY;
+			itr += I40E_ITR_ADAPTIVE_MAX_USECS;
+		}
+		goto clear_counts;
+	}
+
+	if (packets <= 256) {
+		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
+		itr &= I40E_ITR_MASK;
+
+		/* Between 56 and 112 is our "goldilocks" zone where we are
+		 * working out "just right". Just report that our current
+		 * ITR is good for us.
+		 */
+		if (packets <= 112)
+			goto clear_counts;
+
+		/* If packet count is 128 or greater we are likely looking
+		 * at a slight overrun of the delay we want. Try halving
+		 * our delay to see if that will cut the number of packets
+		 * in half per interrupt.
+		 */
+		itr /= 2;
+		itr &= I40E_ITR_MASK;
+		if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
+			itr = I40E_ITR_ADAPTIVE_MIN_USECS;
+
+		goto clear_counts;
+	}
+
+	/* The paths below assume we are dealing with a bulk ITR since
+	 * number of packets is greater than 256. We are just going to have
+	 * to compute a value and try to bring the count under control,
+	 * though for smaller packet sizes there isn't much we can do as
+	 * NAPI polling will likely be kicking in sooner rather than later.
+	 */
+	itr = I40E_ITR_ADAPTIVE_BULK;
+
+adjust_by_size:
+	/* If packet counts are 256 or greater we can assume we have a gross
+	 * overestimation of what the rate should be. Instead of trying to fine
+	 * tune it just use the formula below to try and dial in an exact value
+	 * given the current packet size of the frame.
+	 */
+	avg_wire_size = bytes / packets;
+
+	/* The following is a crude approximation of:
+	 *  wmem_default / (size + overhead) = desired_pkts_per_int
+	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
+	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
+	 *
+	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
+	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
+	 * formula down to
+	 *
+	 *  (170 * (size + 24)) / (size + 640) = ITR
+	 *
+	 * We first do some math on the packet size and then finally bitshift
+	 * by 8 after rounding up. We also have to account for PCIe link speed
+	 * difference as ITR scales based on this.
+	 */
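+	/* Worked example (illustrative): for a 256 byte frame the crude
+	 * approximation above gives (170 * (256 + 24)) / (256 + 640) ~= 53
+	 * as the target ITR, before the 256x scaling and link-speed
+	 * adjustment handled below.
+	 */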
+	if (avg_wire_size <= 60) {
+		/* Start at 250k ints/sec */
+		avg_wire_size = 4096;
+	} else if (avg_wire_size <= 380) {
+		/* 250K ints/sec to 60K ints/sec */
+		avg_wire_size *= 40;
+		avg_wire_size += 1696;
+	} else if (avg_wire_size <= 1084) {
+		/* 60K ints/sec to 36K ints/sec */
+		avg_wire_size *= 15;
+		avg_wire_size += 11452;
+	} else if (avg_wire_size <= 1980) {
+		/* 36K ints/sec to 30K ints/sec */
+		avg_wire_size *= 5;
+		avg_wire_size += 22420;
+	} else {
+		/* plateau at a limit of 30K ints/sec */
+		avg_wire_size = 32256;
+	}
+
+	/* If we are in low latency mode, halve our delay, which doubles the
+	 * rate to somewhere between 100K and 16K ints/sec.
+	 */
+	if (itr & I40E_ITR_ADAPTIVE_LATENCY)
+		avg_wire_size /= 2;
+
+	/* Resultant value is 256 times larger than it needs to be. This
+	 * gives us room to adjust the value as needed to either increase
+	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
+	 *
+	 * Use addition as we have already recorded the new latency flag
+	 * for the ITR value.
+	 */
+	itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
+	       I40E_ITR_ADAPTIVE_MIN_INC;
+
+	if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
+		itr &= I40E_ITR_ADAPTIVE_LATENCY;
+		itr += I40E_ITR_ADAPTIVE_MAX_USECS;
+	}
+
+clear_counts:
+	/* write back value */
+	rc->target_itr = itr;
+
+	/* next update should occur within next jiffy */
+	rc->next_update = next_update + 1;
+
+	rc->total_bytes = 0;
+	rc->total_packets = 0;
+}
+
+/**
+ * i40evf_setup_tx_descriptors - Allocate the Tx descriptors
+ * @tx_ring: the tx ring to set up
+ *
+ * Return 0 on success, negative on error
+ **/
+int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
+{
+	struct device *dev = tx_ring->dev;
+	int bi_size;
+
+	if (!dev)
+		return -ENOMEM;
+
+	/* warn if we are about to overwrite the pointer */
+	WARN_ON(tx_ring->tx_bi);
+	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
+	if (!tx_ring->tx_bi)
+		goto err;
+
+	/* round up to nearest 4K */
+	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+					   &tx_ring->dma, GFP_KERNEL);
+	if (!tx_ring->desc) {
+		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
+			 tx_ring->size);
+		goto err;
+	}
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+	tx_ring->tx_stats.prev_pkt_ctr = -1;
+	return 0;
+
+err:
+	kfree(tx_ring->tx_bi);
+	tx_ring->tx_bi = NULL;
+	return -ENOMEM;
+}
+
+/**
+ * i40evf_clean_rx_ring - Free Rx buffers
+ * @rx_ring: ring to be cleaned
+ **/
+void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
+{
+	unsigned long bi_size;
+	u16 i;
+
+	/* ring already cleared, nothing to do */
+	if (!rx_ring->rx_bi)
+		return;
+
+	if (rx_ring->skb) {
+		dev_kfree_skb(rx_ring->skb);
+		rx_ring->skb = NULL;
+	}
+
+	/* Free all the Rx ring sk_buffs */
+	for (i = 0; i < rx_ring->count; i++) {
+		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+
+		if (!rx_bi->page)
+			continue;
+
+		/* Invalidate cache lines that may have been written to by
+		 * device so that we avoid corrupting memory.
+		 */
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      rx_bi->dma,
+					      rx_bi->page_offset,
+					      rx_ring->rx_buf_len,
+					      DMA_FROM_DEVICE);
+
+		/* free resources associated with mapping */
+		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
+				     i40e_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+				     I40E_RX_DMA_ATTR);
+
+		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
+
+		rx_bi->page = NULL;
+		rx_bi->page_offset = 0;
+	}
+
+	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+	memset(rx_ring->rx_bi, 0, bi_size);
+
+	/* Zero out the descriptor ring */
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_alloc = 0;
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+}
+
+/**
+ * i40evf_free_rx_resources - Free Rx resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
+{
+	i40evf_clean_rx_ring(rx_ring);
+	kfree(rx_ring->rx_bi);
+	rx_ring->rx_bi = NULL;
+
+	if (rx_ring->desc) {
+		dma_free_coherent(rx_ring->dev, rx_ring->size,
+				  rx_ring->desc, rx_ring->dma);
+		rx_ring->desc = NULL;
+	}
+}
+
+/**
+ * i40evf_setup_rx_descriptors - Allocate Rx descriptors
+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
+{
+	struct device *dev = rx_ring->dev;
+	int bi_size;
+
+	/* warn if we are about to overwrite the pointer */
+	WARN_ON(rx_ring->rx_bi);
+	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
+	if (!rx_ring->rx_bi)
+		goto err;
+
+	u64_stats_init(&rx_ring->syncp);
+
+	/* Round up to nearest 4K */
+	rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
+	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+					   &rx_ring->dma, GFP_KERNEL);
+
+	if (!rx_ring->desc) {
+		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
+			 rx_ring->size);
+		goto err;
+	}
+
+	rx_ring->next_to_alloc = 0;
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	return 0;
+err:
+	kfree(rx_ring->rx_bi);
+	rx_ring->rx_bi = NULL;
+	return -ENOMEM;
+}
+
+/**
+ * i40e_release_rx_desc - Store the new tail and head values
+ * @rx_ring: ring to bump
+ * @val: new head index
+ **/
+static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
+{
+	rx_ring->next_to_use = val;
+
+	/* update next to alloc since we have filled the ring */
+	rx_ring->next_to_alloc = val;
+
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64).
+	 */
+	wmb();
+	writel(val, rx_ring->tail);
+}
+
+/**
+ * i40e_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset value for ring into the data buffer.
+ */
+static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
+{
+	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
+}
+
+/**
+ * i40e_alloc_mapped_page - recycle or make a new page
+ * @rx_ring: ring to use
+ * @bi: rx_buffer struct to modify
+ *
+ * Returns true if the page was successfully allocated or
+ * reused.
+ **/
+static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
+				   struct i40e_rx_buffer *bi)
+{
+	struct page *page = bi->page;
+	dma_addr_t dma;
+
+	/* since we are recycling buffers we should seldom need to alloc */
+	if (likely(page)) {
+		rx_ring->rx_stats.page_reuse_count++;
+		return true;
+	}
+
+	/* alloc new page for storage */
+	page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
+	if (unlikely(!page)) {
+		rx_ring->rx_stats.alloc_page_failed++;
+		return false;
+	}
+
+	/* map page for use */
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+				 i40e_rx_pg_size(rx_ring),
+				 DMA_FROM_DEVICE,
+				 I40E_RX_DMA_ATTR);
+
+	/* if mapping failed free memory back to system since
+	 * there isn't much point in holding memory we can't use
+	 */
+	if (dma_mapping_error(rx_ring->dev, dma)) {
+		__free_pages(page, i40e_rx_pg_order(rx_ring));
+		rx_ring->rx_stats.alloc_page_failed++;
+		return false;
+	}
+
+	bi->dma = dma;
+	bi->page = page;
+	bi->page_offset = i40e_rx_offset(rx_ring);
+
+	/* initialize pagecnt_bias to 1 representing we fully own page */
+	bi->pagecnt_bias = 1;
+
+	return true;
+}
+
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring:  rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+			     struct sk_buff *skb, u16 vlan_tag)
+{
+	struct i40e_q_vector *q_vector = rx_ring->q_vector;
+
+	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+	    (vlan_tag & VLAN_VID_MASK))
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+	napi_gro_receive(&q_vector->napi, skb);
+}
+
+/**
+ * i40evf_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ *
+ * Returns false if all allocations were successful, true if any fail
+ **/
+bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+	u16 ntu = rx_ring->next_to_use;
+	union i40e_rx_desc *rx_desc;
+	struct i40e_rx_buffer *bi;
+
+	/* do nothing if no valid netdev defined */
+	if (!rx_ring->netdev || !cleaned_count)
+		return false;
+
+	rx_desc = I40E_RX_DESC(rx_ring, ntu);
+	bi = &rx_ring->rx_bi[ntu];
+
+	do {
+		if (!i40e_alloc_mapped_page(rx_ring, bi))
+			goto no_buffers;
+
+		/* sync the buffer for use by the device */
+		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+						 bi->page_offset,
+						 rx_ring->rx_buf_len,
+						 DMA_FROM_DEVICE);
+
+		/* Refresh the desc even if buffer_addrs didn't change
+		 * because each write-back erases this info.
+		 */
+		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+
+		rx_desc++;
+		bi++;
+		ntu++;
+		if (unlikely(ntu == rx_ring->count)) {
+			rx_desc = I40E_RX_DESC(rx_ring, 0);
+			bi = rx_ring->rx_bi;
+			ntu = 0;
+		}
+
+		/* clear the status bits for the next_to_use descriptor */
+		rx_desc->wb.qword1.status_error_len = 0;
+
+		cleaned_count--;
+	} while (cleaned_count);
+
+	if (rx_ring->next_to_use != ntu)
+		i40e_release_rx_desc(rx_ring, ntu);
+
+	return false;
+
+no_buffers:
+	if (rx_ring->next_to_use != ntu)
+		i40e_release_rx_desc(rx_ring, ntu);
+
+	/* make sure to come back via polling to try again after
+	 * allocation failure
+	 */
+	return true;
+}
+
+/**
+ * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * @vsi: the VSI we care about
+ * @skb: skb currently being received and modified
+ * @rx_desc: the receive descriptor
+ **/
+static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
+				    struct sk_buff *skb,
+				    union i40e_rx_desc *rx_desc)
+{
+	struct i40e_rx_ptype_decoded decoded;
+	u32 rx_error, rx_status;
+	bool ipv4, ipv6;
+	u8 ptype;
+	u64 qword;
+
+	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+	ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
+	rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+		   I40E_RXD_QW1_ERROR_SHIFT;
+	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+		    I40E_RXD_QW1_STATUS_SHIFT;
+	decoded = decode_rx_desc_ptype(ptype);
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	skb_checksum_none_assert(skb);
+
+	/* Rx csum enabled and ip headers found? */
+	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
+		return;
+
+	/* did the hardware decode the packet and checksum? */
+	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+		return;
+
+	/* both known and outer_ip must be set for the below code to work */
+	if (!(decoded.known && decoded.outer_ip))
+		return;
+
+	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
+	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
+	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
+	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
+
+	if (ipv4 &&
+	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+		goto checksum_fail;
+
+	/* likely incorrect csum if alternate IP extension headers found */
+	if (ipv6 &&
+	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+		/* don't increment checksum err here, non-fatal err */
+		return;
+
+	/* there was some L4 error, count error and punt packet to the stack */
+	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
+		goto checksum_fail;
+
+	/* handle packets that were not able to be checksummed due
+	 * to arrival speed, in this case the stack can compute
+	 * the csum.
+	 */
+	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
+		return;
+
+	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
+	switch (decoded.inner_prot) {
+	case I40E_RX_PTYPE_INNER_PROT_TCP:
+	case I40E_RX_PTYPE_INNER_PROT_UDP:
+	case I40E_RX_PTYPE_INNER_PROT_SCTP:
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		/* fall through */
+	default:
+		break;
+	}
+
+	return;
+
+checksum_fail:
+	vsi->back->hw_csum_rx_error++;
+}
+
+/**
+ * i40e_ptype_to_htype - get a hash type
+ * @ptype: the ptype value from the descriptor
+ *
+ * Returns a hash type to be used by skb_set_hash
+ **/
+static inline int i40e_ptype_to_htype(u8 ptype)
+{
+	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
+
+	if (!decoded.known)
+		return PKT_HASH_TYPE_NONE;
+
+	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
+		return PKT_HASH_TYPE_L4;
+	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
+		return PKT_HASH_TYPE_L3;
+	else
+		return PKT_HASH_TYPE_L2;
+}
+
+/**
+ * i40e_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ * @skb: skb currently being received and modified
+ * @rx_ptype: Rx packet type
+ **/
+static inline void i40e_rx_hash(struct i40e_ring *ring,
+				union i40e_rx_desc *rx_desc,
+				struct sk_buff *skb,
+				u8 rx_ptype)
+{
+	u32 hash;
+	const __le64 rss_mask =
+		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+	if (!(ring->netdev->features & NETIF_F_RXHASH))
+		return;
+
+	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
+		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+	}
+}
+
+/**
+ * i40evf_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ * @rx_ptype: the packet type decoded by hardware
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
+ **/
+static inline
+void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
+			       union i40e_rx_desc *rx_desc, struct sk_buff *skb,
+			       u8 rx_ptype)
+{
+	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+
+	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
+
+	skb_record_rx_queue(skb, rx_ring->queue_index);
+
+	/* modifies the skb - consumes the enet header */
+	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+}
+
+/**
+ * i40e_cleanup_headers - Correct empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being fixed
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+	/* if eth_skb_pad returns an error the skb was freed */
+	if (eth_skb_pad(skb))
+		return true;
+
+	return false;
+}
+
+/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+			       struct i40e_rx_buffer *old_buff)
+{
+	struct i40e_rx_buffer *new_buff;
+	u16 nta = rx_ring->next_to_alloc;
+
+	new_buff = &rx_ring->rx_bi[nta];
+
+	/* update, and store next to alloc */
+	nta++;
+	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+	/* transfer page from old buffer to new buffer */
+	new_buff->dma		= old_buff->dma;
+	new_buff->page		= old_buff->page;
+	new_buff->page_offset	= old_buff->page_offset;
+	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
+}
+
+/**
+ * i40e_page_is_reusable - check if any reuse is possible
+ * @page: page struct to check
+ *
+ * A page is not reusable if it was allocated under low memory
+ * conditions, or it's not in the same NUMA node as this CPU.
+ */
+static inline bool i40e_page_is_reusable(struct page *page)
+{
+	return (page_to_nid(page) == numa_mem_id()) &&
+		!page_is_pfmemalloc(page);
+}
+
+/**
+ * i40e_can_reuse_rx_page - check if the page can be reused for another Rx
+ * @rx_buffer: buffer containing the page
+ *
+ * If page is reusable, rx_buffer->page_offset is adjusted to point to
+ * an unused region in the page.
+ *
+ * For small pages, @truesize will be a constant value, half the size
+ * of the memory at page.  We'll attempt to alternate between high and
+ * low halves of the page, with one half ready for use by the hardware
+ * and the other half being consumed by the stack.  We use the page
+ * ref count to determine whether the stack has finished consuming the
+ * portion of this page that was passed up with a previous packet.  If
+ * the page ref count is >1, we'll assume the "other" half page is
+ * still busy, and this page cannot be reused.
+ *
+ * For larger pages, @truesize will be the actual space used by the
+ * received packet (adjusted upward to an even multiple of the cache
+ * line size).  This will advance through the page by the amount
+ * actually consumed by the received packets while there is still
+ * space for a buffer.  Each region of larger pages will be used at
+ * most once, after which the page will not be reused.
+ *
+ * In either case, if the page is reusable its refcount is increased.
+ **/
+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
+{
+	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+	struct page *page = rx_buffer->page;
+
+	/* Is any reuse possible? */
+	if (unlikely(!i40e_page_is_reusable(page)))
+		return false;
+
+#if (PAGE_SIZE < 8192)
+	/* if we are only owner of page we can reuse it */
+	if (unlikely((page_count(page) - pagecnt_bias) > 1))
+		return false;
+#else
+#define I40E_LAST_OFFSET \
+	(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
+	if (rx_buffer->page_offset > I40E_LAST_OFFSET)
+		return false;
+#endif
+
+	/* If we have drained the page fragment pool we need to update
+	 * the pagecnt_bias and page count so that we fully restock the
+	 * number of references the driver holds.
+	 */
+	if (unlikely(!pagecnt_bias)) {
+		page_ref_add(page, USHRT_MAX);
+		rx_buffer->pagecnt_bias = USHRT_MAX;
+	}
+
+	return true;
+}
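+
+/* Illustrative note on the reuse check above: (page_count - pagecnt_bias) is
+ * roughly the number of fragments from this page that the stack still holds,
+ * including the one just handed up on this pass.  The small-page test allows
+ * exactly one outstanding fragment, i.e. the other half of the page must have
+ * been freed before the page can be flipped and handed back to hardware.
+ */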
+
+/**
+ * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @skb: sk_buff to place the data into
+ * @size: packet length from rx_desc
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * It will just attach the page as a frag to the skb.
+ *
+ * The function will then update the page offset.
+ **/
+static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
+			     struct i40e_rx_buffer *rx_buffer,
+			     struct sk_buff *skb,
+			     unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
+#endif
+
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+			rx_buffer->page_offset, size, truesize);
+
+	/* page is being used so we must update the page offset */
+#if (PAGE_SIZE < 8192)
+	rx_buffer->page_offset ^= truesize;
+#else
+	rx_buffer->page_offset += truesize;
+#endif
+}
+
+/**
+ * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @size: size of buffer to add to skb
+ *
+ * This function will pull an Rx buffer from the ring and synchronize it
+ * for use by the CPU.
+ */
+static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
+						 const unsigned int size)
+{
+	struct i40e_rx_buffer *rx_buffer;
+
+	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+	prefetchw(rx_buffer->page);
+
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev,
+				      rx_buffer->dma,
+				      rx_buffer->page_offset,
+				      size,
+				      DMA_FROM_DEVICE);
+
+	/* We have pulled a buffer for use, so decrement pagecnt_bias */
+	rx_buffer->pagecnt_bias--;
+
+	return rx_buffer;
+}
+
+/**
+ * i40e_construct_skb - Allocate skb and populate it
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: rx buffer to pull data from
+ * @size: size of buffer to add to skb
+ *
+ * This function allocates an skb.  It then populates it with the page
+ * data from the current receive descriptor, taking care to set up the
+ * skb correctly.
+ */
+static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
+					  struct i40e_rx_buffer *rx_buffer,
+					  unsigned int size)
+{
+	void *va;
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+	unsigned int headlen;
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+	prefetch(va);
+#if L1_CACHE_BYTES < 128
+	prefetch(va + L1_CACHE_BYTES);
+#endif
+
+	/* allocate a skb to store the frags */
+	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+			       I40E_RX_HDR_SIZE,
+			       GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!skb))
+		return NULL;
+
+	/* Determine available headroom for copy */
+	headlen = size;
+	if (headlen > I40E_RX_HDR_SIZE)
+		headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+
+	/* update all of the pointers */
+	size -= headlen;
+	if (size) {
+		skb_add_rx_frag(skb, 0, rx_buffer->page,
+				rx_buffer->page_offset + headlen,
+				size, truesize);
+
+		/* buffer is used by skb, update page_offset */
+#if (PAGE_SIZE < 8192)
+		rx_buffer->page_offset ^= truesize;
+#else
+		rx_buffer->page_offset += truesize;
+#endif
+	} else {
+		/* buffer is unused, reset bias back to rx_buffer */
+		rx_buffer->pagecnt_bias++;
+	}
+
+	return skb;
+}
+
+/**
+ * i40e_build_skb - Build skb around an existing buffer
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buffer: Rx buffer to pull data from
+ * @size: size of buffer to add to skb
+ *
+ * This function builds an skb around an existing Rx buffer, taking care
+ * to set up the skb correctly and avoid any memcpy overhead.
+ */
+static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
+				      struct i40e_rx_buffer *rx_buffer,
+				      unsigned int size)
+{
+	void *va;
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+				SKB_DATA_ALIGN(I40E_SKB_PAD + size);
+#endif
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+	prefetch(va);
+#if L1_CACHE_BYTES < 128
+	prefetch(va + L1_CACHE_BYTES);
+#endif
+	/* build an skb around the page buffer */
+	skb = build_skb(va - I40E_SKB_PAD, truesize);
+	if (unlikely(!skb))
+		return NULL;
+
+	/* update pointers within the skb to store the data */
+	skb_reserve(skb, I40E_SKB_PAD);
+	__skb_put(skb, size);
+
+	/* buffer is used by skb, update page_offset */
+#if (PAGE_SIZE < 8192)
+	rx_buffer->page_offset ^= truesize;
+#else
+	rx_buffer->page_offset += truesize;
+#endif
+
+	return skb;
+}
+
+/**
+ * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: rx buffer to pull data from
+ *
+ * This function will clean up the contents of the rx_buffer.  It will
+ * either recycle the buffer or unmap it and free the associated resources.
+ */
+static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
+			       struct i40e_rx_buffer *rx_buffer)
+{
+	if (i40e_can_reuse_rx_page(rx_buffer)) {
+		/* hand second half of page back to the ring */
+		i40e_reuse_rx_page(rx_ring, rx_buffer);
+		rx_ring->rx_stats.page_reuse_count++;
+	} else {
+		/* we are not reusing the buffer so unmap it */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     i40e_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+	}
+
+	/* clear contents of buffer_info */
+	rx_buffer->page = NULL;
+}
+
+/**
+ * i40e_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean.  If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
+			    union i40e_rx_desc *rx_desc,
+			    struct sk_buff *skb)
+{
+	u32 ntc = rx_ring->next_to_clean + 1;
+
+	/* fetch, update, and store next to clean */
+	ntc = (ntc < rx_ring->count) ? ntc : 0;
+	rx_ring->next_to_clean = ntc;
+
+	prefetch(I40E_RX_DESC(rx_ring, ntc));
+
+	/* if we are the last buffer then there is nothing else to do */
+#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
+	if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
+		return false;
+
+	rx_ring->rx_stats.non_eop_descs++;
+
+	return true;
+}
+
+/**
+ * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing.  The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed
+ **/
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+{
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	struct sk_buff *skb = rx_ring->skb;
+	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+	bool failure = false;
+
+	while (likely(total_rx_packets < (unsigned int)budget)) {
+		struct i40e_rx_buffer *rx_buffer;
+		union i40e_rx_desc *rx_desc;
+		unsigned int size;
+		u16 vlan_tag;
+		u8 rx_ptype;
+		u64 qword;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+			failure = failure ||
+				  i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+		/* status_error_len will always be zero for unused descriptors
+		 * because it's cleared in cleanup, and overlaps with hdr_addr
+		 * which is always zero because packet split isn't used.  If
+		 * the hardware wrote DD then the length will be non-zero.
+		 */
+		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we have
+		 * verified the descriptor has been written back.
+		 */
+		dma_rmb();
+
+		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+		if (!size)
+			break;
+
+		i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
+		rx_buffer = i40e_get_rx_buffer(rx_ring, size);
+
+		/* retrieve a buffer from the ring */
+		if (skb)
+			i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
+		else if (ring_uses_build_skb(rx_ring))
+			skb = i40e_build_skb(rx_ring, rx_buffer, size);
+		else
+			skb = i40e_construct_skb(rx_ring, rx_buffer, size);
+
+		/* exit if we failed to retrieve a buffer */
+		if (!skb) {
+			rx_ring->rx_stats.alloc_buff_failed++;
+			rx_buffer->pagecnt_bias++;
+			break;
+		}
+
+		i40e_put_rx_buffer(rx_ring, rx_buffer);
+		cleaned_count++;
+
+		if (i40e_is_non_eop(rx_ring, rx_desc, skb))
+			continue;
+
+		/* ERR_MASK will only have valid bits if EOP set, and
+		 * what we are doing here is actually checking
+		 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
+		 * the error field
+		 */
+		if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
+			dev_kfree_skb_any(skb);
+			skb = NULL;
+			continue;
+		}
+
+		if (i40e_cleanup_headers(rx_ring, skb)) {
+			skb = NULL;
+			continue;
+		}
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+
+		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+			   I40E_RXD_QW1_PTYPE_SHIFT;
+
+		/* populate checksum, VLAN, and protocol */
+		i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+
+		vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
+			   le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+
+		i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
+		i40e_receive_skb(rx_ring, skb, vlan_tag);
+		skb = NULL;
+
+		/* update budget accounting */
+		total_rx_packets++;
+	}
+
+	rx_ring->skb = skb;
+
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
+	u64_stats_update_end(&rx_ring->syncp);
+	rx_ring->q_vector->rx.total_packets += total_rx_packets;
+	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+
+	/* guarantee a trip back through this routine if there was a failure */
+	return failure ? budget : (int)total_rx_packets;
+}
+
+static inline u32 i40e_buildreg_itr(const int type, u16 itr)
+{
+	u32 val;
+
+	/* We don't bother with setting the CLEARPBA bit as the data sheet
+	 * points out doing so is "meaningless since it was already
+	 * auto-cleared". The auto-clearing happens when the interrupt is
+	 * asserted.
+	 *
+	 * Hardware errata 28 also indicates that writing to a
+	 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
+	 * an event in the PBA anyway so we need to rely on the automask
+	 * to hold pending events for us until the interrupt is re-enabled
+	 *
+	 * The itr value is reported in microseconds, and the register
+	 * value is recorded in 2 microsecond units. For this reason we
+	 * only need to shift by the interval shift - 1 instead of the
+	 * full value.
+	 */
+	itr &= I40E_ITR_MASK;
+
+	val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+	      (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+	      (itr << (I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1));
+
+	return val;
+}
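+
+/* Worked example for i40e_buildreg_itr() above: with type == I40E_RX_ITR (0)
+ * and itr == 50 usec the INTENA bit is set, ITR_INDX is 0 and the interval
+ * field ends up as 25, since itr << (INTERVAL_SHIFT - 1) is equivalent to
+ * (itr / 2) << INTERVAL_SHIFT for the even values the driver uses.
+ */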
+
+/* a small macro to shorten up some long lines */
+#define INTREG I40E_VFINT_DYN_CTLN1
+
+/* The act of updating the ITR will cause it to immediately trigger. In order
+ * to prevent this from throwing off adaptive update statistics we defer the
+ * update so that it can only happen so often. So after either Tx or Rx are
+ * updated we make the adaptive scheme wait until either the ITR completely
+ * expires via the next_update expiration or we have been through at least
+ * 3 interrupts.
+ */
+#define ITR_COUNTDOWN_START 3
+
+/**
+ * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ *
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+					  struct i40e_q_vector *q_vector)
+{
+	struct i40e_hw *hw = &vsi->back->hw;
+	u32 intval;
+
+	/* These will do nothing if dynamic updates are not enabled */
+	i40e_update_itr(q_vector, &q_vector->tx);
+	i40e_update_itr(q_vector, &q_vector->rx);
+
+	/* This block of logic allows us to get away with only updating
+	 * one ITR value with each interrupt. The idea is to perform a
+	 * pseudo-lazy update with the following criteria.
+	 *
+	 * 1. Rx is given higher priority than Tx if both are in same state
+	 * 2. If we must reduce an ITR, that reduction is given highest priority.
+	 * 3. We then give priority to increasing ITR based on amount.
+	 */
+	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
+		/* Rx ITR needs to be reduced, this is highest priority */
+		intval = i40e_buildreg_itr(I40E_RX_ITR,
+					   q_vector->rx.target_itr);
+		q_vector->rx.current_itr = q_vector->rx.target_itr;
+		q_vector->itr_countdown = ITR_COUNTDOWN_START;
+	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
+		   ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
+		    (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
+		/* Tx ITR needs to be reduced, this is second priority
+		 * Tx ITR needs to be increased more than Rx, fourth priority
+		 */
+		intval = i40e_buildreg_itr(I40E_TX_ITR,
+					   q_vector->tx.target_itr);
+		q_vector->tx.current_itr = q_vector->tx.target_itr;
+		q_vector->itr_countdown = ITR_COUNTDOWN_START;
+	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
+		/* Rx ITR needs to be increased, third priority */
+		intval = i40e_buildreg_itr(I40E_RX_ITR,
+					   q_vector->rx.target_itr);
+		q_vector->rx.current_itr = q_vector->rx.target_itr;
+		q_vector->itr_countdown = ITR_COUNTDOWN_START;
+	} else {
+		/* No ITR update, lowest priority */
+		intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+		if (q_vector->itr_countdown)
+			q_vector->itr_countdown--;
+	}
+
+	if (!test_bit(__I40E_VSI_DOWN, vsi->state))
+		wr32(hw, INTREG(q_vector->reg_idx), intval);
+}
+
+/**
+ * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean all queues associated with a q_vector.
+ *
+ * Returns the amount of work done
+ **/
+int i40evf_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct i40e_q_vector *q_vector =
+			       container_of(napi, struct i40e_q_vector, napi);
+	struct i40e_vsi *vsi = q_vector->vsi;
+	struct i40e_ring *ring;
+	bool clean_complete = true;
+	bool arm_wb = false;
+	int budget_per_ring;
+	int work_done = 0;
+
+	if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
+		napi_complete(napi);
+		return 0;
+	}
+
+	/* Since the actual Tx work is minimal, we can give the Tx a larger
+	 * budget and be more aggressive about cleaning up the Tx descriptors.
+	 */
+	i40e_for_each_ring(ring, q_vector->tx) {
+		if (!i40e_clean_tx_irq(vsi, ring, budget)) {
+			clean_complete = false;
+			continue;
+		}
+		arm_wb |= ring->arm_wb;
+		ring->arm_wb = false;
+	}
+
+	/* Handle case where we are called by netpoll with a budget of 0 */
+	if (budget <= 0)
+		goto tx_only;
+
+	/* We attempt to distribute budget to each Rx queue fairly, but don't
+	 * allow the budget to go below 1 because that would exit polling early.
+	 */
+	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
+
+	i40e_for_each_ring(ring, q_vector->rx) {
+		int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
+
+		work_done += cleaned;
+		/* if we clean as many as budgeted, we must not be done */
+		if (cleaned >= budget_per_ring)
+			clean_complete = false;
+	}
+
+	/* If work not completed, return budget and polling will return */
+	if (!clean_complete) {
+		int cpu_id = smp_processor_id();
+
+		/* It is possible that the interrupt affinity has changed but,
+		 * if the cpu is pegged at 100%, polling will never exit while
+		 * traffic continues and the interrupt will be stuck on this
+		 * cpu.  We check to make sure affinity is correct before we
+		 * continue to poll, otherwise we must stop polling so the
+		 * interrupt can move to the correct cpu.
+		 */
+		if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
+			/* Tell napi that we are done polling */
+			napi_complete_done(napi, work_done);
+
+			/* Force an interrupt */
+			i40evf_force_wb(vsi, q_vector);
+
+			/* Return budget-1 so that polling stops */
+			return budget - 1;
+		}
+tx_only:
+		if (arm_wb) {
+			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
+			i40e_enable_wb_on_itr(vsi, q_vector);
+		}
+		return budget;
+	}
+
+	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
+		q_vector->arm_wb_state = false;
+
+	/* Work is done so exit the polling mode and re-enable the interrupt */
+	napi_complete_done(napi, work_done);
+
+	i40e_update_enable_itr(vsi, q_vector);
+
+	return min(work_done, budget - 1);
+}
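+
+/* Note on the return value above: once napi_complete_done() has been called
+ * the driver must not report a full budget back to the NAPI core, which is
+ * why the completed path caps its return value at budget - 1.
+ */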
+
+/**
+ * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * @skb:     send buffer
+ * @tx_ring: ring to send buffer on
+ * @flags:   the tx flags to be set
+ *
+ * Checks the skb and sets up the generic transmit flags related to VLAN
+ * tagging for the HW, such as VLAN, DCB, etc.
+ *
+ * Returns an error code to indicate the frame should be dropped upon error,
+ * otherwise returns 0 to indicate the flags have been set properly.
+ **/
+static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
+					       struct i40e_ring *tx_ring,
+					       u32 *flags)
+{
+	__be16 protocol = skb->protocol;
+	u32  tx_flags = 0;
+
+	if (protocol == htons(ETH_P_8021Q) &&
+	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
+		/* When HW VLAN acceleration is turned off by the user the
+		 * stack sets the protocol to 8021q so that the driver
+		 * can take any steps required to support the SW only
+		 * VLAN handling.  In our case the driver doesn't need
+		 * to take any further steps so just set the protocol
+		 * to the encapsulated ethertype.
+		 */
+		skb->protocol = vlan_get_protocol(skb);
+		goto out;
+	}
+
+	/* if we have a HW VLAN tag being added, default to the HW one */
+	if (skb_vlan_tag_present(skb)) {
+		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
+	/* else if it is a SW VLAN, check the next protocol and store the tag */
+	} else if (protocol == htons(ETH_P_8021Q)) {
+		struct vlan_hdr *vhdr, _vhdr;
+
+		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+		if (!vhdr)
+			return -EINVAL;
+
+		protocol = vhdr->h_vlan_encapsulated_proto;
+		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
+		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
+	}
+
+out:
+	*flags = tx_flags;
+	return 0;
+}
+
+/**
+ * i40e_tso - set up the tso context descriptor
+ * @first:    pointer to first Tx buffer for xmit
+ * @hdr_len:  ptr to the size of the packet header
+ * @cd_type_cmd_tso_mss: Quad Word 1
+ *
+ * Returns 0 if no TSO can happen, 1 if tso is going, or error
+ **/
+static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
+		    u64 *cd_type_cmd_tso_mss)
+{
+	struct sk_buff *skb = first->skb;
+	u64 cd_cmd, cd_tso_len, cd_mss;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		struct udphdr *udp;
+		unsigned char *hdr;
+	} l4;
+	u32 paylen, l4_offset;
+	u16 gso_segs, gso_size;
+	int err;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	if (!skb_is_gso(skb))
+		return 0;
+
+	err = skb_cow_head(skb, 0);
+	if (err < 0)
+		return err;
+
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+
+	/* initialize outer IP header fields */
+	if (ip.v4->version == 4) {
+		ip.v4->tot_len = 0;
+		ip.v4->check = 0;
+	} else {
+		ip.v6->payload_len = 0;
+	}
+
+	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+					 SKB_GSO_GRE_CSUM |
+					 SKB_GSO_IPXIP4 |
+					 SKB_GSO_IPXIP6 |
+					 SKB_GSO_UDP_TUNNEL |
+					 SKB_GSO_UDP_TUNNEL_CSUM)) {
+		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+			l4.udp->len = 0;
+
+			/* determine offset of outer transport header */
+			l4_offset = l4.hdr - skb->data;
+
+			/* remove payload length from outer checksum */
+			paylen = skb->len - l4_offset;
+			csum_replace_by_diff(&l4.udp->check,
+					     (__force __wsum)htonl(paylen));
+		}
+
+		/* reset pointers to inner headers */
+		ip.hdr = skb_inner_network_header(skb);
+		l4.hdr = skb_inner_transport_header(skb);
+
+		/* initialize inner IP header fields */
+		if (ip.v4->version == 4) {
+			ip.v4->tot_len = 0;
+			ip.v4->check = 0;
+		} else {
+			ip.v6->payload_len = 0;
+		}
+	}
+
+	/* determine offset of inner transport header */
+	l4_offset = l4.hdr - skb->data;
+
+	/* remove payload length from inner checksum */
+	paylen = skb->len - l4_offset;
+	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
+
+	/* compute length of segmentation header */
+	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+
+	/* pull values out of skb_shinfo */
+	gso_size = skb_shinfo(skb)->gso_size;
+	gso_segs = skb_shinfo(skb)->gso_segs;
+
+	/* update GSO size and bytecount with header size */
+	first->gso_segs = gso_segs;
+	first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
+	/* find the field values */
+	cd_cmd = I40E_TX_CTX_DESC_TSO;
+	cd_tso_len = skb->len - *hdr_len;
+	cd_mss = gso_size;
+	*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+				(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+				(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+	return 1;
+}
+
+/**
+ * i40e_tx_enable_csum - Enable Tx checksum offloads
+ * @skb: send buffer
+ * @tx_flags: pointer to Tx flags currently set
+ * @td_cmd: Tx descriptor command bits to set
+ * @td_offset: Tx descriptor header offsets to set
+ * @tx_ring: Tx descriptor ring
+ * @cd_tunneling: ptr to context desc bits
+ **/
+static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
+			       u32 *td_cmd, u32 *td_offset,
+			       struct i40e_ring *tx_ring,
+			       u32 *cd_tunneling)
+{
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		struct udphdr *udp;
+		unsigned char *hdr;
+	} l4;
+	unsigned char *exthdr;
+	u32 offset, cmd = 0;
+	__be16 frag_off;
+	u8 l4_proto = 0;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+
+	/* compute outer L2 header size */
+	offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+	if (skb->encapsulation) {
+		u32 tunnel = 0;
+		/* define outer network header type */
+		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+			tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
+				  I40E_TX_CTX_EXT_IP_IPV4 :
+				  I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+
+			l4_proto = ip.v4->protocol;
+		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
+			tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
+
+			exthdr = ip.hdr + sizeof(*ip.v6);
+			l4_proto = ip.v6->nexthdr;
+			if (l4.hdr != exthdr)
+				ipv6_skip_exthdr(skb, exthdr - skb->data,
+						 &l4_proto, &frag_off);
+		}
+
+		/* define outer transport */
+		switch (l4_proto) {
+		case IPPROTO_UDP:
+			tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
+			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
+			break;
+		case IPPROTO_GRE:
+			tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
+			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
+			break;
+		case IPPROTO_IPIP:
+		case IPPROTO_IPV6:
+			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
+			l4.hdr = skb_inner_network_header(skb);
+			break;
+		default:
+			if (*tx_flags & I40E_TX_FLAGS_TSO)
+				return -1;
+
+			skb_checksum_help(skb);
+			return 0;
+		}
+
+		/* compute outer L3 header size */
+		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+			  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+		/* switch IP header pointer from outer to inner header */
+		ip.hdr = skb_inner_network_header(skb);
+
+		/* compute tunnel header size */
+		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
+			  I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+
+		/* indicate if we need to offload outer UDP header */
+		if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+			tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+
+		/* record tunnel offload values */
+		*cd_tunneling |= tunnel;
+
+		/* switch L4 header pointer from outer to inner */
+		l4.hdr = skb_inner_transport_header(skb);
+		l4_proto = 0;
+
+		/* reset type as we transition from outer to inner headers */
+		*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
+		if (ip.v4->version == 4)
+			*tx_flags |= I40E_TX_FLAGS_IPV4;
+		if (ip.v6->version == 6)
+			*tx_flags |= I40E_TX_FLAGS_IPV6;
+	}
+
+	/* Enable IP checksum offloads */
+	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+		l4_proto = ip.v4->protocol;
+		/* the stack computes the IP header already, the only time we
+		 * need the hardware to recompute it is in the case of TSO.
+		 */
+		cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
+		       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
+		       I40E_TX_DESC_CMD_IIPT_IPV4;
+	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
+		cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+
+		exthdr = ip.hdr + sizeof(*ip.v6);
+		l4_proto = ip.v6->nexthdr;
+		if (l4.hdr != exthdr)
+			ipv6_skip_exthdr(skb, exthdr - skb->data,
+					 &l4_proto, &frag_off);
+	}
+
+	/* compute inner L3 header size */
+	offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+
+	/* Enable L4 checksum offloads */
+	switch (l4_proto) {
+	case IPPROTO_TCP:
+		/* enable checksum offloads */
+		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		break;
+	case IPPROTO_SCTP:
+		/* enable SCTP checksum offload */
+		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+		offset |= (sizeof(struct sctphdr) >> 2) <<
+			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		break;
+	case IPPROTO_UDP:
+		/* enable UDP checksum offload */
+		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+		offset |= (sizeof(struct udphdr) >> 2) <<
+			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		break;
+	default:
+		if (*tx_flags & I40E_TX_FLAGS_TSO)
+			return -1;
+		skb_checksum_help(skb);
+		return 0;
+	}
+
+	*td_cmd |= cmd;
+	*td_offset |= offset;
+
+	return 1;
+}
+
+/**
+ * i40e_create_tx_ctx - Build the Tx context descriptor
+ * @tx_ring:  ring to create the descriptor on
+ * @cd_type_cmd_tso_mss: Quad Word 1
+ * @cd_tunneling: Quad Word 0 - bits 0-31
+ * @cd_l2tag2: Quad Word 0 - bits 32-63
+ **/
+static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
+			       const u64 cd_type_cmd_tso_mss,
+			       const u32 cd_tunneling, const u32 cd_l2tag2)
+{
+	struct i40e_tx_context_desc *context_desc;
+	int i = tx_ring->next_to_use;
+
+	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
+	    !cd_tunneling && !cd_l2tag2)
+		return;
+
+	/* grab the next descriptor */
+	context_desc = I40E_TX_CTXTDESC(tx_ring, i);
+
+	i++;
+	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+	/* cpu_to_le32 and assign to struct fields */
+	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
+	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
+	context_desc->rsvd = cpu_to_le16(0);
+	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
+}
+
+/**
+ * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
+ * @skb:      send buffer
+ *
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
+ **/
+bool __i40evf_chk_linearize(struct sk_buff *skb)
+{
+	const struct skb_frag_struct *frag, *stale;
+	int nr_frags, sum;
+
+	/* no need to check if number of frags is less than 7 */
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
+		return false;
+
+	/* We need to walk through the list and validate that each group
+	 * of 6 fragments totals at least gso_size.
+	 */
+	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
+	frag = &skb_shinfo(skb)->frags[0];
+
+	/* Initialize size to the negative value of gso_size minus 1.  We
+	 * use this as the worst case scenario in which the frag ahead
+	 * of us only provides one byte, which is why we are limited to 6
+	 * descriptors for a single transmit as the header and previous
+	 * fragment are already consuming 2 descriptors.
+	 */
+	sum = 1 - skb_shinfo(skb)->gso_size;
+
+	/* Add size of frags 0 through 4 to create our initial sum */
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+
+	/* Walk through fragments adding latest fragment, testing it, and
+	 * then removing stale fragments from the sum.
+	 */
+	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+		int stale_size = skb_frag_size(stale);
+
+		sum += skb_frag_size(frag++);
+
+		/* The stale fragment may present us with a smaller
+		 * descriptor than the actual fragment size. To account
+		 * for that we need to remove all the data on the front and
+		 * figure out what the remainder would be in the last
+		 * descriptor associated with the fragment.
+		 */
+		if (stale_size > I40E_MAX_DATA_PER_TXD) {
+			int align_pad = -(stale->page_offset) &
+					(I40E_MAX_READ_REQ_SIZE - 1);
+
+			sum -= align_pad;
+			stale_size -= align_pad;
+
+			do {
+				sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+				stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+			} while (stale_size > I40E_MAX_DATA_PER_TXD);
+		}
+
+		/* if sum is negative we failed to make sufficient progress */
+		if (sum < 0)
+			return true;
+
+		if (!nr_frags--)
+			break;
+
+		sum -= stale_size;
+	}
+
+	return false;
+}
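+
+/* Illustrative example of the check above: a TSO skb with gso_size 2000 and
+ * seven 200 byte fragments fails (no six consecutive fragments cover the
+ * 2000 byte segment), so the skb would be linearized, while the same seven
+ * fragments at 2000 bytes each keep the running sum positive and pass.
+ */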
+
+/**
+ * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	/* Memory barrier before checking head and tail */
+	smp_mb();
+
+	/* Check again in a case another CPU has just made room available. */
+	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	++tx_ring->tx_stats.restart_queue;
+	return 0;
+}
+
+/**
+ * i40evf_tx_map - Build the Tx descriptor
+ * @tx_ring:  ring to send buffer on
+ * @skb:      send buffer
+ * @first:    first buffer info buffer to use
+ * @tx_flags: collected send information
+ * @hdr_len:  size of the packet header
+ * @td_cmd:   the command field in the descriptor
+ * @td_offset: offset for checksum or crc
+ **/
+static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+				 struct i40e_tx_buffer *first, u32 tx_flags,
+				 const u8 hdr_len, u32 td_cmd, u32 td_offset)
+{
+	unsigned int data_len = skb->data_len;
+	unsigned int size = skb_headlen(skb);
+	struct skb_frag_struct *frag;
+	struct i40e_tx_buffer *tx_bi;
+	struct i40e_tx_desc *tx_desc;
+	u16 i = tx_ring->next_to_use;
+	u32 td_tag = 0;
+	dma_addr_t dma;
+
+	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
+		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
+		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
+			 I40E_TX_FLAGS_VLAN_SHIFT;
+	}
+
+	first->tx_flags = tx_flags;
+
+	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
+	tx_desc = I40E_TX_DESC(tx_ring, i);
+	tx_bi = first;
+
+	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+		unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
+
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_error;
+
+		/* record length, and DMA address */
+		dma_unmap_len_set(tx_bi, len, size);
+		dma_unmap_addr_set(tx_bi, dma, dma);
+
+		/* align size to end of page */
+		max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
+		tx_desc->buffer_addr = cpu_to_le64(dma);
+
+		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
+			tx_desc->cmd_type_offset_bsz =
+				build_ctob(td_cmd, td_offset,
+					   max_data, td_tag);
+
+			tx_desc++;
+			i++;
+
+			if (i == tx_ring->count) {
+				tx_desc = I40E_TX_DESC(tx_ring, 0);
+				i = 0;
+			}
+
+			dma += max_data;
+			size -= max_data;
+
+			max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
+			tx_desc->buffer_addr = cpu_to_le64(dma);
+		}
+
+		if (likely(!data_len))
+			break;
+
+		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
+							  size, td_tag);
+
+		tx_desc++;
+		i++;
+
+		if (i == tx_ring->count) {
+			tx_desc = I40E_TX_DESC(tx_ring, 0);
+			i = 0;
+		}
+
+		size = skb_frag_size(frag);
+		data_len -= size;
+
+		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+				       DMA_TO_DEVICE);
+
+		tx_bi = &tx_ring->tx_bi[i];
+	}
+
+	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
+
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
+
+	tx_ring->next_to_use = i;
+
+	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+	/* write last descriptor with RS and EOP bits */
+	td_cmd |= I40E_TXD_CMD;
+	tx_desc->cmd_type_offset_bsz =
+			build_ctob(td_cmd, td_offset, size, td_tag);
+
+	/* Force memory writes to complete before letting h/w know there
+	 * are new descriptors to fetch.
+	 *
+	 * We also use this memory barrier to make certain all of the
+	 * status bits have been updated before next_to_watch is written.
+	 */
+	wmb();
+
+	/* set next_to_watch value indicating a packet is present */
+	first->next_to_watch = tx_desc;
+
+	/* notify HW of packet */
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+		writel(i, tx_ring->tail);
+
+		/* we need this if more than one processor can write to our tail
+		 * at a time, it synchronizes IO on IA64/Altix systems
+		 */
+		mmiowb();
+	}
+
+	return;
+
+dma_error:
+	dev_info(tx_ring->dev, "TX DMA map failed\n");
+
+	/* clear dma mappings for failed tx_bi map */
+	for (;;) {
+		tx_bi = &tx_ring->tx_bi[i];
+		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
+		if (tx_bi == first)
+			break;
+		if (i == 0)
+			i = tx_ring->count;
+		i--;
+	}
+
+	tx_ring->next_to_use = i;
+}
+
+/**
+ * i40e_xmit_frame_ring - Sends buffer on Tx ring
+ * @skb:     send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
+					struct i40e_ring *tx_ring)
+{
+	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
+	u32 cd_tunneling = 0, cd_l2tag2 = 0;
+	struct i40e_tx_buffer *first;
+	u32 td_offset = 0;
+	u32 tx_flags = 0;
+	__be16 protocol;
+	u32 td_cmd = 0;
+	u8 hdr_len = 0;
+	int tso, count;
+
+	/* prefetch the data, we'll need it later */
+	prefetch(skb->data);
+
+	i40e_trace(xmit_frame_ring, skb, tx_ring);
+
+	count = i40e_xmit_descriptor_count(skb);
+	if (i40e_chk_linearize(skb, count)) {
+		if (__skb_linearize(skb)) {
+			dev_kfree_skb_any(skb);
+			return NETDEV_TX_OK;
+		}
+		count = i40e_txd_use_count(skb->len);
+		tx_ring->tx_stats.tx_linearize++;
+	}
+
+	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
+	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+	 *       + 4 desc gap to avoid the cache line where head is,
+	 *       + 1 desc for context descriptor,
+	 * otherwise try next time
+	 */
+	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+		tx_ring->tx_stats.tx_busy++;
+		return NETDEV_TX_BUSY;
+	}
+
+	/* record the location of the first descriptor for this packet */
+	first = &tx_ring->tx_bi[tx_ring->next_to_use];
+	first->skb = skb;
+	first->bytecount = skb->len;
+	first->gso_segs = 1;
+
+	/* prepare the xmit flags */
+	if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+		goto out_drop;
+
+	/* obtain protocol of skb */
+	protocol = vlan_get_protocol(skb);
+
+	/* setup IPv4/IPv6 offloads */
+	if (protocol == htons(ETH_P_IP))
+		tx_flags |= I40E_TX_FLAGS_IPV4;
+	else if (protocol == htons(ETH_P_IPV6))
+		tx_flags |= I40E_TX_FLAGS_IPV6;
+
+	tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
+
+	if (tso < 0)
+		goto out_drop;
+	else if (tso)
+		tx_flags |= I40E_TX_FLAGS_TSO;
+
+	/* Always offload the checksum, since it's in the data descriptor */
+	tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
+				  tx_ring, &cd_tunneling);
+	if (tso < 0)
+		goto out_drop;
+
+	skb_tx_timestamp(skb);
+
+	/* always enable CRC insertion offload */
+	td_cmd |= I40E_TX_DESC_CMD_ICRC;
+
+	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
+			   cd_tunneling, cd_l2tag2);
+
+	i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+		      td_cmd, td_offset);
+
+	return NETDEV_TX_OK;
+
+out_drop:
+	i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
+	dev_kfree_skb_any(first->skb);
+	first->skb = NULL;
+	return NETDEV_TX_OK;
+}
+
+/**
+ * i40evf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
+ * @skb:    send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct i40e_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
+
+	/* hardware can't handle really short frames; hardware padding only
+	 * works beyond this point
+	 */
+	if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
+		if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
+			return NETDEV_TX_OK;
+		skb->len = I40E_MIN_TX_LEN;
+		skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
+	}
+
+	return i40e_xmit_frame_ring(skb, tx_ring);
+}
diff --git a/drivers/net/ethernet/intel/iavf/i40e_txrx.h b/drivers/net/ethernet/intel/iavf/i40e_txrx.h
new file mode 100644
index 000000000000..3b5a63b3236e
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40e_txrx.h
@@ -0,0 +1,524 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_TXRX_H_
+#define _I40E_TXRX_H_
+
+/* Interrupt Throttling and Rate Limiting Goodies */
+#define I40E_DEFAULT_IRQ_WORK      256
+
+/* The datasheet for the X710 and XL710 indicate that the maximum value for
+ * the ITR is 8160usec which is then called out as 0xFF0 with a 2usec
+ * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
+ * the register value which is divided by 2 lets use the actual values and
+ * avoid an excessive amount of translation.
+ */
+#define I40E_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
+#define I40E_ITR_MASK		0x1FFE	/* mask for ITR register value */
+#define I40E_MIN_ITR		     2	/* reg uses 2 usec resolution */
+#define I40E_ITR_100K		    10	/* all values below must be even */
+#define I40E_ITR_50K		    20
+#define I40E_ITR_20K		    50
+#define I40E_ITR_18K		    60
+#define I40E_ITR_8K		   122
+#define I40E_MAX_ITR		  8160	/* maximum value as per datasheet */
+#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)
+#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK)
+#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))
+
+#define I40E_ITR_RX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
+#define I40E_ITR_TX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
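+
+/* For example, I40E_ITR_RX_DEF above is (50 | I40E_ITR_DYNAMIC):
+ * ITR_IS_DYNAMIC() is true for it and ITR_TO_REG() strips the flag, leaving
+ * the plain 50 usec value that is converted to 2 usec register units on write.
+ */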
+
+/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
+ * the value of the rate limit is non-zero
+ */
+#define INTRL_ENA                  BIT(6)
+#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
+#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
+#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
+#define I40E_INTRL_8K              125     /* 8000 ints/sec */
+#define I40E_INTRL_62K             16      /* 62500 ints/sec */
+#define I40E_INTRL_83K             12      /* 83333 ints/sec */
+
+#define I40E_QUEUE_END_OF_LIST 0x7FF
+
+/* this enum matches hardware bits and is meant to be used by DYN_CTLN
+ * registers and QINT registers, or more generally anywhere in the manual
+ * that mentions ITR_INDX.  ITR_NONE cannot be used as an index 'n' into any
+ * register; instead it is a special value meaning "don't update" ITR0/1/2.
+ */
+enum i40e_dyn_idx_t {
+	I40E_IDX_ITR0 = 0,
+	I40E_IDX_ITR1 = 1,
+	I40E_IDX_ITR2 = 2,
+	I40E_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
+};
+
+/* these are indexes into ITRN registers */
+#define I40E_RX_ITR    I40E_IDX_ITR0
+#define I40E_TX_ITR    I40E_IDX_ITR1
+#define I40E_PE_ITR    I40E_IDX_ITR2
+
+/* Supported RSS offloads */
+#define I40E_DEFAULT_RSS_HENA ( \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+
+/* Supported Rx Buffer Sizes (a multiple of 128) */
+#define I40E_RXBUFFER_256   256
+#define I40E_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
+#define I40E_RXBUFFER_2048  2048
+#define I40E_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
+#define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */
+
+/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
+ * this adds up to 512 bytes of extra data meaning the smallest allocation
+ * we could have is 1K.
+ * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
+ * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
+ */
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
+#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+#define I40E_RX_DMA_ATTR \
+	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
+/* Attempt to maximize the headroom available for incoming frames.  We
+ * use a 2K buffer for receives and need 1536/1534 to store the data for
+ * the frame.  This leaves us with 512 bytes of room.  From that we need
+ * to deduct the space needed for the shared info and the padding needed
+ * to IP align the frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ *	 up negative.  In these cases we should fall back to the legacy
+ *	 receive path.
+ */
+#if (PAGE_SIZE < 8192)
+#define I40E_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))
+
+static inline int i40e_compute_pad(int rx_buf_len)
+{
+	int page_size, pad_size;
+
+	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+	return pad_size;
+}
+
+static inline int i40e_skb_pad(void)
+{
+	int rx_buf_len;
+
+	/* If a 2K buffer cannot handle a standard Ethernet frame then
+	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
+	 *
+	 * For a 3K buffer we need to add enough padding to allow for
+	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
+	 * cache-line alignment.
+	 */
+	if (I40E_2K_TOO_SMALL_WITH_PADDING)
+		rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+	else
+		rx_buf_len = I40E_RXBUFFER_1536;
+
+	/* if needed make room for NET_IP_ALIGN */
+	rx_buf_len -= NET_IP_ALIGN;
+
+	return i40e_compute_pad(rx_buf_len);
+}
+
+#define I40E_SKB_PAD i40e_skb_pad()
+#else
+#define I40E_2K_TOO_SMALL_WITH_PADDING false
+#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#endif
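+
+/* Illustrative note: with 4K pages and a 64 byte cache line the 2K buffer is
+ * normally large enough, so I40E_SKB_PAD is simply whatever is left of the
+ * half page after the 1536/1534 byte buffer and the skb_shared_info overhead,
+ * on the order of a couple hundred bytes of headroom for build_skb().
+ */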
+
+/**
+ * i40e_test_staterr - tests bits in Rx descriptor status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask which is really only used for boolean tests.
+ * The status_error_len doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
+				     const u64 stat_err_bits)
+{
+	return !!(rx_desc->wb.qword1.status_error_len &
+		  cpu_to_le64(stat_err_bits));
+}
+
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define I40E_RX_BUFFER_WRITE	32	/* Must be power of 2 */
+#define I40E_RX_INCREMENT(r, i) \
+	do {					\
+		(i)++;				\
+		if ((i) == (r)->count)		\
+			i = 0;			\
+		r->next_to_clean = i;		\
+	} while (0)
+
+#define I40E_RX_NEXT_DESC(r, i, n)		\
+	do {					\
+		(i)++;				\
+		if ((i) == (r)->count)		\
+			i = 0;			\
+		(n) = I40E_RX_DESC((r), (i));	\
+	} while (0)
+
+#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)		\
+	do {						\
+		I40E_RX_NEXT_DESC((r), (i), (n));	\
+		prefetch((n));				\
+	} while (0)
+
+#define I40E_MAX_BUFFER_TXD	8
+#define I40E_MIN_TX_LEN		17
+
+/* The size limit for a transmit buffer in a descriptor is (16K - 1).
+ * In order to align with the read requests we will align the value to
+ * the nearest 4K which represents our maximum read request size.
+ */
+#define I40E_MAX_READ_REQ_SIZE		4096
+#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
+#define I40E_MAX_DATA_PER_TXD_ALIGNED \
+	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
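+
+/* With the values above, I40E_MAX_DATA_PER_TXD is 16383 (0x3FFF) and
+ * I40E_MAX_DATA_PER_TXD_ALIGNED is 12288 (0x3000), i.e. three full 4K read
+ * requests per descriptor.
+ */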
+
+/**
+ * i40e_txd_use_count  - estimate the number of descriptors needed for Tx
+ * @size: transmit request size in bytes
+ *
+ * Due to hardware alignment restrictions (4K alignment), we need to
+ * assume that we can have no more than 12K of data per descriptor, even
+ * though each descriptor can take up to 16K - 1 bytes of aligned memory.
+ * Thus, we need to divide by 12K. But division is slow! Instead,
+ * we decompose the operation into shifts and one relatively cheap
+ * multiply operation.
+ *
+ * To divide by 12K, we first divide by 4K, then divide by 3:
+ *     To divide by 4K, shift right by 12 bits
+ *     To divide by 3, multiply by 85, then divide by 256
+ *     (Divide by 256 is done by shifting right by 8 bits)
+ * Finally, we add one to round up. Because 256 isn't an exact multiple of
+ * 3, we'll underestimate near each multiple of 12K. This is actually more
+ * accurate as we have 4K - 1 of wiggle room that we can fit into the last
+ * segment.  For our purposes this is accurate out to 1M which is orders of
+ * magnitude greater than our largest possible GSO size.
+ *
+ * This would then be implemented as:
+ *     return (((size >> 12) * 85) >> 8) + 1;
+ *
+ * Since multiplication and division are commutative, we can reorder
+ * operations into:
+ *     return ((size * 85) >> 20) + 1;
+ */
+static inline unsigned int i40e_txd_use_count(unsigned int size)
+{
+	return ((size * 85) >> 20) + 1;
+}
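+
+/* Worked example for i40e_txd_use_count(): a 60000 byte send gives
+ * (60000 * 85) >> 20 == 4, plus 1 is 5 descriptors, which matches the exact
+ * DIV_ROUND_UP(60000, 12288) == 5.
+ */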
+
+/* Tx Descriptors needed, worst case */
+#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
+#define I40E_MIN_DESC_PENDING	4
+
+#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
+#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
+#define I40E_TX_FLAGS_TSO		BIT(3)
+#define I40E_TX_FLAGS_IPV4		BIT(4)
+#define I40E_TX_FLAGS_IPV6		BIT(5)
+#define I40E_TX_FLAGS_FCCRC		BIT(6)
+#define I40E_TX_FLAGS_FSO		BIT(7)
+#define I40E_TX_FLAGS_FD_SB		BIT(9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL	BIT(10)
+#define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
+#define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
+#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
+#define I40E_TX_FLAGS_VLAN_SHIFT	16
+
+struct i40e_tx_buffer {
+	struct i40e_tx_desc *next_to_watch;
+	union {
+		struct sk_buff *skb;
+		void *raw_buf;
+	};
+	unsigned int bytecount;
+	unsigned short gso_segs;
+
+	DEFINE_DMA_UNMAP_ADDR(dma);
+	DEFINE_DMA_UNMAP_LEN(len);
+	u32 tx_flags;
+};
+
+struct i40e_rx_buffer {
+	dma_addr_t dma;
+	struct page *page;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+	__u32 page_offset;
+#else
+	__u16 page_offset;
+#endif
+	__u16 pagecnt_bias;
+};
+
+struct i40e_queue_stats {
+	u64 packets;
+	u64 bytes;
+};
+
+struct i40e_tx_queue_stats {
+	u64 restart_queue;
+	u64 tx_busy;
+	u64 tx_done_old;
+	u64 tx_linearize;
+	u64 tx_force_wb;
+	int prev_pkt_ctr;
+	u64 tx_lost_interrupt;
+};
+
+struct i40e_rx_queue_stats {
+	u64 non_eop_descs;
+	u64 alloc_page_failed;
+	u64 alloc_buff_failed;
+	u64 page_reuse_count;
+	u64 realloc_count;
+};
+
+enum i40e_ring_state_t {
+	__I40E_TX_FDIR_INIT_DONE,
+	__I40E_TX_XPS_INIT_DONE,
+	__I40E_RING_STATE_NBITS /* must be last */
+};
+
+/* some useful defines for virtchannel interface, which
+ * is the only remaining user of header split
+ */
+#define I40E_RX_DTYPE_NO_SPLIT      0
+#define I40E_RX_DTYPE_HEADER_SPLIT  1
+#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
+#define I40E_RX_SPLIT_L2      0x1
+#define I40E_RX_SPLIT_IP      0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP    0x8
+
+/* struct that defines a descriptor ring, associated with a VSI */
+struct i40e_ring {
+	struct i40e_ring *next;		/* pointer to next ring in q_vector */
+	void *desc;			/* Descriptor ring memory */
+	struct device *dev;		/* Used for DMA mapping */
+	struct net_device *netdev;	/* netdev ring maps to */
+	union {
+		struct i40e_tx_buffer *tx_bi;
+		struct i40e_rx_buffer *rx_bi;
+	};
+	DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
+	u16 queue_index;		/* Queue number of ring */
+	u8 dcb_tc;			/* Traffic class of ring */
+	u8 __iomem *tail;
+
+	/* high bit set means dynamic, use accessors routines to read/write.
+	 * hardware only supports 2us resolution for the ITR registers.
+	 * these values always store the USER setting, and must be converted
+	 * before programming to a register.
+	 */
+	u16 itr_setting;
+
+	u16 count;			/* Number of descriptors */
+	u16 reg_idx;			/* HW register index of the ring */
+	u16 rx_buf_len;
+
+	/* used in interrupt processing */
+	u16 next_to_use;
+	u16 next_to_clean;
+
+	u8 atr_sample_rate;
+	u8 atr_count;
+
+	bool ring_active;		/* is ring online or not */
+	bool arm_wb;		/* do something to arm write back */
+	u8 packet_stride;
+
+	u16 flags;
+#define I40E_TXR_FLAGS_WB_ON_ITR		BIT(0)
+#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)
+
+	/* stats structs */
+	struct i40e_queue_stats	stats;
+	struct u64_stats_sync syncp;
+	union {
+		struct i40e_tx_queue_stats tx_stats;
+		struct i40e_rx_queue_stats rx_stats;
+	};
+
+	unsigned int size;		/* length of descriptor ring in bytes */
+	dma_addr_t dma;			/* physical address of ring */
+
+	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
+	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */
+
+	struct rcu_head rcu;		/* to avoid race on free */
+	u16 next_to_alloc;
+	struct sk_buff *skb;		/* When i40evf_clean_rx_ring_irq() must
+					 * return before it sees the EOP for
+					 * the current packet, we save that skb
+					 * here and resume receiving this
+					 * packet the next time
+					 * i40evf_clean_rx_ring_irq() is called
+					 * for this ring.
+					 */
+} ____cacheline_internodealigned_in_smp;
+
+static inline bool ring_uses_build_skb(struct i40e_ring *ring)
+{
+	return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
+}
+
+static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
+{
+	ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+}
+
+static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
+{
+	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+}
+
+#define I40E_ITR_ADAPTIVE_MIN_INC	0x0002
+#define I40E_ITR_ADAPTIVE_MIN_USECS	0x0002
+#define I40E_ITR_ADAPTIVE_MAX_USECS	0x007e
+#define I40E_ITR_ADAPTIVE_LATENCY	0x8000
+#define I40E_ITR_ADAPTIVE_BULK		0x0000
+#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY))
+
+struct i40e_ring_container {
+	struct i40e_ring *ring;		/* pointer to linked list of ring(s) */
+	unsigned long next_update;	/* jiffies value of next update */
+	unsigned int total_bytes;	/* total bytes processed this int */
+	unsigned int total_packets;	/* total packets processed this int */
+	u16 count;
+	u16 target_itr;			/* target ITR setting for ring(s) */
+	u16 current_itr;		/* current ITR setting for ring(s) */
+};
+
+/* iterator for handling rings in ring container */
+#define i40e_for_each_ring(pos, head) \
+	for (pos = (head).ring; pos != NULL; pos = pos->next)
+
+static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+	if (ring->rx_buf_len > (PAGE_SIZE / 2))
+		return 1;
+#endif
+	return 0;
+}
+
+#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
+
+bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
+netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
+void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
+int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring);
+int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring);
+void i40evf_free_tx_resources(struct i40e_ring *tx_ring);
+void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
+int i40evf_napi_poll(struct napi_struct *napi, int budget);
+void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
+u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
+void i40evf_detect_recover_hung(struct i40e_vsi *vsi);
+int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
+bool __i40evf_chk_linearize(struct sk_buff *skb);
+
+/**
+ * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
+ * @skb:     send buffer
+ *
+ * Returns the number of data descriptors needed for this skb. Returns 0 to
+ * indicate there are not enough descriptors available in this ring since we
+ * need at least one descriptor.
+ **/
+static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
+{
+	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+	int count = 0, size = skb_headlen(skb);
+
+	for (;;) {
+		count += i40e_txd_use_count(size);
+
+		if (!nr_frags--)
+			break;
+
+		size = skb_frag_size(frag++);
+	}
+
+	return count;
+}
+
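
As a worked example of the loop above, assuming i40e_txd_use_count() reports how many descriptors a single buffer of the given length consumes: an skb with a 2 KiB linear head and two page-sized fragments, each small enough for one descriptor, needs 1 + 1 + 1 = 3 data descriptors; the count only grows beyond one per buffer when an individual buffer exceeds the per-descriptor data limit.
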
+/**
+ * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size of the buffer we want to ensure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __i40evf_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb:      send buffer
+ * @count:    number of buffers used
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
+{
+	/* Both TSO and single send will work if count is less than 8 */
+	if (likely(count < I40E_MAX_BUFFER_TXD))
+		return false;
+
+	if (skb_is_gso(skb))
+		return __i40evf_chk_linearize(skb);
+
+	/* we can support up to 8 data buffers for a single send */
+	return count != I40E_MAX_BUFFER_TXD;
+}
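
A minimal sketch of how these helpers are typically combined on the transmit path; the single-slot descriptor reserve and the fallback to __skb_linearize() are assumptions for illustration rather than the driver's literal flow:

static netdev_tx_t example_xmit(struct sk_buff *skb, struct i40e_ring *tx_ring)
{
	int count = i40e_xmit_descriptor_count(skb);

	/* Too many fragments for the hardware: flatten the skb, then
	 * recompute the descriptor count from the new linear length.
	 */
	if (i40e_chk_linearize(skb, count)) {
		if (__skb_linearize(skb)) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		count = i40e_txd_use_count(skb->len);
	}

	/* Reserve one extra slot for a context descriptor (illustrative);
	 * back-pressure the stack when the ring is too full.
	 */
	if (i40e_maybe_stop_tx(tx_ring, count + 1))
		return NETDEV_TX_BUSY;

	/* ... map buffers and fill descriptors here ... */
	return NETDEV_TX_OK;
}
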
+
+/**
+ * txring_txq - return the netdev queue that backs a Tx ring
+ * @ring: Tx ring to find the netdev equivalent of
+ **/
+static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
+{
+	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
+#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/i40e_type.h b/drivers/net/ethernet/intel/iavf/i40e_type.h
new file mode 100644
index 000000000000..8f1344094bc9
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40e_type.h
@@ -0,0 +1,719 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_TYPE_H_
+#define _I40E_TYPE_H_
+
+#include "i40e_status.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_devids.h"
+
+#define I40E_RXQ_CTX_DBUFF_SHIFT 7
+
+/* I40E_MASK is a macro used on 32 bit registers */
+#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
+
+#define I40E_MAX_VSI_QP			16
+#define I40E_MAX_VF_VSI			3
+#define I40E_MAX_CHAINED_RX_BUFFERS	5
+
+/* forward declaration */
+struct i40e_hw;
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+
+/* Data type manipulation macros. */
+
+#define I40E_DESC_UNUSED(R)	\
+	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+	(R)->next_to_clean - (R)->next_to_use - 1)
+
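
Worked example: with count = 512, next_to_clean = 10 and next_to_use = 500, I40E_DESC_UNUSED() evaluates to 512 + 10 - 500 - 1 = 21 free descriptors; once the clean index is ahead of the use index (next_to_clean = 500, next_to_use = 10) it evaluates to 0 + 500 - 10 - 1 = 489. The trailing -1 keeps one slot permanently unused so that a completely full ring is distinguishable from an empty one.
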
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define I40E_QTX_CTL_VF_QUEUE	0x0
+#define I40E_QTX_CTL_VM_QUEUE	0x1
+#define I40E_QTX_CTL_PF_QUEUE	0x2
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum i40e_debug_mask {
+	I40E_DEBUG_INIT			= 0x00000001,
+	I40E_DEBUG_RELEASE		= 0x00000002,
+
+	I40E_DEBUG_LINK			= 0x00000010,
+	I40E_DEBUG_PHY			= 0x00000020,
+	I40E_DEBUG_HMC			= 0x00000040,
+	I40E_DEBUG_NVM			= 0x00000080,
+	I40E_DEBUG_LAN			= 0x00000100,
+	I40E_DEBUG_FLOW			= 0x00000200,
+	I40E_DEBUG_DCB			= 0x00000400,
+	I40E_DEBUG_DIAG			= 0x00000800,
+	I40E_DEBUG_FD			= 0x00001000,
+	I40E_DEBUG_PACKAGE		= 0x00002000,
+
+	I40E_DEBUG_AQ_MESSAGE		= 0x01000000,
+	I40E_DEBUG_AQ_DESCRIPTOR	= 0x02000000,
+	I40E_DEBUG_AQ_DESC_BUFFER	= 0x04000000,
+	I40E_DEBUG_AQ_COMMAND		= 0x06000000,
+	I40E_DEBUG_AQ			= 0x0F000000,
+
+	I40E_DEBUG_USER			= 0xF0000000,
+
+	I40E_DEBUG_ALL			= 0xFFFFFFFF
+};
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with.  This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed.  For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum i40e_mac_type {
+	I40E_MAC_UNKNOWN = 0,
+	I40E_MAC_XL710,
+	I40E_MAC_VF,
+	I40E_MAC_X722,
+	I40E_MAC_X722_VF,
+	I40E_MAC_GENERIC,
+};
+
+enum i40e_vsi_type {
+	I40E_VSI_MAIN	= 0,
+	I40E_VSI_VMDQ1	= 1,
+	I40E_VSI_VMDQ2	= 2,
+	I40E_VSI_CTRL	= 3,
+	I40E_VSI_FCOE	= 4,
+	I40E_VSI_MIRROR	= 5,
+	I40E_VSI_SRIOV	= 6,
+	I40E_VSI_FDIR	= 7,
+	I40E_VSI_TYPE_UNKNOWN
+};
+
+enum i40e_queue_type {
+	I40E_QUEUE_TYPE_RX = 0,
+	I40E_QUEUE_TYPE_TX,
+	I40E_QUEUE_TYPE_PE_CEQ,
+	I40E_QUEUE_TYPE_UNKNOWN
+};
+
+#define I40E_HW_CAP_MAX_GPIO		30
+/* Capabilities of a PF or a VF or the whole device */
+struct i40e_hw_capabilities {
+	bool dcb;
+	bool fcoe;
+	u32 num_vsis;
+	u32 num_rx_qp;
+	u32 num_tx_qp;
+	u32 base_queue;
+	u32 num_msix_vectors_vf;
+};
+
+struct i40e_mac_info {
+	enum i40e_mac_type type;
+	u8 addr[ETH_ALEN];
+	u8 perm_addr[ETH_ALEN];
+	u8 san_addr[ETH_ALEN];
+	u16 max_fcoeq;
+};
+
+/* PCI bus types */
+enum i40e_bus_type {
+	i40e_bus_type_unknown = 0,
+	i40e_bus_type_pci,
+	i40e_bus_type_pcix,
+	i40e_bus_type_pci_express,
+	i40e_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum i40e_bus_speed {
+	i40e_bus_speed_unknown	= 0,
+	i40e_bus_speed_33	= 33,
+	i40e_bus_speed_66	= 66,
+	i40e_bus_speed_100	= 100,
+	i40e_bus_speed_120	= 120,
+	i40e_bus_speed_133	= 133,
+	i40e_bus_speed_2500	= 2500,
+	i40e_bus_speed_5000	= 5000,
+	i40e_bus_speed_8000	= 8000,
+	i40e_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum i40e_bus_width {
+	i40e_bus_width_unknown	= 0,
+	i40e_bus_width_pcie_x1	= 1,
+	i40e_bus_width_pcie_x2	= 2,
+	i40e_bus_width_pcie_x4	= 4,
+	i40e_bus_width_pcie_x8	= 8,
+	i40e_bus_width_32	= 32,
+	i40e_bus_width_64	= 64,
+	i40e_bus_width_reserved
+};
+
+/* Bus parameters */
+struct i40e_bus_info {
+	enum i40e_bus_speed speed;
+	enum i40e_bus_width width;
+	enum i40e_bus_type type;
+
+	u16 func;
+	u16 device;
+	u16 lan_id;
+	u16 bus_id;
+};
+
+#define I40E_MAX_TRAFFIC_CLASS		8
+#define I40E_MAX_USER_PRIORITY		8
+/* Port hardware description */
+struct i40e_hw {
+	u8 __iomem *hw_addr;
+	void *back;
+
+	/* subsystem structs */
+	struct i40e_mac_info mac;
+	struct i40e_bus_info bus;
+
+	/* pci info */
+	u16 device_id;
+	u16 vendor_id;
+	u16 subsystem_device_id;
+	u16 subsystem_vendor_id;
+	u8 revision_id;
+
+	/* capabilities for entire device and PCI func */
+	struct i40e_hw_capabilities dev_caps;
+
+	/* Admin Queue info */
+	struct i40e_adminq_info aq;
+
+	/* debug mask */
+	u32 debug_mask;
+	char err_str[16];
+};
+
+static inline bool i40e_is_vf(struct i40e_hw *hw)
+{
+	return (hw->mac.type == I40E_MAC_VF ||
+		hw->mac.type == I40E_MAC_X722_VF);
+}
+
+struct i40e_driver_version {
+	u8 major_version;
+	u8 minor_version;
+	u8 build_version;
+	u8 subbuild_version;
+	u8 driver_string[32];
+};
+
+/* RX Descriptors */
+union i40e_16byte_rx_desc {
+	struct {
+		__le64 pkt_addr; /* Packet buffer address */
+		__le64 hdr_addr; /* Header buffer address */
+	} read;
+	struct {
+		struct {
+			struct {
+				union {
+					__le16 mirroring_status;
+					__le16 fcoe_ctx_id;
+				} mirr_fcoe;
+				__le16 l2tag1;
+			} lo_dword;
+			union {
+				__le32 rss; /* RSS Hash */
+				__le32 fd_id; /* Flow director filter id */
+				__le32 fcoe_param; /* FCoE DDP Context id */
+			} hi_dword;
+		} qword0;
+		struct {
+			/* ext status/error/pktype/length */
+			__le64 status_error_len;
+		} qword1;
+	} wb;  /* writeback */
+};
+
+union i40e_32byte_rx_desc {
+	struct {
+		__le64  pkt_addr; /* Packet buffer address */
+		__le64  hdr_addr; /* Header buffer address */
+			/* bit 0 of hdr_buffer_addr is DD bit */
+		__le64  rsvd1;
+		__le64  rsvd2;
+	} read;
+	struct {
+		struct {
+			struct {
+				union {
+					__le16 mirroring_status;
+					__le16 fcoe_ctx_id;
+				} mirr_fcoe;
+				__le16 l2tag1;
+			} lo_dword;
+			union {
+				__le32 rss; /* RSS Hash */
+				__le32 fcoe_param; /* FCoE DDP Context id */
+				/* Flow director filter id in case of
+				 * Programming status desc WB
+				 */
+				__le32 fd_id;
+			} hi_dword;
+		} qword0;
+		struct {
+			/* status/error/pktype/length */
+			__le64 status_error_len;
+		} qword1;
+		struct {
+			__le16 ext_status; /* extended status */
+			__le16 rsvd;
+			__le16 l2tag2_1;
+			__le16 l2tag2_2;
+		} qword2;
+		struct {
+			union {
+				__le32 flex_bytes_lo;
+				__le32 pe_status;
+			} lo_dword;
+			union {
+				__le32 flex_bytes_hi;
+				__le32 fd_id;
+			} hi_dword;
+		} qword3;
+	} wb;  /* writeback */
+};
+
+enum i40e_rx_desc_status_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_DESC_STATUS_DD_SHIFT		= 0,
+	I40E_RX_DESC_STATUS_EOF_SHIFT		= 1,
+	I40E_RX_DESC_STATUS_L2TAG1P_SHIFT	= 2,
+	I40E_RX_DESC_STATUS_L3L4P_SHIFT		= 3,
+	I40E_RX_DESC_STATUS_CRCP_SHIFT		= 4,
+	I40E_RX_DESC_STATUS_TSYNINDX_SHIFT	= 5, /* 2 BITS */
+	I40E_RX_DESC_STATUS_TSYNVALID_SHIFT	= 7,
+	/* Note: Bit 8 is reserved in X710 and XL710 */
+	I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT	= 8,
+	I40E_RX_DESC_STATUS_UMBCAST_SHIFT	= 9, /* 2 BITS */
+	I40E_RX_DESC_STATUS_FLM_SHIFT		= 11,
+	I40E_RX_DESC_STATUS_FLTSTAT_SHIFT	= 12, /* 2 BITS */
+	I40E_RX_DESC_STATUS_LPBK_SHIFT		= 14,
+	I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT	= 15,
+	I40E_RX_DESC_STATUS_RESERVED_SHIFT	= 16, /* 2 BITS */
+	/* Note: For non-tunnel packets INT_UDP_0 is the right status for
+	 * UDP header
+	 */
+	I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT	= 18,
+	I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
+};
+
+#define I40E_RXD_QW1_STATUS_SHIFT	0
+#define I40E_RXD_QW1_STATUS_MASK	((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
+					 << I40E_RXD_QW1_STATUS_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT   I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK	(0x3UL << \
+					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT  I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
+				    BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
+enum i40e_rx_desc_fltstat_values {
+	I40E_RX_DESC_FLTSTAT_NO_DATA	= 0,
+	I40E_RX_DESC_FLTSTAT_RSV_FD_ID	= 1, /* 16byte desc? FD_ID : RSV */
+	I40E_RX_DESC_FLTSTAT_RSV	= 2,
+	I40E_RX_DESC_FLTSTAT_RSS_HASH	= 3,
+};
+
+#define I40E_RXD_QW1_ERROR_SHIFT	19
+#define I40E_RXD_QW1_ERROR_MASK		(0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
+
+enum i40e_rx_desc_error_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_DESC_ERROR_RXE_SHIFT		= 0,
+	I40E_RX_DESC_ERROR_RECIPE_SHIFT		= 1,
+	I40E_RX_DESC_ERROR_HBO_SHIFT		= 2,
+	I40E_RX_DESC_ERROR_L3L4E_SHIFT		= 3, /* 3 BITS */
+	I40E_RX_DESC_ERROR_IPE_SHIFT		= 3,
+	I40E_RX_DESC_ERROR_L4E_SHIFT		= 4,
+	I40E_RX_DESC_ERROR_EIPE_SHIFT		= 5,
+	I40E_RX_DESC_ERROR_OVERSIZE_SHIFT	= 6,
+	I40E_RX_DESC_ERROR_PPRS_SHIFT		= 7
+};
+
+enum i40e_rx_desc_error_l3l4e_fcoe_masks {
+	I40E_RX_DESC_ERROR_L3L4E_NONE		= 0,
+	I40E_RX_DESC_ERROR_L3L4E_PROT		= 1,
+	I40E_RX_DESC_ERROR_L3L4E_FC		= 2,
+	I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR	= 3,
+	I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN	= 4
+};
+
+#define I40E_RXD_QW1_PTYPE_SHIFT	30
+#define I40E_RXD_QW1_PTYPE_MASK		(0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum i40e_rx_l2_ptype {
+	I40E_RX_PTYPE_L2_RESERVED			= 0,
+	I40E_RX_PTYPE_L2_MAC_PAY2			= 1,
+	I40E_RX_PTYPE_L2_TIMESYNC_PAY2			= 2,
+	I40E_RX_PTYPE_L2_FIP_PAY2			= 3,
+	I40E_RX_PTYPE_L2_OUI_PAY2			= 4,
+	I40E_RX_PTYPE_L2_MACCNTRL_PAY2			= 5,
+	I40E_RX_PTYPE_L2_LLDP_PAY2			= 6,
+	I40E_RX_PTYPE_L2_ECP_PAY2			= 7,
+	I40E_RX_PTYPE_L2_EVB_PAY2			= 8,
+	I40E_RX_PTYPE_L2_QCN_PAY2			= 9,
+	I40E_RX_PTYPE_L2_EAPOL_PAY2			= 10,
+	I40E_RX_PTYPE_L2_ARP				= 11,
+	I40E_RX_PTYPE_L2_FCOE_PAY3			= 12,
+	I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3		= 13,
+	I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3		= 14,
+	I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3		= 15,
+	I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA		= 16,
+	I40E_RX_PTYPE_L2_FCOE_VFT_PAY3			= 17,
+	I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA		= 18,
+	I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY			= 19,
+	I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP			= 20,
+	I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER		= 21,
+	I40E_RX_PTYPE_GRENAT4_MAC_PAY3			= 58,
+	I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4	= 87,
+	I40E_RX_PTYPE_GRENAT6_MAC_PAY3			= 124,
+	I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4	= 153
+};
+
+struct i40e_rx_ptype_decoded {
+	u32 ptype:8;
+	u32 known:1;
+	u32 outer_ip:1;
+	u32 outer_ip_ver:1;
+	u32 outer_frag:1;
+	u32 tunnel_type:3;
+	u32 tunnel_end_prot:2;
+	u32 tunnel_end_frag:1;
+	u32 inner_prot:4;
+	u32 payload_layer:3;
+};
+
+enum i40e_rx_ptype_outer_ip {
+	I40E_RX_PTYPE_OUTER_L2	= 0,
+	I40E_RX_PTYPE_OUTER_IP	= 1
+};
+
+enum i40e_rx_ptype_outer_ip_ver {
+	I40E_RX_PTYPE_OUTER_NONE	= 0,
+	I40E_RX_PTYPE_OUTER_IPV4	= 0,
+	I40E_RX_PTYPE_OUTER_IPV6	= 1
+};
+
+enum i40e_rx_ptype_outer_fragmented {
+	I40E_RX_PTYPE_NOT_FRAG	= 0,
+	I40E_RX_PTYPE_FRAG	= 1
+};
+
+enum i40e_rx_ptype_tunnel_type {
+	I40E_RX_PTYPE_TUNNEL_NONE		= 0,
+	I40E_RX_PTYPE_TUNNEL_IP_IP		= 1,
+	I40E_RX_PTYPE_TUNNEL_IP_GRENAT		= 2,
+	I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC	= 3,
+	I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN	= 4,
+};
+
+enum i40e_rx_ptype_tunnel_end_prot {
+	I40E_RX_PTYPE_TUNNEL_END_NONE	= 0,
+	I40E_RX_PTYPE_TUNNEL_END_IPV4	= 1,
+	I40E_RX_PTYPE_TUNNEL_END_IPV6	= 2,
+};
+
+enum i40e_rx_ptype_inner_prot {
+	I40E_RX_PTYPE_INNER_PROT_NONE		= 0,
+	I40E_RX_PTYPE_INNER_PROT_UDP		= 1,
+	I40E_RX_PTYPE_INNER_PROT_TCP		= 2,
+	I40E_RX_PTYPE_INNER_PROT_SCTP		= 3,
+	I40E_RX_PTYPE_INNER_PROT_ICMP		= 4,
+	I40E_RX_PTYPE_INNER_PROT_TIMESYNC	= 5
+};
+
+enum i40e_rx_ptype_payload_layer {
+	I40E_RX_PTYPE_PAYLOAD_LAYER_NONE	= 0,
+	I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2	= 1,
+	I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3	= 2,
+	I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4	= 3,
+};
+
+#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT	38
+#define I40E_RXD_QW1_LENGTH_PBUF_MASK	(0x3FFFULL << \
+					 I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT	52
+#define I40E_RXD_QW1_LENGTH_HBUF_MASK	(0x7FFULL << \
+					 I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_SPH_SHIFT	63
+#define I40E_RXD_QW1_LENGTH_SPH_MASK	BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+
+enum i40e_rx_desc_ext_status_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT	= 0,
+	I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT	= 1,
+	I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT	= 2, /* 2 BITS */
+	I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT	= 4, /* 2 BITS */
+	I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT	= 9,
+	I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT	= 10,
+	I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT	= 11,
+};
+
+enum i40e_rx_desc_pe_status_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_DESC_PE_STATUS_QPID_SHIFT	= 0, /* 18 BITS */
+	I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT	= 0, /* 16 BITS */
+	I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT	= 16, /* 8 BITS */
+	I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT	= 24,
+	I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT	= 25,
+	I40E_RX_DESC_PE_STATUS_PORTV_SHIFT	= 26,
+	I40E_RX_DESC_PE_STATUS_URG_SHIFT	= 27,
+	I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT	= 28,
+	I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT	= 29
+};
+
+#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT		38
+#define I40E_RX_PROG_STATUS_DESC_LENGTH			0x2000000
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT	2
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK	(0x7UL << \
+				I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT	19
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK		(0x3FUL << \
+				I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum i40e_rx_prog_status_desc_status_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_PROG_STATUS_DESC_DD_SHIFT	= 0,
+	I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT	= 2 /* 3 BITS */
+};
+
+enum i40e_rx_prog_status_desc_prog_id_masks {
+	I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS	= 1,
+	I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS	= 2,
+	I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS	= 4,
+};
+
+enum i40e_rx_prog_status_desc_error_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT	= 0,
+	I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT	= 1,
+	I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT	= 2,
+	I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT	= 3
+};
+
+/* TX Descriptor */
+struct i40e_tx_desc {
+	__le64 buffer_addr; /* Address of descriptor's data buf */
+	__le64 cmd_type_offset_bsz;
+};
+
+#define I40E_TXD_QW1_DTYPE_SHIFT	0
+#define I40E_TXD_QW1_DTYPE_MASK		(0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
+
+enum i40e_tx_desc_dtype_value {
+	I40E_TX_DESC_DTYPE_DATA		= 0x0,
+	I40E_TX_DESC_DTYPE_NOP		= 0x1, /* same as Context desc */
+	I40E_TX_DESC_DTYPE_CONTEXT	= 0x1,
+	I40E_TX_DESC_DTYPE_FCOE_CTX	= 0x2,
+	I40E_TX_DESC_DTYPE_FILTER_PROG	= 0x8,
+	I40E_TX_DESC_DTYPE_DDP_CTX	= 0x9,
+	I40E_TX_DESC_DTYPE_FLEX_DATA	= 0xB,
+	I40E_TX_DESC_DTYPE_FLEX_CTX_1	= 0xC,
+	I40E_TX_DESC_DTYPE_FLEX_CTX_2	= 0xD,
+	I40E_TX_DESC_DTYPE_DESC_DONE	= 0xF
+};
+
+#define I40E_TXD_QW1_CMD_SHIFT	4
+#define I40E_TXD_QW1_CMD_MASK	(0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
+
+enum i40e_tx_desc_cmd_bits {
+	I40E_TX_DESC_CMD_EOP			= 0x0001,
+	I40E_TX_DESC_CMD_RS			= 0x0002,
+	I40E_TX_DESC_CMD_ICRC			= 0x0004,
+	I40E_TX_DESC_CMD_IL2TAG1		= 0x0008,
+	I40E_TX_DESC_CMD_DUMMY			= 0x0010,
+	I40E_TX_DESC_CMD_IIPT_NONIP		= 0x0000, /* 2 BITS */
+	I40E_TX_DESC_CMD_IIPT_IPV6		= 0x0020, /* 2 BITS */
+	I40E_TX_DESC_CMD_IIPT_IPV4		= 0x0040, /* 2 BITS */
+	I40E_TX_DESC_CMD_IIPT_IPV4_CSUM		= 0x0060, /* 2 BITS */
+	I40E_TX_DESC_CMD_FCOET			= 0x0080,
+	I40E_TX_DESC_CMD_L4T_EOFT_UNK		= 0x0000, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_TCP		= 0x0100, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_SCTP		= 0x0200, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_UDP		= 0x0300, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_EOF_N		= 0x0000, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_EOF_T		= 0x0100, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI	= 0x0200, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_EOF_A		= 0x0300, /* 2 BITS */
+};
+
+#define I40E_TXD_QW1_OFFSET_SHIFT	16
+#define I40E_TXD_QW1_OFFSET_MASK	(0x3FFFFULL << \
+					 I40E_TXD_QW1_OFFSET_SHIFT)
+
+enum i40e_tx_desc_length_fields {
+	/* Note: These are predefined bit offsets */
+	I40E_TX_DESC_LENGTH_MACLEN_SHIFT	= 0, /* 7 BITS */
+	I40E_TX_DESC_LENGTH_IPLEN_SHIFT		= 7, /* 7 BITS */
+	I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT	= 14 /* 4 BITS */
+};
+
+#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT	34
+#define I40E_TXD_QW1_TX_BUF_SZ_MASK	(0x3FFFULL << \
+					 I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define I40E_TXD_QW1_L2TAG1_SHIFT	48
+#define I40E_TXD_QW1_L2TAG1_MASK	(0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
+
+/* Context descriptors */
+struct i40e_tx_context_desc {
+	__le32 tunneling_params;
+	__le16 l2tag2;
+	__le16 rsvd;
+	__le64 type_cmd_tso_mss;
+};
+
+#define I40E_TXD_CTX_QW1_CMD_SHIFT	4
+#define I40E_TXD_CTX_QW1_CMD_MASK	(0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
+
+enum i40e_tx_ctx_desc_cmd_bits {
+	I40E_TX_CTX_DESC_TSO		= 0x01,
+	I40E_TX_CTX_DESC_TSYN		= 0x02,
+	I40E_TX_CTX_DESC_IL2TAG2	= 0x04,
+	I40E_TX_CTX_DESC_IL2TAG2_IL2H	= 0x08,
+	I40E_TX_CTX_DESC_SWTCH_NOTAG	= 0x00,
+	I40E_TX_CTX_DESC_SWTCH_UPLINK	= 0x10,
+	I40E_TX_CTX_DESC_SWTCH_LOCAL	= 0x20,
+	I40E_TX_CTX_DESC_SWTCH_VSI	= 0x30,
+	I40E_TX_CTX_DESC_SWPE		= 0x40
+};
+
+#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT	30
+#define I40E_TXD_CTX_QW1_TSO_LEN_MASK	(0x3FFFFULL << \
+					 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define I40E_TXD_CTX_QW1_MSS_SHIFT	50
+#define I40E_TXD_CTX_QW1_MSS_MASK	(0x3FFFULL << \
+					 I40E_TXD_CTX_QW1_MSS_SHIFT)
+
+#define I40E_TXD_CTX_QW1_VSI_SHIFT	50
+#define I40E_TXD_CTX_QW1_VSI_MASK	(0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT	0
+#define I40E_TXD_CTX_QW0_EXT_IP_MASK	(0x3ULL << \
+					 I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum i40e_tx_ctx_desc_eipt_offload {
+	I40E_TX_CTX_EXT_IP_NONE		= 0x0,
+	I40E_TX_CTX_EXT_IP_IPV6		= 0x1,
+	I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM	= 0x2,
+	I40E_TX_CTX_EXT_IP_IPV4		= 0x3
+};
+
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT	2
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK	(0x3FULL << \
+					 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_NATT_SHIFT	9
+#define I40E_TXD_CTX_QW0_NATT_MASK	(0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_UDP_TUNNELING	BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_GRE_TUNNELING	(0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT	11
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
+				       BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST	I40E_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define I40E_TXD_CTX_QW0_NATLEN_SHIFT	12
+#define I40E_TXD_CTX_QW0_NATLEN_MASK	(0x7FULL << \
+					 I40E_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_DECTTL_SHIFT	19
+#define I40E_TXD_CTX_QW0_DECTTL_MASK	(0xFULL << \
+					 I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT	23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK	BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum i40e_filter_pctype {
+	/* Note: Values 0-28 are reserved for future use.
+	 * Values 29, 30, and 32 are not supported on XL710 and X710.
+	 */
+	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP	= 29,
+	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP	= 30,
+	I40E_FILTER_PCTYPE_NONF_IPV4_UDP		= 31,
+	I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK	= 32,
+	I40E_FILTER_PCTYPE_NONF_IPV4_TCP		= 33,
+	I40E_FILTER_PCTYPE_NONF_IPV4_SCTP		= 34,
+	I40E_FILTER_PCTYPE_NONF_IPV4_OTHER		= 35,
+	I40E_FILTER_PCTYPE_FRAG_IPV4			= 36,
+	/* Note: Values 37-38 are reserved for future use.
+	 * Values 39, 40, and 42 are not supported on XL710 and X710.
+	 */
+	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP	= 39,
+	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP	= 40,
+	I40E_FILTER_PCTYPE_NONF_IPV6_UDP		= 41,
+	I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK	= 42,
+	I40E_FILTER_PCTYPE_NONF_IPV6_TCP		= 43,
+	I40E_FILTER_PCTYPE_NONF_IPV6_SCTP		= 44,
+	I40E_FILTER_PCTYPE_NONF_IPV6_OTHER		= 45,
+	I40E_FILTER_PCTYPE_FRAG_IPV6			= 46,
+	/* Note: Value 47 is reserved for future use */
+	I40E_FILTER_PCTYPE_FCOE_OX			= 48,
+	I40E_FILTER_PCTYPE_FCOE_RX			= 49,
+	I40E_FILTER_PCTYPE_FCOE_OTHER			= 50,
+	/* Note: Values 51-62 are reserved for future use */
+	I40E_FILTER_PCTYPE_L2_PAYLOAD			= 63,
+};
+
+struct i40e_vsi_context {
+	u16 seid;
+	u16 uplink_seid;
+	u16 vsi_number;
+	u16 vsis_allocated;
+	u16 vsis_unallocated;
+	u16 flags;
+	u8 pf_num;
+	u8 vf_num;
+	u8 connection_type;
+	struct i40e_aqc_vsi_properties_data info;
+};
+
+struct i40e_veb_context {
+	u16 seid;
+	u16 uplink_seid;
+	u16 veb_number;
+	u16 vebs_allocated;
+	u16 vebs_unallocated;
+	u16 flags;
+	struct i40e_aqc_get_veb_parameters_completion info;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct i40e_eth_stats {
+	u64 rx_bytes;			/* gorc */
+	u64 rx_unicast;			/* uprc */
+	u64 rx_multicast;		/* mprc */
+	u64 rx_broadcast;		/* bprc */
+	u64 rx_discards;		/* rdpc */
+	u64 rx_unknown_protocol;	/* rupp */
+	u64 tx_bytes;			/* gotc */
+	u64 tx_unicast;			/* uptc */
+	u64 tx_multicast;		/* mptc */
+	u64 tx_broadcast;		/* bptc */
+	u64 tx_discards;		/* tdpc */
+	u64 tx_errors;			/* tepc */
+};
+#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/i40evf.h b/drivers/net/ethernet/intel/iavf/i40evf.h
new file mode 100644
index 000000000000..96e537a35000
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40evf.h
@@ -0,0 +1,427 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40EVF_H_
+#define _I40EVF_H_
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/ipv6.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/socket.h>
+#include <linux/jiffies.h>
+#include <net/ip6_checksum.h>
+#include <net/pkt_cls.h>
+#include <net/udp.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
+
+#include "i40e_type.h"
+#include <linux/avf/virtchnl.h>
+#include "i40e_txrx.h"
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+#define PFX "i40evf: "
+
+/* VSI state flags shared with common code */
+enum i40evf_vsi_state_t {
+	__I40E_VSI_DOWN,
+	/* This must be last as it determines the size of the BITMAP */
+	__I40E_VSI_STATE_SIZE__,
+};
+
+/* dummy struct to make common code less painful */
+struct i40e_vsi {
+	struct i40evf_adapter *back;
+	struct net_device *netdev;
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+	u16 seid;
+	u16 id;
+	DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__);
+	int base_vector;
+	u16 work_limit;
+	u16 qs_handle;
+	void *priv;     /* client driver data reference. */
+};
+
+/* How many Rx buffers do we bundle into one write to the hardware? */
+#define I40EVF_RX_BUFFER_WRITE	16	/* Must be power of 2 */
+#define I40EVF_DEFAULT_TXD	512
+#define I40EVF_DEFAULT_RXD	512
+#define I40EVF_MAX_TXD		4096
+#define I40EVF_MIN_TXD		64
+#define I40EVF_MAX_RXD		4096
+#define I40EVF_MIN_RXD		64
+#define I40EVF_REQ_DESCRIPTOR_MULTIPLE	32
+#define I40EVF_MAX_AQ_BUF_SIZE	4096
+#define I40EVF_AQ_LEN		32
+#define I40EVF_AQ_MAX_ERR	20 /* times to try before resetting AQ */
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#define I40E_RX_DESC(R, i) (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
+#define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i]))
+#define I40E_TX_CTXTDESC(R, i) \
+	(&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
+#define I40EVF_MAX_REQ_QUEUES 4
+
+#define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4)
+#define I40EVF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4)
+#define I40EVF_MBPS_DIVISOR	125000 /* divisor to convert to Mbps */
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct i40e_q_vector {
+	struct i40evf_adapter *adapter;
+	struct i40e_vsi *vsi;
+	struct napi_struct napi;
+	struct i40e_ring_container rx;
+	struct i40e_ring_container tx;
+	u32 ring_mask;
+	u8 itr_countdown;	/* when 0 should adjust adaptive ITR */
+	u8 num_ringpairs;	/* total number of ring pairs in vector */
+	u16 v_idx;		/* index in the vsi->q_vector array. */
+	u16 reg_idx;		/* register index of the interrupt */
+	char name[IFNAMSIZ + 15];
+	bool arm_wb_state;
+	cpumask_t affinity_mask;
+	struct irq_affinity_notify affinity_notify;
+};
+
+/* Helper macros to switch between ints/sec and what the register uses.
+ * And yes, it's the same math going both ways.  The lowest value
+ * supported by all of the i40e hardware is 8.
+ */
+#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
+	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
+#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
+
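
Worked example of the conversion above: EITR_INTS_PER_SEC_TO_REG(20000) = 1000000000 / (20000 * 256) = 195 after integer division, and a requested rate of 0 falls back to the hardware minimum of 8. Feeding 195 back through the same formula returns roughly 20000, which is why the register-to-rate macro is simply an alias for the same expression.
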
+#define I40EVF_DESC_UNUSED(R) \
+	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+	(R)->next_to_clean - (R)->next_to_use - 1)
+
+#define I40EVF_RX_DESC_ADV(R, i)	\
+	(&(((union i40e_adv_rx_desc *)((R).desc))[i]))
+#define I40EVF_TX_DESC_ADV(R, i)	\
+	(&(((union i40e_adv_tx_desc *)((R).desc))[i]))
+#define I40EVF_TX_CTXTDESC_ADV(R, i)	\
+	(&(((struct i40e_adv_tx_context_desc *)((R).desc))[i]))
+
+#define OTHER_VECTOR 1
+#define NONQ_VECS (OTHER_VECTOR)
+
+#define MIN_MSIX_Q_VECTORS 1
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS)
+
+#define I40EVF_QUEUE_END_OF_LIST 0x7FF
+#define I40EVF_FREE_VECTOR 0x7FFF
+struct i40evf_mac_filter {
+	struct list_head list;
+	u8 macaddr[ETH_ALEN];
+	bool remove;		/* filter needs to be removed */
+	bool add;		/* filter needs to be added */
+};
+
+struct i40evf_vlan_filter {
+	struct list_head list;
+	u16 vlan;
+	bool remove;		/* filter needs to be removed */
+	bool add;		/* filter needs to be added */
+};
+
+#define I40EVF_MAX_TRAFFIC_CLASS	4
+/* State of traffic class creation */
+enum i40evf_tc_state_t {
+	__I40EVF_TC_INVALID, /* no traffic class, default state */
+	__I40EVF_TC_RUNNING, /* traffic classes have been created */
+};
+
+/* channel info */
+struct i40evf_channel_config {
+	struct virtchnl_channel_info ch_info[I40EVF_MAX_TRAFFIC_CLASS];
+	enum i40evf_tc_state_t state;
+	u8 total_qps;
+};
+
+/* State of cloud filter */
+enum i40evf_cloud_filter_state_t {
+	__I40EVF_CF_INVALID,	 /* cloud filter not added */
+	__I40EVF_CF_ADD_PENDING, /* cloud filter pending add by the PF */
+	__I40EVF_CF_DEL_PENDING, /* cloud filter pending del by the PF */
+	__I40EVF_CF_ACTIVE,	 /* cloud filter is active */
+};
+
+/* Driver state. The order of these is important! */
+enum i40evf_state_t {
+	__I40EVF_STARTUP,		/* driver loaded, probe complete */
+	__I40EVF_REMOVE,		/* driver is being unloaded */
+	__I40EVF_INIT_VERSION_CHECK,	/* aq msg sent, awaiting reply */
+	__I40EVF_INIT_GET_RESOURCES,	/* aq msg sent, awaiting reply */
+	__I40EVF_INIT_SW,		/* got resources, setting up structs */
+	__I40EVF_RESETTING,		/* in reset */
+	/* Below here, watchdog is running */
+	__I40EVF_DOWN,			/* ready, can be opened */
+	__I40EVF_DOWN_PENDING,		/* descending, waiting for watchdog */
+	__I40EVF_TESTING,		/* in ethtool self-test */
+	__I40EVF_RUNNING,		/* opened, working */
+};
+
+enum i40evf_critical_section_t {
+	__I40EVF_IN_CRITICAL_TASK,	/* cannot be interrupted */
+	__I40EVF_IN_CLIENT_TASK,
+	__I40EVF_IN_REMOVE_TASK,	/* device being removed */
+};
+
+#define I40EVF_CLOUD_FIELD_OMAC		0x01
+#define I40EVF_CLOUD_FIELD_IMAC		0x02
+#define I40EVF_CLOUD_FIELD_IVLAN	0x04
+#define I40EVF_CLOUD_FIELD_TEN_ID	0x08
+#define I40EVF_CLOUD_FIELD_IIP		0x10
+
+#define I40EVF_CF_FLAGS_OMAC	I40EVF_CLOUD_FIELD_OMAC
+#define I40EVF_CF_FLAGS_IMAC	I40EVF_CLOUD_FIELD_IMAC
+#define I40EVF_CF_FLAGS_IMAC_IVLAN	(I40EVF_CLOUD_FIELD_IMAC |\
+					 I40EVF_CLOUD_FIELD_IVLAN)
+#define I40EVF_CF_FLAGS_IMAC_TEN_ID	(I40EVF_CLOUD_FIELD_IMAC |\
+					 I40EVF_CLOUD_FIELD_TEN_ID)
+#define I40EVF_CF_FLAGS_OMAC_TEN_ID_IMAC	(I40EVF_CLOUD_FIELD_OMAC |\
+						 I40EVF_CLOUD_FIELD_IMAC |\
+						 I40EVF_CLOUD_FIELD_TEN_ID)
+#define I40EVF_CF_FLAGS_IMAC_IVLAN_TEN_ID	(I40EVF_CLOUD_FIELD_IMAC |\
+						 I40EVF_CLOUD_FIELD_IVLAN |\
+						 I40EVF_CLOUD_FIELD_TEN_ID)
+#define I40EVF_CF_FLAGS_IIP	I40E_CLOUD_FIELD_IIP
+
+/* bookkeeping of cloud filters */
+struct i40evf_cloud_filter {
+	enum i40evf_cloud_filter_state_t state;
+	struct list_head list;
+	struct virtchnl_filter f;
+	unsigned long cookie;
+	bool del;		/* filter needs to be deleted */
+	bool add;		/* filter needs to be added */
+};
+
+/* board specific private data structure */
+struct i40evf_adapter {
+	struct timer_list watchdog_timer;
+	struct work_struct reset_task;
+	struct work_struct adminq_task;
+	struct delayed_work client_task;
+	struct delayed_work init_task;
+	wait_queue_head_t down_waitqueue;
+	struct i40e_q_vector *q_vectors;
+	struct list_head vlan_filter_list;
+	struct list_head mac_filter_list;
+	/* Lock to protect accesses to MAC and VLAN lists */
+	spinlock_t mac_vlan_list_lock;
+	char misc_vector_name[IFNAMSIZ + 9];
+	int num_active_queues;
+	int num_req_queues;
+
+	/* TX */
+	struct i40e_ring *tx_rings;
+	u32 tx_timeout_count;
+	u32 tx_desc_count;
+
+	/* RX */
+	struct i40e_ring *rx_rings;
+	u64 hw_csum_rx_error;
+	u32 rx_desc_count;
+	int num_msix_vectors;
+	int num_iwarp_msix;
+	int iwarp_base_vector;
+	u32 client_pending;
+	struct i40e_client_instance *cinst;
+	struct msix_entry *msix_entries;
+
+	u32 flags;
+#define I40EVF_FLAG_RX_CSUM_ENABLED		BIT(0)
+#define I40EVF_FLAG_PF_COMMS_FAILED		BIT(3)
+#define I40EVF_FLAG_RESET_PENDING		BIT(4)
+#define I40EVF_FLAG_RESET_NEEDED		BIT(5)
+#define I40EVF_FLAG_WB_ON_ITR_CAPABLE		BIT(6)
+#define I40EVF_FLAG_ADDR_SET_BY_PF		BIT(8)
+#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED	BIT(9)
+#define I40EVF_FLAG_CLIENT_NEEDS_OPEN		BIT(10)
+#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE		BIT(11)
+#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS	BIT(12)
+#define I40EVF_FLAG_PROMISC_ON			BIT(13)
+#define I40EVF_FLAG_ALLMULTI_ON			BIT(14)
+#define I40EVF_FLAG_LEGACY_RX			BIT(15)
+#define I40EVF_FLAG_REINIT_ITR_NEEDED		BIT(16)
+#define I40EVF_FLAG_QUEUES_DISABLED		BIT(17)
+/* duplicates for common code */
+#define I40E_FLAG_DCB_ENABLED			0
+#define I40E_FLAG_RX_CSUM_ENABLED		I40EVF_FLAG_RX_CSUM_ENABLED
+#define I40E_FLAG_LEGACY_RX			I40EVF_FLAG_LEGACY_RX
+	/* flags for admin queue service task */
+	u32 aq_required;
+#define I40EVF_FLAG_AQ_ENABLE_QUEUES		BIT(0)
+#define I40EVF_FLAG_AQ_DISABLE_QUEUES		BIT(1)
+#define I40EVF_FLAG_AQ_ADD_MAC_FILTER		BIT(2)
+#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER		BIT(3)
+#define I40EVF_FLAG_AQ_DEL_MAC_FILTER		BIT(4)
+#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER		BIT(5)
+#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES		BIT(6)
+#define I40EVF_FLAG_AQ_MAP_VECTORS		BIT(7)
+#define I40EVF_FLAG_AQ_HANDLE_RESET		BIT(8)
+#define I40EVF_FLAG_AQ_CONFIGURE_RSS		BIT(9) /* direct AQ config */
+#define I40EVF_FLAG_AQ_GET_CONFIG		BIT(10)
+/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
+#define I40EVF_FLAG_AQ_GET_HENA			BIT(11)
+#define I40EVF_FLAG_AQ_SET_HENA			BIT(12)
+#define I40EVF_FLAG_AQ_SET_RSS_KEY		BIT(13)
+#define I40EVF_FLAG_AQ_SET_RSS_LUT		BIT(14)
+#define I40EVF_FLAG_AQ_REQUEST_PROMISC		BIT(15)
+#define I40EVF_FLAG_AQ_RELEASE_PROMISC		BIT(16)
+#define I40EVF_FLAG_AQ_REQUEST_ALLMULTI		BIT(17)
+#define I40EVF_FLAG_AQ_RELEASE_ALLMULTI		BIT(18)
+#define I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING	BIT(19)
+#define I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING	BIT(20)
+#define I40EVF_FLAG_AQ_ENABLE_CHANNELS		BIT(21)
+#define I40EVF_FLAG_AQ_DISABLE_CHANNELS		BIT(22)
+#define I40EVF_FLAG_AQ_ADD_CLOUD_FILTER		BIT(23)
+#define I40EVF_FLAG_AQ_DEL_CLOUD_FILTER		BIT(24)
+
+	/* OS defined structs */
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+
+	struct i40e_hw hw; /* defined in i40e_type.h */
+
+	enum i40evf_state_t state;
+	unsigned long crit_section;
+
+	struct work_struct watchdog_task;
+	bool netdev_registered;
+	bool link_up;
+	enum virtchnl_link_speed link_speed;
+	enum virtchnl_ops current_op;
+#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \
+			    (_a)->vf_res->vf_cap_flags & \
+				VIRTCHNL_VF_OFFLOAD_IWARP : \
+			    0)
+#define CLIENT_ENABLED(_a) ((_a)->cinst)
+/* RSS by the PF should be preferred over RSS via other methods. */
+#define RSS_PF(_a) ((_a)->vf_res->vf_cap_flags & \
+		    VIRTCHNL_VF_OFFLOAD_RSS_PF)
+#define RSS_AQ(_a) ((_a)->vf_res->vf_cap_flags & \
+		    VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+#define RSS_REG(_a) (!((_a)->vf_res->vf_cap_flags & \
+		       (VIRTCHNL_VF_OFFLOAD_RSS_AQ | \
+			VIRTCHNL_VF_OFFLOAD_RSS_PF)))
+#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
+			  VIRTCHNL_VF_OFFLOAD_VLAN)
+	struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
+	struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
+	struct virtchnl_version_info pf_version;
+#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
+		       ((_a)->pf_version.minor == 1))
+	u16 msg_enable;
+	struct i40e_eth_stats current_stats;
+	struct i40e_vsi vsi;
+	u32 aq_wait_count;
+	/* RSS stuff */
+	u64 hena;
+	u16 rss_key_size;
+	u16 rss_lut_size;
+	u8 *rss_key;
+	u8 *rss_lut;
+	/* ADQ related members */
+	struct i40evf_channel_config ch_config;
+	u8 num_tc;
+	struct list_head cloud_filter_list;
+	/* lock to protect access to the cloud filter list */
+	spinlock_t cloud_filter_list_lock;
+	u16 num_cloud_filters;
+};
+
+/* Ethtool Private Flags */
+
+/* lan device */
+struct i40e_device {
+	struct list_head list;
+	struct i40evf_adapter *vf;
+};
+
+/* needed by i40evf_ethtool.c */
+extern char i40evf_driver_name[];
+extern const char i40evf_driver_version[];
+
+int i40evf_up(struct i40evf_adapter *adapter);
+void i40evf_down(struct i40evf_adapter *adapter);
+int i40evf_process_config(struct i40evf_adapter *adapter);
+void i40evf_schedule_reset(struct i40evf_adapter *adapter);
+void i40evf_reset(struct i40evf_adapter *adapter);
+void i40evf_set_ethtool_ops(struct net_device *netdev);
+void i40evf_update_stats(struct i40evf_adapter *adapter);
+void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter);
+int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter);
+void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask);
+void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter);
+void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter);
+
+void i40e_napi_add_all(struct i40evf_adapter *adapter);
+void i40e_napi_del_all(struct i40evf_adapter *adapter);
+
+int i40evf_send_api_ver(struct i40evf_adapter *adapter);
+int i40evf_verify_api_ver(struct i40evf_adapter *adapter);
+int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter);
+int i40evf_get_vf_config(struct i40evf_adapter *adapter);
+void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush);
+void i40evf_configure_queues(struct i40evf_adapter *adapter);
+void i40evf_deconfigure_queues(struct i40evf_adapter *adapter);
+void i40evf_enable_queues(struct i40evf_adapter *adapter);
+void i40evf_disable_queues(struct i40evf_adapter *adapter);
+void i40evf_map_queues(struct i40evf_adapter *adapter);
+int i40evf_request_queues(struct i40evf_adapter *adapter, int num);
+void i40evf_add_ether_addrs(struct i40evf_adapter *adapter);
+void i40evf_del_ether_addrs(struct i40evf_adapter *adapter);
+void i40evf_add_vlans(struct i40evf_adapter *adapter);
+void i40evf_del_vlans(struct i40evf_adapter *adapter);
+void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
+void i40evf_request_stats(struct i40evf_adapter *adapter);
+void i40evf_request_reset(struct i40evf_adapter *adapter);
+void i40evf_get_hena(struct i40evf_adapter *adapter);
+void i40evf_set_hena(struct i40evf_adapter *adapter);
+void i40evf_set_rss_key(struct i40evf_adapter *adapter);
+void i40evf_set_rss_lut(struct i40evf_adapter *adapter);
+void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter);
+void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter);
+void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
+				enum virtchnl_ops v_opcode,
+				i40e_status v_retval, u8 *msg, u16 msglen);
+int i40evf_config_rss(struct i40evf_adapter *adapter);
+int i40evf_lan_add_device(struct i40evf_adapter *adapter);
+int i40evf_lan_del_device(struct i40evf_adapter *adapter);
+void i40evf_client_subtask(struct i40evf_adapter *adapter);
+void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len);
+void i40evf_notify_client_l2_params(struct i40e_vsi *vsi);
+void i40evf_notify_client_open(struct i40e_vsi *vsi);
+void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset);
+void i40evf_enable_channels(struct i40evf_adapter *adapter);
+void i40evf_disable_channels(struct i40evf_adapter *adapter);
+void i40evf_add_cloud_filter(struct i40evf_adapter *adapter);
+void i40evf_del_cloud_filter(struct i40evf_adapter *adapter);
+#endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/i40evf_client.c b/drivers/net/ethernet/intel/iavf/i40evf_client.c
new file mode 100644
index 000000000000..3cc9d60d0d72
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40evf_client.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include <linux/list.h>
+#include <linux/errno.h>
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+#include "i40evf_client.h"
+
+static
+const char i40evf_client_interface_version_str[] = I40EVF_CLIENT_VERSION_STR;
+static struct i40e_client *vf_registered_client;
+static LIST_HEAD(i40evf_devices);
+static DEFINE_MUTEX(i40evf_device_mutex);
+
+static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
+				       struct i40e_client *client,
+				       u8 *msg, u16 len);
+
+static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
+				      struct i40e_client *client,
+				      struct i40e_qvlist_info *qvlist_info);
+
+static struct i40e_ops i40evf_lan_ops = {
+	.virtchnl_send = i40evf_client_virtchnl_send,
+	.setup_qvlist = i40evf_client_setup_qvlist,
+};
+
+/**
+ * i40evf_client_get_params - retrieve relevant client parameters
+ * @vsi: VSI with parameters
+ * @params: client param struct
+ **/
+static
+void i40evf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
+{
+	int i;
+
+	memset(params, 0, sizeof(struct i40e_params));
+	params->mtu = vsi->netdev->mtu;
+	params->link_up = vsi->back->link_up;
+
+	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+		params->qos.prio_qos[i].tc = 0;
+		params->qos.prio_qos[i].qs_handle = vsi->qs_handle;
+	}
+}
+
+/**
+ * i40evf_notify_client_message - call the client message receive callback
+ * @vsi: the VSI associated with this client
+ * @msg: message buffer
+ * @len: length of message
+ *
+ * If there is a client to this VSI, call the client
+ **/
+void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)
+{
+	struct i40e_client_instance *cinst;
+
+	if (!vsi)
+		return;
+
+	cinst = vsi->back->cinst;
+	if (!cinst || !cinst->client || !cinst->client->ops ||
+	    !cinst->client->ops->virtchnl_receive) {
+		dev_dbg(&vsi->back->pdev->dev,
+			"Cannot locate client instance virtchnl_receive function\n");
+		return;
+	}
+	cinst->client->ops->virtchnl_receive(&cinst->lan_info,  cinst->client,
+					     msg, len);
+}
+
+/**
+ * i40evf_notify_client_l2_params - call the client notify callback
+ * @vsi: the VSI with l2 param changes
+ *
+ * If there is a client to this VSI, call the client
+ **/
+void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
+{
+	struct i40e_client_instance *cinst;
+	struct i40e_params params;
+
+	if (!vsi)
+		return;
+
+	cinst = vsi->back->cinst;
+
+	if (!cinst || !cinst->client || !cinst->client->ops ||
+	    !cinst->client->ops->l2_param_change) {
+		dev_dbg(&vsi->back->pdev->dev,
+			"Cannot locate client instance l2_param_change function\n");
+		return;
+	}
+	i40evf_client_get_params(vsi, &params);
+	cinst->lan_info.params = params;
+	cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client,
+					    &params);
+}
+
+/**
+ * i40evf_notify_client_open - call the client open callback
+ * @vsi: the VSI with netdev opened
+ *
+ * If there is a client to this netdev, call the client with open
+ **/
+void i40evf_notify_client_open(struct i40e_vsi *vsi)
+{
+	struct i40evf_adapter *adapter = vsi->back;
+	struct i40e_client_instance *cinst = adapter->cinst;
+	int ret;
+
+	if (!cinst || !cinst->client || !cinst->client->ops ||
+	    !cinst->client->ops->open) {
+		dev_dbg(&vsi->back->pdev->dev,
+			"Cannot locate client instance open function\n");
+		return;
+	}
+	if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state))) {
+		ret = cinst->client->ops->open(&cinst->lan_info, cinst->client);
+		if (!ret)
+			set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+	}
+}
+
+/**
+ * i40evf_client_release_qvlist - send a message to the PF to release iwarp qv map
+ * @ldev: pointer to L2 context.
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40evf_client_release_qvlist(struct i40e_info *ldev)
+{
+	struct i40evf_adapter *adapter = ldev->vf;
+	i40e_status err;
+
+	if (adapter->aq_required)
+		return -EAGAIN;
+
+	err = i40e_aq_send_msg_to_pf(&adapter->hw,
+			VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+			I40E_SUCCESS, NULL, 0, NULL);
+
+	if (err)
+		dev_err(&adapter->pdev->dev,
+			"Unable to send iWarp vector release message to PF, error %d, aq status %d\n",
+			err, adapter->hw.aq.asq_last_status);
+
+	return err;
+}
+
+/**
+ * i40evf_notify_client_close - call the client close callback
+ * @vsi: the VSI with netdev closed
+ * @reset: true when close called due to reset pending
+ *
+ * If there is a client to this netdev, call the client with close
+ **/
+void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset)
+{
+	struct i40evf_adapter *adapter = vsi->back;
+	struct i40e_client_instance *cinst = adapter->cinst;
+
+	if (!cinst || !cinst->client || !cinst->client->ops ||
+	    !cinst->client->ops->close) {
+		dev_dbg(&vsi->back->pdev->dev,
+			"Cannot locate client instance close function\n");
+		return;
+	}
+	cinst->client->ops->close(&cinst->lan_info, cinst->client, reset);
+	i40evf_client_release_qvlist(&cinst->lan_info);
+	clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+}
+
+/**
+ * i40evf_client_add_instance - add a client instance to the instance list
+ * @adapter: pointer to the board struct
+ *
+ * Returns cinst ptr on success, NULL on failure
+ **/
+static struct i40e_client_instance *
+i40evf_client_add_instance(struct i40evf_adapter *adapter)
+{
+	struct i40e_client_instance *cinst = NULL;
+	struct i40e_vsi *vsi = &adapter->vsi;
+	struct netdev_hw_addr *mac = NULL;
+	struct i40e_params params;
+
+	if (!vf_registered_client)
+		goto out;
+
+	if (adapter->cinst) {
+		cinst = adapter->cinst;
+		goto out;
+	}
+
+	cinst = kzalloc(sizeof(*cinst), GFP_KERNEL);
+	if (!cinst)
+		goto out;
+
+	cinst->lan_info.vf = (void *)adapter;
+	cinst->lan_info.netdev = vsi->netdev;
+	cinst->lan_info.pcidev = adapter->pdev;
+	cinst->lan_info.fid = 0;
+	cinst->lan_info.ftype = I40E_CLIENT_FTYPE_VF;
+	cinst->lan_info.hw_addr = adapter->hw.hw_addr;
+	cinst->lan_info.ops = &i40evf_lan_ops;
+	cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR;
+	cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR;
+	cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD;
+	i40evf_client_get_params(vsi, &params);
+	cinst->lan_info.params = params;
+	set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state);
+
+	cinst->lan_info.msix_count = adapter->num_iwarp_msix;
+	cinst->lan_info.msix_entries =
+			&adapter->msix_entries[adapter->iwarp_base_vector];
+
+	mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list,
+			       struct netdev_hw_addr, list);
+	if (mac)
+		ether_addr_copy(cinst->lan_info.lanmac, mac->addr);
+	else
+		dev_err(&adapter->pdev->dev, "MAC address list is empty!\n");
+
+	cinst->client = vf_registered_client;
+	adapter->cinst = cinst;
+out:
+	return cinst;
+}
+
+/**
+ * i40evf_client_del_instance - removes a client instance from the list
+ * @adapter: pointer to the board struct
+ *
+ **/
+static
+void i40evf_client_del_instance(struct i40evf_adapter *adapter)
+{
+	kfree(adapter->cinst);
+	adapter->cinst = NULL;
+}
+
+/**
+ * i40evf_client_subtask - client maintenance work
+ * @adapter: board private structure
+ **/
+void i40evf_client_subtask(struct i40evf_adapter *adapter)
+{
+	struct i40e_client *client = vf_registered_client;
+	struct i40e_client_instance *cinst;
+	int ret = 0;
+
+	if (adapter->state < __I40EVF_DOWN)
+		return;
+
+	/* first check client is registered */
+	if (!client)
+		return;
+
+	/* Add the client instance to the instance list */
+	cinst = i40evf_client_add_instance(adapter);
+	if (!cinst)
+		return;
+
+	dev_info(&adapter->pdev->dev, "Added instance of Client %s\n",
+		 client->name);
+
+	if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) {
+		/* Send an Open request to the client */
+
+		if (client->ops && client->ops->open)
+			ret = client->ops->open(&cinst->lan_info, client);
+		if (!ret)
+			set_bit(__I40E_CLIENT_INSTANCE_OPENED,
+				&cinst->state);
+		else
+			/* remove client instance */
+			i40evf_client_del_instance(adapter);
+	}
+}
+
+/**
+ * i40evf_lan_add_device - add a lan device struct to the list of lan devices
+ * @adapter: pointer to the board struct
+ *
+ * Returns 0 on success or non-zero on error
+ **/
+int i40evf_lan_add_device(struct i40evf_adapter *adapter)
+{
+	struct i40e_device *ldev;
+	int ret = 0;
+
+	mutex_lock(&i40evf_device_mutex);
+	list_for_each_entry(ldev, &i40evf_devices, list) {
+		if (ldev->vf == adapter) {
+			ret = -EEXIST;
+			goto out;
+		}
+	}
+	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+	if (!ldev) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	ldev->vf = adapter;
+	INIT_LIST_HEAD(&ldev->list);
+	list_add(&ldev->list, &i40evf_devices);
+	dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
+		 adapter->hw.bus.bus_id, adapter->hw.bus.device,
+		 adapter->hw.bus.func);
+
+	/* Since in some cases registration may have happened before a device
+	 * gets added, schedule a subtask to go initiate the clients.
+	 */
+	adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+
+out:
+	mutex_unlock(&i40evf_device_mutex);
+	return ret;
+}
+
+/**
+ * i40evf_lan_del_device - removes a lan device from the device list
+ * @adapter: pointer to the board struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40evf_lan_del_device(struct i40evf_adapter *adapter)
+{
+	struct i40e_device *ldev, *tmp;
+	int ret = -ENODEV;
+
+	mutex_lock(&i40evf_device_mutex);
+	list_for_each_entry_safe(ldev, tmp, &i40evf_devices, list) {
+		if (ldev->vf == adapter) {
+			dev_info(&adapter->pdev->dev,
+				 "Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
+				 adapter->hw.bus.bus_id, adapter->hw.bus.device,
+				 adapter->hw.bus.func);
+			list_del(&ldev->list);
+			kfree(ldev);
+			ret = 0;
+			break;
+		}
+	}
+
+	mutex_unlock(&i40evf_device_mutex);
+	return ret;
+}
+
+/**
+ * i40evf_client_release - release client specific resources
+ * @client: pointer to the registered client
+ *
+ **/
+static void i40evf_client_release(struct i40e_client *client)
+{
+	struct i40e_client_instance *cinst;
+	struct i40e_device *ldev;
+	struct i40evf_adapter *adapter;
+
+	mutex_lock(&i40evf_device_mutex);
+	list_for_each_entry(ldev, &i40evf_devices, list) {
+		adapter = ldev->vf;
+		cinst = adapter->cinst;
+		if (!cinst)
+			continue;
+		if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) {
+			if (client->ops && client->ops->close)
+				client->ops->close(&cinst->lan_info, client,
+						   false);
+			i40evf_client_release_qvlist(&cinst->lan_info);
+			clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+
+			dev_warn(&adapter->pdev->dev,
+				 "Client %s instance closed\n", client->name);
+		}
+		/* delete the client instance */
+		i40evf_client_del_instance(adapter);
+		dev_info(&adapter->pdev->dev, "Deleted client instance of Client %s\n",
+			 client->name);
+	}
+	mutex_unlock(&i40evf_device_mutex);
+}
+
+/**
+ * i40evf_client_prepare - prepare client specific resources
+ * @client: pointer to the registered client
+ *
+ **/
+static void i40evf_client_prepare(struct i40e_client *client)
+{
+	struct i40e_device *ldev;
+	struct i40evf_adapter *adapter;
+
+	mutex_lock(&i40evf_device_mutex);
+	list_for_each_entry(ldev, &i40evf_devices, list) {
+		adapter = ldev->vf;
+		/* Signal the watchdog to service the client */
+		adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+	}
+	mutex_unlock(&i40evf_device_mutex);
+}
+
+/**
+ * i40evf_client_virtchnl_send - send a message to the PF instance
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @msg: pointer to message buffer
+ * @len: message length
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
+				       struct i40e_client *client,
+				       u8 *msg, u16 len)
+{
+	struct i40evf_adapter *adapter = ldev->vf;
+	i40e_status err;
+
+	if (adapter->aq_required)
+		return -EAGAIN;
+
+	err = i40e_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP,
+				     I40E_SUCCESS, msg, len, NULL);
+	if (err)
+		dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n",
+			err, adapter->hw.aq.asq_last_status);
+
+	return err;
+}
+
+/**
+ * i40evf_client_setup_qvlist - send a message to the PF to setup iwarp qv map
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @qvlist_info: queue and vector list
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
+				      struct i40e_client *client,
+				      struct i40e_qvlist_info *qvlist_info)
+{
+	struct virtchnl_iwarp_qvlist_info *v_qvlist_info;
+	struct i40evf_adapter *adapter = ldev->vf;
+	struct i40e_qv_info *qv_info;
+	i40e_status err;
+	u32 v_idx, i;
+	u32 msg_size;
+
+	if (adapter->aq_required)
+		return -EAGAIN;
+
+	/* A quick check on whether the vectors belong to the client */
+	for (i = 0; i < qvlist_info->num_vectors; i++) {
+		qv_info = &qvlist_info->qv_info[i];
+		if (!qv_info)
+			continue;
+		v_idx = qv_info->v_idx;
+		if ((v_idx >=
+		    (adapter->iwarp_base_vector + adapter->num_iwarp_msix)) ||
+		    (v_idx < adapter->iwarp_base_vector))
+			return -EINVAL;
+	}
+
+	v_qvlist_info = (struct virtchnl_iwarp_qvlist_info *)qvlist_info;
+	msg_size = sizeof(struct virtchnl_iwarp_qvlist_info) +
+			(sizeof(struct virtchnl_iwarp_qv_info) *
+			(v_qvlist_info->num_vectors - 1));
+
+	adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
+	err = i40e_aq_send_msg_to_pf(&adapter->hw,
+			VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
+			I40E_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL);
+
+	if (err) {
+		dev_err(&adapter->pdev->dev,
+			"Unable to send iWarp vector config message to PF, error %d, aq status %d\n",
+			err, adapter->hw.aq.asq_last_status);
+		goto out;
+	}
+
+	err = -EBUSY;
+	for (i = 0; i < 5; i++) {
+		msleep(100);
+		if (!(adapter->client_pending &
+		      BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) {
+			err = 0;
+			break;
+		}
+	}
+out:
+	return err;
+}
+
+/**
+ * i40evf_register_client - Register an i40e client driver with the L2 driver
+ * @client: pointer to the i40e_client struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40evf_register_client(struct i40e_client *client)
+{
+	int ret = 0;
+
+	if (!client) {
+		ret = -EIO;
+		goto out;
+	}
+
+	if (strlen(client->name) == 0) {
+		pr_info("i40evf: Failed to register client with no name\n");
+		ret = -EIO;
+		goto out;
+	}
+
+	if (vf_registered_client) {
+		pr_info("i40evf: Client %s has already been registered!\n",
+			client->name);
+		ret = -EEXIST;
+		goto out;
+	}
+
+	if ((client->version.major != I40EVF_CLIENT_VERSION_MAJOR) ||
+	    (client->version.minor != I40EVF_CLIENT_VERSION_MINOR)) {
+		pr_info("i40evf: Failed to register client %s due to mismatched client interface version\n",
+			client->name);
+		pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n",
+			client->version.major, client->version.minor,
+			client->version.build,
+			i40evf_client_interface_version_str);
+		ret = -EIO;
+		goto out;
+	}
+
+	vf_registered_client = client;
+
+	i40evf_client_prepare(client);
+
+	pr_info("i40evf: Registered client %s with return code %d\n",
+		client->name, ret);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(i40evf_register_client);
+
+/**
+ * i40evf_unregister_client - Unregister an i40e client driver with the L2 driver
+ * @client: pointer to the i40e_client struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40evf_unregister_client(struct i40e_client *client)
+{
+	int ret = 0;
+
+	/* When an unregister request comes through, we have to send
+	 * a close for each of the client instances that were opened.
+	 * The client_release function is called to handle this.
+	 */
+	i40evf_client_release(client);
+
+	if (vf_registered_client != client) {
+		pr_info("i40evf: Client %s has not been registered\n",
+			client->name);
+		ret = -ENODEV;
+		goto out;
+	}
+	vf_registered_client = NULL;
+	pr_info("i40evf: Unregistered client %s\n", client->name);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(i40evf_unregister_client);
diff --git a/drivers/net/ethernet/intel/iavf/i40evf_client.h b/drivers/net/ethernet/intel/iavf/i40evf_client.h
new file mode 100644
index 000000000000..5585f362048a
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40evf_client.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40EVF_CLIENT_H_
+#define _I40EVF_CLIENT_H_
+
+#define I40EVF_CLIENT_STR_LENGTH 10
+
+/* Client interface version should be updated anytime there is a change in the
+ * existing APIs or data structures.
+ */
+#define I40EVF_CLIENT_VERSION_MAJOR 0
+#define I40EVF_CLIENT_VERSION_MINOR 01
+#define I40EVF_CLIENT_VERSION_BUILD 00
+#define I40EVF_CLIENT_VERSION_STR     \
+	__stringify(I40EVF_CLIENT_VERSION_MAJOR) "." \
+	__stringify(I40EVF_CLIENT_VERSION_MINOR) "." \
+	__stringify(I40EVF_CLIENT_VERSION_BUILD)
+
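
For reference, __stringify() expands the tokens above literally, so I40EVF_CLIENT_VERSION_STR ends up as the concatenation "0" "." "01" "." "00", i.e. the string "0.01.00".
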
+struct i40e_client_version {
+	u8 major;
+	u8 minor;
+	u8 build;
+	u8 rsvd;
+};
+
+enum i40e_client_state {
+	__I40E_CLIENT_NULL,
+	__I40E_CLIENT_REGISTERED
+};
+
+enum i40e_client_instance_state {
+	__I40E_CLIENT_INSTANCE_NONE,
+	__I40E_CLIENT_INSTANCE_OPENED,
+};
+
+struct i40e_ops;
+struct i40e_client;
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define I40E_QUEUE_TYPE_PE_AEQ  0x80
+#define I40E_QUEUE_INVALID_IDX	0xFFFF
+
+struct i40e_qv_info {
+	u32 v_idx; /* msix_vector */
+	u16 ceq_idx;
+	u16 aeq_idx;
+	u8 itr_idx;
+};
+
+struct i40e_qvlist_info {
+	u32 num_vectors;
+	struct i40e_qv_info qv_info[1];
+};
+
+#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF
+
+/* Set of LAN parameters that the LAN driver shares with its clients */
+
+/* Struct to hold per priority info */
+struct i40e_prio_qos_params {
+	u16 qs_handle; /* qs handle for prio */
+	u8 tc; /* TC mapped to prio */
+	u8 reserved;
+};
+
+#define I40E_CLIENT_MAX_USER_PRIORITY        8
+/* Struct to hold Client QoS */
+struct i40e_qos_params {
+	struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY];
+};
+
+struct i40e_params {
+	struct i40e_qos_params qos;
+	u16 mtu;
+	u16 link_up; /* boolean */
+};
+
+/* Structure to hold LAN device info for a client device */
+struct i40e_info {
+	struct i40e_client_version version;
+	u8 lanmac[6];
+	struct net_device *netdev;
+	struct pci_dev *pcidev;
+	u8 __iomem *hw_addr;
+	u8 fid;	/* function id, PF id or VF id */
+#define I40E_CLIENT_FTYPE_PF 0
+#define I40E_CLIENT_FTYPE_VF 1
+	u8 ftype; /* function type, PF or VF */
+	void *vf; /* cast to i40evf_adapter */
+
+	/* All L2 params that could change during the life span of the device
+	 * and need to be communicated to the client when they change
+	 */
+	struct i40e_params params;
+	struct i40e_ops *ops;
+
+	u16 msix_count;	 /* number of msix vectors */
+	/* The array below is dynamically allocated based on msix_count */
+	struct msix_entry *msix_entries;
+	u16 itr_index; /* Which ITR index the PE driver is supposed to use */
+};
+
+struct i40e_ops {
+	/* setup_qvlist enables queues with a particular vector */
+	int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client,
+			    struct i40e_qvlist_info *qv_info);
+
+	u32 (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client,
+			     u8 *msg, u16 len);
+
+	/* If the PE Engine is unresponsive, the RDMA driver can request a reset. */
+	void (*request_reset)(struct i40e_info *ldev,
+			      struct i40e_client *client);
+};
+
+struct i40e_client_ops {
+	/* Should be called from register_client() or whenever the driver is
+	 * ready to create a specific client instance.
+	 */
+	int (*open)(struct i40e_info *ldev, struct i40e_client *client);
+
+	/* Called when the netdev is unavailable or when an unregister
+	 * call comes in. If the close happens due to a reset, the reset
+	 * bit is set to true.
+	 */
+	void (*close)(struct i40e_info *ldev, struct i40e_client *client,
+		      bool reset);
+
+	/* called when some L2 managed parameters change - mss */
+	void (*l2_param_change)(struct i40e_info *ldev,
+				struct i40e_client *client,
+				struct i40e_params *params);
+
+	/* called when a message is received from the PF */
+	int (*virtchnl_receive)(struct i40e_info *ldev,
+				struct i40e_client *client,
+				u8 *msg, u16 len);
+};
+
+/* Client device */
+struct i40e_client_instance {
+	struct list_head list;
+	struct i40e_info lan_info;
+	struct i40e_client *client;
+	unsigned long  state;
+};
+
+struct i40e_client {
+	struct list_head list;		/* list of registered clients */
+	char name[I40EVF_CLIENT_STR_LENGTH];
+	struct i40e_client_version version;
+	unsigned long state;		/* client state */
+	atomic_t ref_cnt;  /* Count of all the client devices of this kind */
+	u32 flags;
+#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE	BIT(0)
+#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS	BIT(2)
+	u8 type;
+#define I40E_CLIENT_IWARP 0
+	struct i40e_client_ops *ops;	/* client ops provided by the client */
+};
+
+/* used by clients */
+int i40evf_register_client(struct i40e_client *client);
+int i40evf_unregister_client(struct i40e_client *client);
+#endif /* _I40EVF_CLIENT_H_ */
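For orientation: the client interface declared above is consumed by a separate
module (in practice the iWARP/RDMA driver), which fills in a struct i40e_client
and registers it via i40evf_register_client(). The sketch below is illustrative
only and is not part of this patch; the hyp_* names are hypothetical, and the
version fields must match I40EVF_CLIENT_VERSION_MAJOR/MINOR or
i40evf_register_client() will reject the client.

#include <linux/module.h>

#include "i40evf_client.h"

/* Minimal stub ops; a real client would set up and tear down its own
 * resources using the struct i40e_info handed over by the LAN driver.
 */
static int hyp_open(struct i40e_info *ldev, struct i40e_client *client)
{
	return 0;
}

static void hyp_close(struct i40e_info *ldev, struct i40e_client *client,
		      bool reset)
{
}

static void hyp_l2_param_change(struct i40e_info *ldev,
				struct i40e_client *client,
				struct i40e_params *params)
{
}

static int hyp_virtchnl_receive(struct i40e_info *ldev,
				struct i40e_client *client, u8 *msg, u16 len)
{
	return 0;
}

static struct i40e_client_ops hyp_ops = {
	.open			= hyp_open,
	.close			= hyp_close,
	.l2_param_change	= hyp_l2_param_change,
	.virtchnl_receive	= hyp_virtchnl_receive,
};

static struct i40e_client hyp_client = {
	.name		= "hyp_iwarp",	/* must fit in I40EVF_CLIENT_STR_LENGTH */
	.version	= {
		.major	= I40EVF_CLIENT_VERSION_MAJOR,
		.minor	= I40EVF_CLIENT_VERSION_MINOR,
		.build	= I40EVF_CLIENT_VERSION_BUILD,
	},
	.type		= I40E_CLIENT_IWARP,
	.ops		= &hyp_ops,
};

static int __init hyp_init(void)
{
	/* Fails with -EEXIST if another client is already registered */
	return i40evf_register_client(&hyp_client);
}
module_init(hyp_init);

static void __exit hyp_exit(void)
{
	i40evf_unregister_client(&hyp_client);
}
module_exit(hyp_exit);

MODULE_LICENSE("GPL");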
diff --git a/drivers/net/ethernet/intel/iavf/i40evf_ethtool.c b/drivers/net/ethernet/intel/iavf/i40evf_ethtool.c
new file mode 100644
index 000000000000..69efe0aec76a
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40evf_ethtool.c
@@ -0,0 +1,820 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+/* ethtool support for i40evf */
+#include "i40evf.h"
+
+#include <linux/uaccess.h>
+
+struct i40evf_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int stat_offset;
+};
+
+#define I40EVF_STAT(_name, _stat) { \
+	.stat_string = _name, \
+	.stat_offset = offsetof(struct i40evf_adapter, _stat) \
+}
+
+/* All stats are u64, so we don't need to track the size of the field. */
+static const struct i40evf_stats i40evf_gstrings_stats[] = {
+	I40EVF_STAT("rx_bytes", current_stats.rx_bytes),
+	I40EVF_STAT("rx_unicast", current_stats.rx_unicast),
+	I40EVF_STAT("rx_multicast", current_stats.rx_multicast),
+	I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast),
+	I40EVF_STAT("rx_discards", current_stats.rx_discards),
+	I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
+	I40EVF_STAT("tx_bytes", current_stats.tx_bytes),
+	I40EVF_STAT("tx_unicast", current_stats.tx_unicast),
+	I40EVF_STAT("tx_multicast", current_stats.tx_multicast),
+	I40EVF_STAT("tx_broadcast", current_stats.tx_broadcast),
+	I40EVF_STAT("tx_discards", current_stats.tx_discards),
+	I40EVF_STAT("tx_errors", current_stats.tx_errors),
+};
+
+#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
+#define I40EVF_QUEUE_STATS_LEN(_dev) \
+	(((struct i40evf_adapter *)\
+		netdev_priv(_dev))->num_active_queues \
+		  * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
+#define I40EVF_STATS_LEN(_dev) \
+	(I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
+
+/* For now we have one and only one private flag and it is only defined
+ * when we have support for the SKIP_CPU_SYNC DMA attribute.  Instead
+ * of leaving all this code sitting around empty we will strip it unless
+ * our one private flag is actually available.
+ */
+struct i40evf_priv_flags {
+	char flag_string[ETH_GSTRING_LEN];
+	u32 flag;
+	bool read_only;
+};
+
+#define I40EVF_PRIV_FLAG(_name, _flag, _read_only) { \
+	.flag_string = _name, \
+	.flag = _flag, \
+	.read_only = _read_only, \
+}
+
+static const struct i40evf_priv_flags i40evf_gstrings_priv_flags[] = {
+	I40EVF_PRIV_FLAG("legacy-rx", I40EVF_FLAG_LEGACY_RX, 0),
+};
+
+#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_gstrings_priv_flags)
+
+/**
+ * i40evf_get_link_ksettings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @cmd: ethtool command
+ *
+ * Reports speed/duplex settings. Because this is a VF, we don't know what
+ * kind of link we really have, so we fake it.
+ **/
+static int i40evf_get_link_ksettings(struct net_device *netdev,
+				     struct ethtool_link_ksettings *cmd)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	ethtool_link_ksettings_zero_link_mode(cmd, supported);
+	cmd->base.autoneg = AUTONEG_DISABLE;
+	cmd->base.port = PORT_NONE;
+	/* Set speed and duplex */
+	switch (adapter->link_speed) {
+	case I40E_LINK_SPEED_40GB:
+		cmd->base.speed = SPEED_40000;
+		break;
+	case I40E_LINK_SPEED_25GB:
+#ifdef SPEED_25000
+		cmd->base.speed = SPEED_25000;
+#else
+		netdev_info(netdev,
+			    "Speed is 25G, display not supported by this version of ethtool.\n");
+#endif
+		break;
+	case I40E_LINK_SPEED_20GB:
+		cmd->base.speed = SPEED_20000;
+		break;
+	case I40E_LINK_SPEED_10GB:
+		cmd->base.speed = SPEED_10000;
+		break;
+	case I40E_LINK_SPEED_1GB:
+		cmd->base.speed = SPEED_1000;
+		break;
+	case I40E_LINK_SPEED_100MB:
+		cmd->base.speed = SPEED_100;
+		break;
+	default:
+		break;
+	}
+	cmd->base.duplex = DUPLEX_FULL;
+
+	return 0;
+}
+
+/**
+ * i40evf_get_sset_count - Get length of string set
+ * @netdev: network interface device structure
+ * @sset: id of string set
+ *
+ * Reports size of string table. This driver only supports
+ * strings for statistics.
+ **/
+static int i40evf_get_sset_count(struct net_device *netdev, int sset)
+{
+	if (sset == ETH_SS_STATS)
+		return I40EVF_STATS_LEN(netdev);
+	else if (sset == ETH_SS_PRIV_FLAGS)
+		return I40EVF_PRIV_FLAGS_STR_LEN;
+	else
+		return -EINVAL;
+}
+
+/**
+ * i40evf_get_ethtool_stats - report device statistics
+ * @netdev: network interface device structure
+ * @stats: ethtool statistics structure
+ * @data: pointer to data buffer
+ *
+ * All statistics are added to the data buffer as an array of u64.
+ **/
+static void i40evf_get_ethtool_stats(struct net_device *netdev,
+				     struct ethtool_stats *stats, u64 *data)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	unsigned int i, j;
+	char *p;
+
+	for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) {
+		p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset;
+		data[i] =  *(u64 *)p;
+	}
+	for (j = 0; j < adapter->num_active_queues; j++) {
+		data[i++] = adapter->tx_rings[j].stats.packets;
+		data[i++] = adapter->tx_rings[j].stats.bytes;
+	}
+	for (j = 0; j < adapter->num_active_queues; j++) {
+		data[i++] = adapter->rx_rings[j].stats.packets;
+		data[i++] = adapter->rx_rings[j].stats.bytes;
+	}
+}
+
+/**
+ * i40evf_get_strings - Get string set
+ * @netdev: network interface device structure
+ * @sset: id of string set
+ * @data: buffer for string data
+ *
+ * Builds stats string table.
+ **/
+static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	u8 *p = data;
+	int i;
+
+	if (sset == ETH_SS_STATS) {
+		for (i = 0; i < (int)I40EVF_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, i40evf_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < adapter->num_active_queues; i++) {
+			snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i);
+			p += ETH_GSTRING_LEN;
+			snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i);
+			p += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < adapter->num_active_queues; i++) {
+			snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i);
+			p += ETH_GSTRING_LEN;
+			snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
+			p += ETH_GSTRING_LEN;
+		}
+	} else if (sset == ETH_SS_PRIV_FLAGS) {
+		for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
+			snprintf(p, ETH_GSTRING_LEN, "%s",
+				 i40evf_gstrings_priv_flags[i].flag_string);
+			p += ETH_GSTRING_LEN;
+		}
+	}
+}
+
+/**
+ * i40evf_get_priv_flags - report device private flags
+ * @netdev: network interface device structure
+ *
+ * The string set count and the string set must be kept in sync for each
+ * flag returned.  Add new strings for each flag to the
+ * i40evf_gstrings_priv_flags array.
+ *
+ * Returns a u32 bitmap of flags.
+ **/
+static u32 i40evf_get_priv_flags(struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	u32 i, ret_flags = 0;
+
+	for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
+		const struct i40evf_priv_flags *priv_flags;
+
+		priv_flags = &i40evf_gstrings_priv_flags[i];
+
+		if (priv_flags->flag & adapter->flags)
+			ret_flags |= BIT(i);
+	}
+
+	return ret_flags;
+}
+
+/**
+ * i40evf_set_priv_flags - set private flags
+ * @netdev: network interface device structure
+ * @flags: bit flags to be set
+ **/
+static int i40evf_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	u32 orig_flags, new_flags, changed_flags;
+	u32 i;
+
+	orig_flags = READ_ONCE(adapter->flags);
+	new_flags = orig_flags;
+
+	for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
+		const struct i40evf_priv_flags *priv_flags;
+
+		priv_flags = &i40evf_gstrings_priv_flags[i];
+
+		if (flags & BIT(i))
+			new_flags |= priv_flags->flag;
+		else
+			new_flags &= ~(priv_flags->flag);
+
+		if (priv_flags->read_only &&
+		    ((orig_flags ^ new_flags) & ~BIT(i)))
+			return -EOPNOTSUPP;
+	}
+
+	/* Before we finalize any flag changes, any checks which we need to
+	 * perform to determine if the new flags will be supported should go
+	 * here...
+	 */
+
+	/* Compare and exchange the new flags into place. If cmpxchg returns
+	 * anything but the old value, something else must have modified the
+	 * flags variable since we copied it; punt with an error and log a
+	 * warning in the message buffer.
+	 */
+	if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) {
+		dev_warn(&adapter->pdev->dev,
+			 "Unable to update adapter->flags as it was modified by another thread...\n");
+		return -EAGAIN;
+	}
+
+	changed_flags = orig_flags ^ new_flags;
+
+	/* Process any additional changes needed as a result of flag changes.
+	 * The changed_flags value reflects the list of bits that were changed
+	 * in the code above.
+	 */
+
+	/* issue a reset to force legacy-rx change to take effect */
+	if (changed_flags & I40EVF_FLAG_LEGACY_RX) {
+		if (netif_running(netdev)) {
+			adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+			schedule_work(&adapter->reset_task);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * i40evf_get_msglevel - Get debug message level
+ * @netdev: network interface device structure
+ *
+ * Returns current debug message level.
+ **/
+static u32 i40evf_get_msglevel(struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	return adapter->msg_enable;
+}
+
+/**
+ * i40evf_set_msglevel - Set debug message level
+ * @netdev: network interface device structure
+ * @data: message level
+ *
+ * Set current debug message level. Higher values cause the driver to
+ * be noisier.
+ **/
+static void i40evf_set_msglevel(struct net_device *netdev, u32 data)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	if (I40E_DEBUG_USER & data)
+		adapter->hw.debug_mask = data;
+	adapter->msg_enable = data;
+}
+
+/**
+ * i40evf_get_drvinfo - Get driver info
+ * @netdev: network interface device structure
+ * @drvinfo: ethtool driver info structure
+ *
+ * Returns information about the driver and device for display to the user.
+ **/
+static void i40evf_get_drvinfo(struct net_device *netdev,
+			       struct ethtool_drvinfo *drvinfo)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	strlcpy(drvinfo->driver, i40evf_driver_name, 32);
+	strlcpy(drvinfo->version, i40evf_driver_version, 32);
+	strlcpy(drvinfo->fw_version, "N/A", 4);
+	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN;
+}
+
+/**
+ * i40evf_get_ringparam - Get ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ *
+ * Returns current ring parameters. TX and RX rings are reported separately,
+ * but the number of rings is not reported.
+ **/
+static void i40evf_get_ringparam(struct net_device *netdev,
+				 struct ethtool_ringparam *ring)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	ring->rx_max_pending = I40EVF_MAX_RXD;
+	ring->tx_max_pending = I40EVF_MAX_TXD;
+	ring->rx_pending = adapter->rx_desc_count;
+	ring->tx_pending = adapter->tx_desc_count;
+}
+
+/**
+ * i40evf_set_ringparam - Set ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ *
+ * Sets ring parameters. TX and RX rings are controlled separately, but the
+ * number of rings is not specified, so all rings get the same settings.
+ **/
+static int i40evf_set_ringparam(struct net_device *netdev,
+				struct ethtool_ringparam *ring)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	u32 new_rx_count, new_tx_count;
+
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+		return -EINVAL;
+
+	new_tx_count = clamp_t(u32, ring->tx_pending,
+			       I40EVF_MIN_TXD,
+			       I40EVF_MAX_TXD);
+	new_tx_count = ALIGN(new_tx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
+
+	new_rx_count = clamp_t(u32, ring->rx_pending,
+			       I40EVF_MIN_RXD,
+			       I40EVF_MAX_RXD);
+	new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
+
+	/* if nothing to do return success */
+	if ((new_tx_count == adapter->tx_desc_count) &&
+	    (new_rx_count == adapter->rx_desc_count))
+		return 0;
+
+	adapter->tx_desc_count = new_tx_count;
+	adapter->rx_desc_count = new_rx_count;
+
+	if (netif_running(netdev)) {
+		adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+		schedule_work(&adapter->reset_task);
+	}
+
+	return 0;
+}
+
+/**
+ * __i40evf_get_coalesce - get per-queue coalesce settings
+ * @netdev: the netdev to check
+ * @ec: ethtool coalesce data structure
+ * @queue: which queue to pick
+ *
+ * Gets the per-queue interrupt coalescing settings. The Rx and Tx usecs
+ * values are per queue. If queue is < 0, queue 0's settings are returned
+ * as the representative value.
+ **/
+static int __i40evf_get_coalesce(struct net_device *netdev,
+				 struct ethtool_coalesce *ec,
+				 int queue)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct i40e_vsi *vsi = &adapter->vsi;
+	struct i40e_ring *rx_ring, *tx_ring;
+
+	ec->tx_max_coalesced_frames = vsi->work_limit;
+	ec->rx_max_coalesced_frames = vsi->work_limit;
+
+	/* Rx and Tx usecs are per-queue values. If the user doesn't specify
+	 * a queue, return queue 0's values as representative.
+	 */
+	if (queue < 0)
+		queue = 0;
+	else if (queue >= adapter->num_active_queues)
+		return -EINVAL;
+
+	rx_ring = &adapter->rx_rings[queue];
+	tx_ring = &adapter->tx_rings[queue];
+
+	if (ITR_IS_DYNAMIC(rx_ring->itr_setting))
+		ec->use_adaptive_rx_coalesce = 1;
+
+	if (ITR_IS_DYNAMIC(tx_ring->itr_setting))
+		ec->use_adaptive_tx_coalesce = 1;
+
+	ec->rx_coalesce_usecs = rx_ring->itr_setting & ~I40E_ITR_DYNAMIC;
+	ec->tx_coalesce_usecs = tx_ring->itr_setting & ~I40E_ITR_DYNAMIC;
+
+	return 0;
+}
+
+/**
+ * i40evf_get_coalesce - Get interrupt coalescing settings
+ * @netdev: network interface device structure
+ * @ec: ethtool coalesce structure
+ *
+ * Returns current coalescing settings. This is referred to elsewhere in the
+ * driver as Interrupt Throttle Rate, as this is how the hardware describes
+ * this functionality. Note that if per-queue settings have been modified this
+ * only represents the settings of queue 0.
+ **/
+static int i40evf_get_coalesce(struct net_device *netdev,
+			       struct ethtool_coalesce *ec)
+{
+	return __i40evf_get_coalesce(netdev, ec, -1);
+}
+
+/**
+ * i40evf_get_per_queue_coalesce - get coalesce values for specific queue
+ * @netdev: netdev to read
+ * @ec: coalesce settings from ethtool
+ * @queue: the queue to read
+ *
+ * Read specific queue's coalesce settings.
+ **/
+static int i40evf_get_per_queue_coalesce(struct net_device *netdev,
+					 u32 queue,
+					 struct ethtool_coalesce *ec)
+{
+	return __i40evf_get_coalesce(netdev, ec, queue);
+}
+
+/**
+ * i40evf_set_itr_per_queue - set ITR values for specific queue
+ * @adapter: the VF adapter struct to set values for
+ * @ec: coalesce settings from ethtool
+ * @queue: the queue to modify
+ *
+ * Change the ITR settings for a specific queue.
+ **/
+static void i40evf_set_itr_per_queue(struct i40evf_adapter *adapter,
+				     struct ethtool_coalesce *ec,
+				     int queue)
+{
+	struct i40e_ring *rx_ring = &adapter->rx_rings[queue];
+	struct i40e_ring *tx_ring = &adapter->tx_rings[queue];
+	struct i40e_q_vector *q_vector;
+
+	rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
+	tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
+
+	rx_ring->itr_setting |= I40E_ITR_DYNAMIC;
+	if (!ec->use_adaptive_rx_coalesce)
+		rx_ring->itr_setting ^= I40E_ITR_DYNAMIC;
+
+	tx_ring->itr_setting |= I40E_ITR_DYNAMIC;
+	if (!ec->use_adaptive_tx_coalesce)
+		tx_ring->itr_setting ^= I40E_ITR_DYNAMIC;
+
+	q_vector = rx_ring->q_vector;
+	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
+
+	q_vector = tx_ring->q_vector;
+	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
+
+	/* The interrupt handler itself will take care of programming
+	 * the Tx and Rx ITR values based on the values we have entered
+	 * into the q_vector, no need to write the values now.
+	 */
+}
+
+/**
+ * __i40evf_set_coalesce - set coalesce settings for particular queue
+ * @netdev: the netdev to change
+ * @ec: ethtool coalesce settings
+ * @queue: the queue to change
+ *
+ * Sets the coalesce settings for a particular queue.
+ **/
+static int __i40evf_set_coalesce(struct net_device *netdev,
+				 struct ethtool_coalesce *ec,
+				 int queue)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct i40e_vsi *vsi = &adapter->vsi;
+	int i;
+
+	if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
+		vsi->work_limit = ec->tx_max_coalesced_frames_irq;
+
+	if (ec->rx_coalesce_usecs == 0) {
+		if (ec->use_adaptive_rx_coalesce)
+			netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
+	} else if ((ec->rx_coalesce_usecs < I40E_MIN_ITR) ||
+		   (ec->rx_coalesce_usecs > I40E_MAX_ITR)) {
+		netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
+		return -EINVAL;
+	} else if (ec->tx_coalesce_usecs == 0) {
+		if (ec->use_adaptive_tx_coalesce)
+			netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
+	} else if ((ec->tx_coalesce_usecs < I40E_MIN_ITR) ||
+		   (ec->tx_coalesce_usecs > I40E_MAX_ITR)) {
+		netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
+		return -EINVAL;
+	}
+
+	/* Rx and Tx usecs are per-queue values. If the user doesn't specify
+	 * a queue, apply to all queues.
+	 */
+	if (queue < 0) {
+		for (i = 0; i < adapter->num_active_queues; i++)
+			i40evf_set_itr_per_queue(adapter, ec, i);
+	} else if (queue < adapter->num_active_queues) {
+		i40evf_set_itr_per_queue(adapter, ec, queue);
+	} else {
+		netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
+			   adapter->num_active_queues - 1);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * i40evf_set_coalesce - Set interrupt coalescing settings
+ * @netdev: network interface device structure
+ * @ec: ethtool coalesce structure
+ *
+ * Change current coalescing settings for every queue.
+ **/
+static int i40evf_set_coalesce(struct net_device *netdev,
+			       struct ethtool_coalesce *ec)
+{
+	return __i40evf_set_coalesce(netdev, ec, -1);
+}
+
+/**
+ * i40evf_set_per_queue_coalesce - set specific queue's coalesce settings
+ * @netdev: the netdev to change
+ * @ec: ethtool's coalesce settings
+ * @queue: the queue to modify
+ *
+ * Modifies a specific queue's coalesce settings.
+ */
+static int i40evf_set_per_queue_coalesce(struct net_device *netdev,
+					 u32 queue,
+					 struct ethtool_coalesce *ec)
+{
+	return __i40evf_set_coalesce(netdev, ec, queue);
+}
+
+/**
+ * i40evf_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ * @rule_locs: pointer to store rule locations
+ *
+ * Returns 0 if the command is supported, -EOPNOTSUPP otherwise.
+ **/
+static int i40evf_get_rxnfc(struct net_device *netdev,
+			    struct ethtool_rxnfc *cmd,
+			    u32 *rule_locs)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	int ret = -EOPNOTSUPP;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		cmd->data = adapter->num_active_queues;
+		ret = 0;
+		break;
+	case ETHTOOL_GRXFH:
+		netdev_info(netdev,
+			    "RSS hash info is not available to vf, use pf.\n");
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * i40evf_get_channels - get the number of channels supported by the device
+ * @netdev: network interface device structure
+ * @ch: channel information structure
+ *
+ * For the purposes of our device, we only use combined channels, i.e. a tx/rx
+ * queue pair. Report one extra channel to match our "other" MSI-X vector.
+ **/
+static void i40evf_get_channels(struct net_device *netdev,
+				struct ethtool_channels *ch)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	/* Report maximum channels */
+	ch->max_combined = I40EVF_MAX_REQ_QUEUES;
+
+	ch->max_other = NONQ_VECS;
+	ch->other_count = NONQ_VECS;
+
+	ch->combined_count = adapter->num_active_queues;
+}
+
+/**
+ * i40evf_set_channels - set the new channel count
+ * @netdev: network interface device structure
+ * @ch: channel information structure
+ *
+ * Negotiate a new number of channels with the PF then do a reset.  During
+ * reset we'll realloc queues and fix the RSS table.  Returns 0 on success,
+ * negative on failure.
+ **/
+static int i40evf_set_channels(struct net_device *netdev,
+			       struct ethtool_channels *ch)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	int num_req = ch->combined_count;
+
+	if (num_req != adapter->num_active_queues &&
+	    !(adapter->vf_res->vf_cap_flags &
+	      VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) {
+		dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n");
+		return -EINVAL;
+	}
+
+	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
+	    adapter->num_tc) {
+		dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n");
+		return -EINVAL;
+	}
+
+	/* All of these should have already been checked by ethtool before this
+	 * even gets to us, but just to be sure.
+	 */
+	if (num_req <= 0 || num_req > I40EVF_MAX_REQ_QUEUES)
+		return -EINVAL;
+
+	if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS)
+		return -EINVAL;
+
+	adapter->num_req_queues = num_req;
+	return i40evf_request_queues(adapter, num_req);
+}
+
+/**
+ * i40evf_get_rxfh_key_size - get the RSS hash key size
+ * @netdev: network interface device structure
+ *
+ * Returns the size of the RSS hash key.
+ **/
+static u32 i40evf_get_rxfh_key_size(struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	return adapter->rss_key_size;
+}
+
+/**
+ * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size.
+ **/
+static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	return adapter->rss_lut_size;
+}
+
+/**
+ * i40evf_get_rxfh - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function in use
+ *
+ * Reads the indirection table and hash key from the driver's cached copies
+ * rather than from the hardware. Always returns 0.
+ **/
+static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+			   u8 *hfunc)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	u16 i;
+
+	if (hfunc)
+		*hfunc = ETH_RSS_HASH_TOP;
+	if (!indir)
+		return 0;
+
+	memcpy(key, adapter->rss_key, adapter->rss_key_size);
+
+	/* Each 32 bits pointed to by 'indir' holds one LUT entry */
+	for (i = 0; i < adapter->rss_lut_size; i++)
+		indir[i] = (u32)adapter->rss_lut[i];
+
+	return 0;
+}
+
+/**
+ * i40evf_set_rxfh - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function to use
+ *
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
+ * returns 0 after programming the table.
+ **/
+static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
+			   const u8 *key, const u8 hfunc)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	u16 i;
+
+	/* We do not allow change in unsupported parameters */
+	if (key ||
+	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+		return -EOPNOTSUPP;
+	if (!indir)
+		return 0;
+
+	if (key)
+		memcpy(adapter->rss_key, key, adapter->rss_key_size);
+
+	/* Each 32 bits pointed to by 'indir' holds one LUT entry */
+	for (i = 0; i < adapter->rss_lut_size; i++)
+		adapter->rss_lut[i] = (u8)(indir[i]);
+
+	return i40evf_config_rss(adapter);
+}
+
+static const struct ethtool_ops i40evf_ethtool_ops = {
+	.get_drvinfo		= i40evf_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
+	.get_ringparam		= i40evf_get_ringparam,
+	.set_ringparam		= i40evf_set_ringparam,
+	.get_strings		= i40evf_get_strings,
+	.get_ethtool_stats	= i40evf_get_ethtool_stats,
+	.get_sset_count		= i40evf_get_sset_count,
+	.get_priv_flags		= i40evf_get_priv_flags,
+	.set_priv_flags		= i40evf_set_priv_flags,
+	.get_msglevel		= i40evf_get_msglevel,
+	.set_msglevel		= i40evf_set_msglevel,
+	.get_coalesce		= i40evf_get_coalesce,
+	.set_coalesce		= i40evf_set_coalesce,
+	.get_per_queue_coalesce = i40evf_get_per_queue_coalesce,
+	.set_per_queue_coalesce = i40evf_set_per_queue_coalesce,
+	.get_rxnfc		= i40evf_get_rxnfc,
+	.get_rxfh_indir_size	= i40evf_get_rxfh_indir_size,
+	.get_rxfh		= i40evf_get_rxfh,
+	.set_rxfh		= i40evf_set_rxfh,
+	.get_channels		= i40evf_get_channels,
+	.set_channels		= i40evf_set_channels,
+	.get_rxfh_key_size	= i40evf_get_rxfh_key_size,
+	.get_link_ksettings	= i40evf_get_link_ksettings,
+};
+
+/**
+ * i40evf_set_ethtool_ops - Initialize ethtool ops struct
+ * @netdev: network interface device structure
+ *
+ * Sets ethtool ops struct in our netdev so that ethtool can call
+ * our functions.
+ **/
+void i40evf_set_ethtool_ops(struct net_device *netdev)
+{
+	netdev->ethtool_ops = &i40evf_ethtool_ops;
+}
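As a usage aside (illustrative, not part of the patch; the interface name and
values are placeholders): with these ops installed, the statistics defined
above appear under "ethtool -S eth0", the legacy-rx private flag is read and
toggled with "ethtool --show-priv-flags eth0" and
"ethtool --set-priv-flags eth0 legacy-rx on", and the per-queue coalesce
callbacks are reached with
"ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 50" on ethtool
builds that support per-queue parameters.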
diff --git a/drivers/net/ethernet/intel/iavf/i40evf_main.c b/drivers/net/ethernet/intel/iavf/i40evf_main.c
new file mode 100644
index 000000000000..60c2e5df5827
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40evf_main.c
@@ -0,0 +1,3990 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+#include "i40evf_client.h"
+/* All i40evf tracepoints are defined by the include below, which must
+ * be included exactly once across the whole kernel with
+ * CREATE_TRACE_POINTS defined
+ */
+#define CREATE_TRACE_POINTS
+#include "i40e_trace.h"
+
+static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
+static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
+static int i40evf_close(struct net_device *netdev);
+
+char i40evf_driver_name[] = "i40evf";
+static const char i40evf_driver_string[] =
+	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";
+
+#define DRV_KERN "-k"
+
+#define DRV_VERSION_MAJOR 3
+#define DRV_VERSION_MINOR 2
+#define DRV_VERSION_BUILD 3
+#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
+	     __stringify(DRV_VERSION_MINOR) "." \
+	     __stringify(DRV_VERSION_BUILD) \
+	     DRV_KERN
+const char i40evf_driver_version[] = DRV_VERSION;
+static const char i40evf_copyright[] =
+	"Copyright (c) 2013 - 2018 Intel Corporation.";
+
+/* i40evf_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ *   Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id i40evf_pci_tbl[] = {
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_ADAPTIVE_VF), 0},
+	/* required last entry */
+	{0, }
+};
+
+MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl);
+
+MODULE_ALIAS("i40evf");
+MODULE_AUTHOR("Intel Corporation, <linux.nics@...el.com>");
+MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static struct workqueue_struct *i40evf_wq;
+
+/**
+ * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to fill out
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ **/
+i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw,
+				      struct i40e_dma_mem *mem,
+				      u64 size, u32 alignment)
+{
+	struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
+
+	if (!mem)
+		return I40E_ERR_PARAM;
+
+	mem->size = ALIGN(size, alignment);
+	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
+				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
+	if (mem->va)
+		return 0;
+	else
+		return I40E_ERR_NO_MEMORY;
+}
+
+/**
+ * i40evf_free_dma_mem_d - OS specific memory free for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to free
+ **/
+i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+{
+	struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
+
+	if (!mem || !mem->va)
+		return I40E_ERR_PARAM;
+	dma_free_coherent(&adapter->pdev->dev, mem->size,
+			  mem->va, (dma_addr_t)mem->pa);
+	return 0;
+}
+
+/**
+ * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to fill out
+ * @size: size of memory requested
+ **/
+i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw,
+				       struct i40e_virt_mem *mem, u32 size)
+{
+	if (!mem)
+		return I40E_ERR_PARAM;
+
+	mem->size = size;
+	mem->va = kzalloc(size, GFP_KERNEL);
+
+	if (mem->va)
+		return 0;
+	else
+		return I40E_ERR_NO_MEMORY;
+}
+
+/**
+ * i40evf_free_virt_mem_d - OS specific memory free for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to free
+ **/
+i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw,
+				   struct i40e_virt_mem *mem)
+{
+	if (!mem)
+		return I40E_ERR_PARAM;
+
+	/* it's ok to kfree a NULL pointer */
+	kfree(mem->va);
+
+	return 0;
+}
+
+/**
+ * i40evf_debug_d - OS dependent version of debug printing
+ * @hw:  pointer to the HW structure
+ * @mask: debug level mask
+ * @fmt_str: printf-type format description
+ **/
+void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
+{
+	char buf[512];
+	va_list argptr;
+
+	if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
+		return;
+
+	va_start(argptr, fmt_str);
+	vsnprintf(buf, sizeof(buf), fmt_str, argptr);
+	va_end(argptr);
+
+	/* the debug string is already formatted with a newline */
+	pr_info("%s", buf);
+}
+
+/**
+ * i40evf_schedule_reset - Set the flags and schedule a reset event
+ * @adapter: board private structure
+ **/
+void i40evf_schedule_reset(struct i40evf_adapter *adapter)
+{
+	if (!(adapter->flags &
+	      (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) {
+		adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+		schedule_work(&adapter->reset_task);
+	}
+}
+
+/**
+ * i40evf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void i40evf_tx_timeout(struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	adapter->tx_timeout_count++;
+	i40evf_schedule_reset(adapter);
+}
+
+/**
+ * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
+{
+	struct i40e_hw *hw = &adapter->hw;
+
+	if (!adapter->msix_entries)
+		return;
+
+	wr32(hw, I40E_VFINT_DYN_CTL01, 0);
+
+	/* read flush */
+	rd32(hw, I40E_VFGEN_RSTAT);
+
+	synchronize_irq(adapter->msix_entries[0].vector);
+}
+
+/**
+ * i40evf_misc_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
+{
+	struct i40e_hw *hw = &adapter->hw;
+
+	wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
+				       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
+	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
+
+	/* read flush */
+	rd32(hw, I40E_VFGEN_RSTAT);
+}
+
+/**
+ * i40evf_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void i40evf_irq_disable(struct i40evf_adapter *adapter)
+{
+	int i;
+	struct i40e_hw *hw = &adapter->hw;
+
+	if (!adapter->msix_entries)
+		return;
+
+	for (i = 1; i < adapter->num_msix_vectors; i++) {
+		wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
+		synchronize_irq(adapter->msix_entries[i].vector);
+	}
+	/* read flush */
+	rd32(hw, I40E_VFGEN_RSTAT);
+}
+
+/**
+ * i40evf_irq_enable_queues - Enable interrupt for specified queues
+ * @adapter: board private structure
+ * @mask: bitmap of queues to enable
+ **/
+void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	int i;
+
+	for (i = 1; i < adapter->num_msix_vectors; i++) {
+		if (mask & BIT(i - 1)) {
+			wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
+			     I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+			     I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
+		}
+	}
+}
+
+/**
+ * i40evf_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ * @flush: boolean value whether to run rd32()
+ **/
+void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
+{
+	struct i40e_hw *hw = &adapter->hw;
+
+	i40evf_misc_irq_enable(adapter);
+	i40evf_irq_enable_queues(adapter, ~0);
+
+	if (flush)
+		rd32(hw, I40E_VFGEN_RSTAT);
+}
+
+/**
+ * i40evf_msix_aq - Interrupt handler for vector 0
+ * @irq: interrupt number
+ * @data: pointer to netdev
+ **/
+static irqreturn_t i40evf_msix_aq(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct i40e_hw *hw = &adapter->hw;
+
+	/* handle non-queue interrupts, these reads clear the registers */
+	rd32(hw, I40E_VFINT_ICR01);
+	rd32(hw, I40E_VFINT_ICR0_ENA1);
+
+	/* schedule work on the private workqueue */
+	schedule_work(&adapter->adminq_task);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * i40evf_msix_clean_rings - MSIX mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
+{
+	struct i40e_q_vector *q_vector = data;
+
+	if (!q_vector->tx.ring && !q_vector->rx.ring)
+		return IRQ_HANDLED;
+
+	napi_schedule_irqoff(&q_vector->napi);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * i40evf_map_vector_to_rxq - associate irqs with rx queues
+ * @adapter: board private structure
+ * @v_idx: interrupt number
+ * @r_idx: queue number
+ **/
+static void
+i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
+{
+	struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
+	struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];
+	struct i40e_hw *hw = &adapter->hw;
+
+	rx_ring->q_vector = q_vector;
+	rx_ring->next = q_vector->rx.ring;
+	rx_ring->vsi = &adapter->vsi;
+	q_vector->rx.ring = rx_ring;
+	q_vector->rx.count++;
+	q_vector->rx.next_update = jiffies + 1;
+	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
+	q_vector->ring_mask |= BIT(r_idx);
+	wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, q_vector->reg_idx),
+	     q_vector->rx.current_itr);
+	q_vector->rx.current_itr = q_vector->rx.target_itr;
+}
+
+/**
+ * i40evf_map_vector_to_txq - associate irqs with tx queues
+ * @adapter: board private structure
+ * @v_idx: interrupt number
+ * @t_idx: queue number
+ **/
+static void
+i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
+{
+	struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
+	struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];
+	struct i40e_hw *hw = &adapter->hw;
+
+	tx_ring->q_vector = q_vector;
+	tx_ring->next = q_vector->tx.ring;
+	tx_ring->vsi = &adapter->vsi;
+	q_vector->tx.ring = tx_ring;
+	q_vector->tx.count++;
+	q_vector->tx.next_update = jiffies + 1;
+	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
+	q_vector->num_ringpairs++;
+	wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, q_vector->reg_idx),
+	     q_vector->tx.target_itr);
+	q_vector->tx.current_itr = q_vector->tx.target_itr;
+}
+
+/**
+ * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code.  Ideally, we'd have
+ * one vector per ring/queue, but on a constrained vector budget, we
+ * group the rings as "efficiently" as possible.  You would add new
+ * mapping configurations in here.
+ **/
+static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
+{
+	int rings_remaining = adapter->num_active_queues;
+	int ridx = 0, vidx = 0;
+	int q_vectors;
+
+	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+	for (; ridx < rings_remaining; ridx++) {
+		i40evf_map_vector_to_rxq(adapter, vidx, ridx);
+		i40evf_map_vector_to_txq(adapter, vidx, ridx);
+
+		/* In the case where we have more queues than vectors, continue
+		 * round-robin on vectors until all queues are mapped.
+		 */
+		if (++vidx >= q_vectors)
+			vidx = 0;
+	}
+
+	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
+}
+
+/**
+ * i40evf_irq_affinity_notify - Callback for affinity changes
+ * @notify: context as to what irq was changed
+ * @mask: the new affinity mask
+ *
+ * This is a callback function used by the irq_set_affinity_notifier function
+ * so that we may register to receive changes to the irq affinity masks.
+ **/
+static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify,
+				       const cpumask_t *mask)
+{
+	struct i40e_q_vector *q_vector =
+		container_of(notify, struct i40e_q_vector, affinity_notify);
+
+	cpumask_copy(&q_vector->affinity_mask, mask);
+}
+
+/**
+ * i40evf_irq_affinity_release - Callback for affinity notifier release
+ * @ref: internal core kernel usage
+ *
+ * This is a callback function used by the irq_set_affinity_notifier function
+ * to inform the current notification subscriber that they will no longer
+ * receive notifications.
+ **/
+static void i40evf_irq_affinity_release(struct kref *ref) {}
+
+/**
+ * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ * @basename: device basename
+ *
+ * Allocates MSI-X vectors for tx and rx handling, and requests
+ * interrupts from the kernel.
+ **/
+static int
+i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
+{
+	unsigned int vector, q_vectors;
+	unsigned int rx_int_idx = 0, tx_int_idx = 0;
+	int irq_num, err;
+	int cpu;
+
+	i40evf_irq_disable(adapter);
+	/* Decrement for the non-queue (admin queue) vectors */
+	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+	for (vector = 0; vector < q_vectors; vector++) {
+		struct i40e_q_vector *q_vector = &adapter->q_vectors[vector];
+		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
+
+		if (q_vector->tx.ring && q_vector->rx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name),
+				 "i40evf-%s-TxRx-%d", basename, rx_int_idx++);
+			tx_int_idx++;
+		} else if (q_vector->rx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name),
+				 "i40evf-%s-rx-%d", basename, rx_int_idx++);
+		} else if (q_vector->tx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name),
+				 "i40evf-%s-tx-%d", basename, tx_int_idx++);
+		} else {
+			/* skip this unused q_vector */
+			continue;
+		}
+		err = request_irq(irq_num,
+				  i40evf_msix_clean_rings,
+				  0,
+				  q_vector->name,
+				  q_vector);
+		if (err) {
+			dev_info(&adapter->pdev->dev,
+				 "Request_irq failed, error: %d\n", err);
+			goto free_queue_irqs;
+		}
+		/* register for affinity change notifications */
+		q_vector->affinity_notify.notify = i40evf_irq_affinity_notify;
+		q_vector->affinity_notify.release =
+						   i40evf_irq_affinity_release;
+		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
+		/* Spread the IRQ affinity hints across online CPUs. Note that
+		 * get_cpu_mask returns a mask with a permanent lifetime so
+		 * it's safe to use as a hint for irq_set_affinity_hint.
+		 */
+		cpu = cpumask_local_spread(q_vector->v_idx, -1);
+		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+	}
+
+	return 0;
+
+free_queue_irqs:
+	while (vector) {
+		vector--;
+		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
+		irq_set_affinity_notifier(irq_num, NULL);
+		irq_set_affinity_hint(irq_num, NULL);
+		free_irq(irq_num, &adapter->q_vectors[vector]);
+	}
+	return err;
+}
+
+/**
+ * i40evf_request_misc_irq - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
+ * vector is only for the admin queue, and stays active even when the netdev
+ * is closed.
+ **/
+static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err;
+
+	snprintf(adapter->misc_vector_name,
+		 sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx",
+		 dev_name(&adapter->pdev->dev));
+	err = request_irq(adapter->msix_entries[0].vector,
+			  &i40evf_msix_aq, 0,
+			  adapter->misc_vector_name, netdev);
+	if (err) {
+		dev_err(&adapter->pdev->dev,
+			"request_irq for %s failed: %d\n",
+			adapter->misc_vector_name, err);
+		free_irq(adapter->msix_entries[0].vector, netdev);
+	}
+	return err;
+}
+
+/**
+ * i40evf_free_traffic_irqs - Free MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * Frees all MSI-X vectors other than 0.
+ **/
+static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
+{
+	int vector, irq_num, q_vectors;
+
+	if (!adapter->msix_entries)
+		return;
+
+	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+	for (vector = 0; vector < q_vectors; vector++) {
+		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
+		irq_set_affinity_notifier(irq_num, NULL);
+		irq_set_affinity_hint(irq_num, NULL);
+		free_irq(irq_num, &adapter->q_vectors[vector]);
+	}
+}
+
+/**
+ * i40evf_free_misc_irq - Free MSI-X miscellaneous vector
+ * @adapter: board private structure
+ *
+ * Frees MSI-X vector 0.
+ **/
+static void i40evf_free_misc_irq(struct i40evf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	if (!adapter->msix_entries)
+		return;
+
+	free_irq(adapter->msix_entries[0].vector, netdev);
+}
+
+/**
+ * i40evf_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void i40evf_configure_tx(struct i40evf_adapter *adapter)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	int i;
+
+	for (i = 0; i < adapter->num_active_queues; i++)
+		adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i);
+}
+
+/**
+ * i40evf_configure_rx - Configure Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void i40evf_configure_rx(struct i40evf_adapter *adapter)
+{
+	unsigned int rx_buf_len = I40E_RXBUFFER_2048;
+	struct i40e_hw *hw = &adapter->hw;
+	int i;
+
+	/* Legacy Rx will always default to a 2048 buffer size. */
+#if (PAGE_SIZE < 8192)
+	if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) {
+		struct net_device *netdev = adapter->netdev;
+
+		/* For jumbo frames on systems with 4K pages we have to use
+		 * an order 1 page, so we might as well increase the size
+		 * of our Rx buffer to make better use of the available space
+		 */
+		rx_buf_len = I40E_RXBUFFER_3072;
+
+		/* We use a 1536 buffer size for configurations with
+		 * standard Ethernet mtu.  On x86 this gives us enough room
+		 * for shared info and 192 bytes of padding.
+		 */
+		if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
+		    (netdev->mtu <= ETH_DATA_LEN))
+			rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+	}
+#endif
+
+	for (i = 0; i < adapter->num_active_queues; i++) {
+		adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
+		adapter->rx_rings[i].rx_buf_len = rx_buf_len;
+
+		if (adapter->flags & I40EVF_FLAG_LEGACY_RX)
+			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
+		else
+			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
+	}
+}
+
+/**
+ * i40evf_find_vlan - Search filter list for specific vlan filter
+ * @adapter: board private structure
+ * @vlan: vlan tag
+ *
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * mac_vlan_list_lock.
+ **/
+static struct
+i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
+{
+	struct i40evf_vlan_filter *f;
+
+	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+		if (vlan == f->vlan)
+			return f;
+	}
+	return NULL;
+}
+
+/**
+ * i40evf_add_vlan - Add a vlan filter to the list
+ * @adapter: board private structure
+ * @vlan: VLAN tag
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ **/
+static struct
+i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
+{
+	struct i40evf_vlan_filter *f = NULL;
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+	f = i40evf_find_vlan(adapter, vlan);
+	if (!f) {
+		f = kzalloc(sizeof(*f), GFP_KERNEL);
+		if (!f)
+			goto clearout;
+
+		f->vlan = vlan;
+
+		INIT_LIST_HEAD(&f->list);
+		list_add(&f->list, &adapter->vlan_filter_list);
+		f->add = true;
+		adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+	}
+
+clearout:
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+	return f;
+}
+
+/**
+ * i40evf_del_vlan - Remove a vlan filter from the list
+ * @adapter: board private structure
+ * @vlan: VLAN tag
+ **/
+static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
+{
+	struct i40evf_vlan_filter *f;
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+	f = i40evf_find_vlan(adapter, vlan);
+	if (f) {
+		f->remove = true;
+		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+	}
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
+ * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
+ * @netdev: network device struct
+ * @proto: unused protocol data
+ * @vid: VLAN tag
+ **/
+static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
+				  __always_unused __be16 proto, u16 vid)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	if (!VLAN_ALLOWED(adapter))
+		return -EIO;
+	if (i40evf_add_vlan(adapter, vid) == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+/**
+ * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
+ * @netdev: network device struct
+ * @proto: unused protocol data
+ * @vid: VLAN tag
+ **/
+static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
+				   __always_unused __be16 proto, u16 vid)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	if (VLAN_ALLOWED(adapter)) {
+		i40evf_del_vlan(adapter, vid);
+		return 0;
+	}
+	return -EIO;
+}
+
+/**
+ * i40evf_find_filter - Search filter list for specific mac filter
+ * @adapter: board private structure
+ * @macaddr: the MAC address
+ *
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * mac_vlan_list_lock.
+ **/
+static struct
+i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
+				      const u8 *macaddr)
+{
+	struct i40evf_mac_filter *f;
+
+	if (!macaddr)
+		return NULL;
+
+	list_for_each_entry(f, &adapter->mac_filter_list, list) {
+		if (ether_addr_equal(macaddr, f->macaddr))
+			return f;
+	}
+	return NULL;
+}
+
+/**
+ * i40evf_add_filter - Add a mac filter to the filter list
+ * @adapter: board private structure
+ * @macaddr: the MAC address
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ **/
+static struct
+i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
+				     const u8 *macaddr)
+{
+	struct i40evf_mac_filter *f;
+
+	if (!macaddr)
+		return NULL;
+
+	f = i40evf_find_filter(adapter, macaddr);
+	if (!f) {
+		f = kzalloc(sizeof(*f), GFP_ATOMIC);
+		if (!f)
+			return f;
+
+		ether_addr_copy(f->macaddr, macaddr);
+
+		list_add_tail(&f->list, &adapter->mac_filter_list);
+		f->add = true;
+		adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+	} else {
+		f->remove = false;
+	}
+
+	return f;
+}
+
+/**
+ * i40evf_set_mac - NDO callback to set port mac address
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_set_mac(struct net_device *netdev, void *p)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct i40e_hw *hw = &adapter->hw;
+	struct i40evf_mac_filter *f;
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
+		return 0;
+
+	if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
+		return -EPERM;
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+	f = i40evf_find_filter(adapter, hw->mac.addr);
+	if (f) {
+		f->remove = true;
+		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+	}
+
+	f = i40evf_add_filter(adapter, addr->sa_data);
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+	if (f) {
+		ether_addr_copy(hw->mac.addr, addr->sa_data);
+		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+	}
+
+	return (f == NULL) ? -ENOMEM : 0;
+}
+
+/**
+ * i40evf_addr_sync - Callback for dev_(mc|uc)_sync to add address
+ * @netdev: the netdevice
+ * @addr: address to add
+ *
+ * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
+ * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
+ */
+static int i40evf_addr_sync(struct net_device *netdev, const u8 *addr)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	if (i40evf_add_filter(adapter, addr))
+		return 0;
+	else
+		return -ENOMEM;
+}
+
+/**
+ * i40evf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
+ * @netdev: the netdevice
+ * @addr: address to remove
+ *
+ * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
+ * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
+ */
+static int i40evf_addr_unsync(struct net_device *netdev, const u8 *addr)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct i40evf_mac_filter *f;
+
+	/* Under some circumstances, we might receive a request to delete
+	 * our own device address from our uc list. Because we store the
+	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
+	 * such requests and not delete our device address from this list.
+	 */
+	if (ether_addr_equal(addr, netdev->dev_addr))
+		return 0;
+
+	f = i40evf_find_filter(adapter, addr);
+	if (f) {
+		f->remove = true;
+		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+	}
+	return 0;
+}
+
+/**
+ * i40evf_set_rx_mode - NDO callback to set the netdev filters
+ * @netdev: network interface device structure
+ **/
+static void i40evf_set_rx_mode(struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+	__dev_uc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync);
+	__dev_mc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync);
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+	if (netdev->flags & IFF_PROMISC &&
+	    !(adapter->flags & I40EVF_FLAG_PROMISC_ON))
+		adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC;
+	else if (!(netdev->flags & IFF_PROMISC) &&
+		 adapter->flags & I40EVF_FLAG_PROMISC_ON)
+		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC;
+
+	if (netdev->flags & IFF_ALLMULTI &&
+	    !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON))
+		adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
+	else if (!(netdev->flags & IFF_ALLMULTI) &&
+		 adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
+		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
+}
+
+/**
+ * i40evf_napi_enable_all - enable NAPI on all queue vectors
+ * @adapter: board private structure
+ **/
+static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
+{
+	int q_idx;
+	struct i40e_q_vector *q_vector;
+	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+		struct napi_struct *napi;
+
+		q_vector = &adapter->q_vectors[q_idx];
+		napi = &q_vector->napi;
+		napi_enable(napi);
+	}
+}
+
+/**
+ * i40evf_napi_disable_all - disable NAPI on all queue vectors
+ * @adapter: board private structure
+ **/
+static void i40evf_napi_disable_all(struct i40evf_adapter *adapter)
+{
+	int q_idx;
+	struct i40e_q_vector *q_vector;
+	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+		q_vector = &adapter->q_vectors[q_idx];
+		napi_disable(&q_vector->napi);
+	}
+}
+
+/**
+ * i40evf_configure - set up transmit and receive data structures
+ * @adapter: board private structure
+ **/
+static void i40evf_configure(struct i40evf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i;
+
+	i40evf_set_rx_mode(netdev);
+
+	i40evf_configure_tx(adapter);
+	i40evf_configure_rx(adapter);
+	adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+
+	for (i = 0; i < adapter->num_active_queues; i++) {
+		struct i40e_ring *ring = &adapter->rx_rings[i];
+
+		i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+	}
+}
+
+/**
+ * i40evf_up_complete - Finish the last steps of bringing up a connection
+ * @adapter: board private structure
+ *
+ * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
+ **/
+static void i40evf_up_complete(struct i40evf_adapter *adapter)
+{
+	adapter->state = __I40EVF_RUNNING;
+	clear_bit(__I40E_VSI_DOWN, adapter->vsi.state);
+
+	i40evf_napi_enable_all(adapter);
+
+	adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
+	if (CLIENT_ENABLED(adapter))
+		adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN;
+	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+}
+
+/**
+ * i40evf_down - Shutdown the connection processing
+ * @adapter: board private structure
+ *
+ * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
+ **/
+void i40evf_down(struct i40evf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct i40evf_vlan_filter *vlf;
+	struct i40evf_mac_filter *f;
+	struct i40evf_cloud_filter *cf;
+
+	if (adapter->state <= __I40EVF_DOWN_PENDING)
+		return;
+
+	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
+	adapter->link_up = false;
+	i40evf_napi_disable_all(adapter);
+	i40evf_irq_disable(adapter);
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+	/* clear the sync flag on all filters */
+	__dev_uc_unsync(adapter->netdev, NULL);
+	__dev_mc_unsync(adapter->netdev, NULL);
+
+	/* remove all MAC filters */
+	list_for_each_entry(f, &adapter->mac_filter_list, list) {
+		f->remove = true;
+	}
+
+	/* remove all VLAN filters */
+	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
+		vlf->remove = true;
+	}
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+	/* remove all cloud filters */
+	spin_lock_bh(&adapter->cloud_filter_list_lock);
+	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
+		cf->del = true;
+	}
+	spin_unlock_bh(&adapter->cloud_filter_list_lock);
+
+	if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
+	    adapter->state != __I40EVF_RESETTING) {
+		/* cancel any current operation */
+		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+		/* Schedule operations to close down the HW. Don't wait
+		 * here for this to complete. The watchdog is still running
+		 * and it will take care of this.
+		 */
+		adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
+		adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
+	}
+
+	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+}
+
+/**
+ * i40evf_acquire_msix_vectors - Setup the MSIX capability
+ * @adapter: board private structure
+ * @vectors: number of vectors to request
+ *
+ * Work with the OS to set up the MSIX vectors needed.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int
+i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
+{
+	int err, vector_threshold;
+
+	/* We'll want at least 3 (vector_threshold):
+	 * 0) Other (Admin Queue and link, mostly)
+	 * 1) TxQ[0] Cleanup
+	 * 2) RxQ[0] Cleanup
+	 */
+	vector_threshold = MIN_MSIX_COUNT;
+
+	/* The more we get, the more we will assign to Tx/Rx Cleanup
+	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+	 * Right now, we simply care about how many we'll get; we'll
+	 * set them up later while requesting irq's.
+	 */
+	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+				    vector_threshold, vectors);
+	if (err < 0) {
+		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+		return err;
+	}
+
+	/* Adjust for only the vectors we'll use, which is minimum
+	 * of max_msix_q_vectors + NONQ_VECS, or the number of
+	 * vectors we were allocated.
+	 */
+	adapter->num_msix_vectors = err;
+	return 0;
+}
+
+/**
+ * i40evf_free_queues - Free memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * Free all of the memory associated with queue pairs.
+ **/
+static void i40evf_free_queues(struct i40evf_adapter *adapter)
+{
+	if (!adapter->vsi_res)
+		return;
+	adapter->num_active_queues = 0;
+	kfree(adapter->tx_rings);
+	adapter->tx_rings = NULL;
+	kfree(adapter->rx_rings);
+	adapter->rx_rings = NULL;
+}
+
+/**
+ * i40evf_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time.  The polling_netdev array is
+ * intended for Multiqueue, but should work fine with a single queue.
+ **/
+static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
+{
+	int i, num_active_queues;
+
+	/* If we're reallocating queues during a reset, we don't yet know for
+	 * certain that the PF gave us the number of queues we asked for, but
+	 * we'll assume it did. Once the basic reset is finished, we'll confirm
+	 * this when we start negotiating the config with the PF.
+	 */
+	if (adapter->num_req_queues)
+		num_active_queues = adapter->num_req_queues;
+	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
+		 adapter->num_tc)
+		num_active_queues = adapter->ch_config.total_qps;
+	else
+		num_active_queues = min_t(int,
+					  adapter->vsi_res->num_queue_pairs,
+					  (int)(num_online_cpus()));
+
+	adapter->tx_rings = kcalloc(num_active_queues,
+				    sizeof(struct i40e_ring), GFP_KERNEL);
+	if (!adapter->tx_rings)
+		goto err_out;
+	adapter->rx_rings = kcalloc(num_active_queues,
+				    sizeof(struct i40e_ring), GFP_KERNEL);
+	if (!adapter->rx_rings)
+		goto err_out;
+
+	for (i = 0; i < num_active_queues; i++) {
+		struct i40e_ring *tx_ring;
+		struct i40e_ring *rx_ring;
+
+		tx_ring = &adapter->tx_rings[i];
+
+		tx_ring->queue_index = i;
+		tx_ring->netdev = adapter->netdev;
+		tx_ring->dev = &adapter->pdev->dev;
+		tx_ring->count = adapter->tx_desc_count;
+		tx_ring->itr_setting = I40E_ITR_TX_DEF;
+		if (adapter->flags & I40EVF_FLAG_WB_ON_ITR_CAPABLE)
+			tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
+
+		rx_ring = &adapter->rx_rings[i];
+		rx_ring->queue_index = i;
+		rx_ring->netdev = adapter->netdev;
+		rx_ring->dev = &adapter->pdev->dev;
+		rx_ring->count = adapter->rx_desc_count;
+		rx_ring->itr_setting = I40E_ITR_RX_DEF;
+	}
+
+	adapter->num_active_queues = num_active_queues;
+
+	return 0;
+
+err_out:
+	i40evf_free_queues(adapter);
+	return -ENOMEM;
+}
+
+/**
+ * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
+{
+	int vector, v_budget;
+	int pairs = 0;
+	int err = 0;
+
+	if (!adapter->vsi_res) {
+		err = -EIO;
+		goto out;
+	}
+	pairs = adapter->num_active_queues;
+
+	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
+	 * us much good if we have more vectors than CPUs. However, we already
+	 * limit the total number of queues by the number of CPUs so we do not
+	 * need any further limiting here.
+	 */
+	v_budget = min_t(int, pairs + NONQ_VECS,
+			 (int)adapter->vf_res->max_vectors);
+
+	adapter->msix_entries = kcalloc(v_budget,
+					sizeof(struct msix_entry), GFP_KERNEL);
+	if (!adapter->msix_entries) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	for (vector = 0; vector < v_budget; vector++)
+		adapter->msix_entries[vector].entry = vector;
+
+	err = i40evf_acquire_msix_vectors(adapter, v_budget);
+
+out:
+	netif_set_real_num_rx_queues(adapter->netdev, pairs);
+	netif_set_real_num_tx_queues(adapter->netdev, pairs);
+	return err;
+}
+
+/**
+ * i40evf_config_rss_aq - Configure RSS keys and lut by using AQ commands
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
+{
+	struct i40e_aqc_get_set_rss_key_data *rss_key =
+		(struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
+	struct i40e_hw *hw = &adapter->hw;
+	int ret = 0;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
+			adapter->current_op);
+		return -EBUSY;
+	}
+
+	ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
+	if (ret) {
+		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
+			i40evf_stat_str(hw, ret),
+			i40evf_aq_str(hw, hw->aq.asq_last_status));
+		return ret;
+	}
+
+	ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false,
+				    adapter->rss_lut, adapter->rss_lut_size);
+	if (ret) {
+		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
+			i40evf_stat_str(hw, ret),
+			i40evf_aq_str(hw, hw->aq.asq_last_status));
+	}
+
+	return ret;
+}
+
+/**
+ * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
+ * @adapter: board private structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_config_rss_reg(struct i40evf_adapter *adapter)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	u32 *dw;
+	u16 i;
+
+	dw = (u32 *)adapter->rss_key;
+	for (i = 0; i <= adapter->rss_key_size / 4; i++)
+		wr32(hw, I40E_VFQF_HKEY(i), dw[i]);
+
+	dw = (u32 *)adapter->rss_lut;
+	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
+		wr32(hw, I40E_VFQF_HLUT(i), dw[i]);
+
+	i40e_flush(hw);
+
+	return 0;
+}
+
+/**
+ * i40evf_config_rss - Configure RSS keys and lut
+ * @adapter: board private structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int i40evf_config_rss(struct i40evf_adapter *adapter)
+{
+	if (RSS_PF(adapter)) {
+		adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT |
+					I40EVF_FLAG_AQ_SET_RSS_KEY;
+		return 0;
+	} else if (RSS_AQ(adapter)) {
+		return i40evf_config_rss_aq(adapter);
+	} else {
+		return i40evf_config_rss_reg(adapter);
+	}
+}
+
+/**
+ * i40evf_fill_rss_lut - Fill the lut with default values
+ * @adapter: board private structure
+ **/
+static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter)
+{
+	u16 i;
+
+	for (i = 0; i < adapter->rss_lut_size; i++)
+		adapter->rss_lut[i] = i % adapter->num_active_queues;
+}
+
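The LUT fill above simply spreads hash buckets across the active queues round-robin. A standalone illustration with made-up sizes (16 buckets, 4 queues):

#include <stdio.h>

int main(void)
{
	unsigned char lut[16];		/* example LUT size */
	unsigned int num_queues = 4;	/* example active queue count */
	unsigned int i;

	/* same fill as above: bucket i maps to queue i % num_queues */
	for (i = 0; i < sizeof(lut); i++)
		lut[i] = i % num_queues;

	for (i = 0; i < sizeof(lut); i++)
		printf("bucket %2u -> queue %u\n", i, (unsigned int)lut[i]);
	return 0;
}
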
+/**
+ * i40evf_init_rss - Prepare for RSS
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40evf_init_rss(struct i40evf_adapter *adapter)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	int ret;
+
+	if (!RSS_PF(adapter)) {
+		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
+		if (adapter->vf_res->vf_cap_flags &
+		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+			adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
+		else
+			adapter->hena = I40E_DEFAULT_RSS_HENA;
+
+		wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena);
+		wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32));
+	}
+
+	i40evf_fill_rss_lut(adapter);
+
+	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
+	ret = i40evf_config_rss(adapter);
+
+	return ret;
+}
+
+/**
+ * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ **/
+static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
+{
+	int q_idx = 0, num_q_vectors;
+	struct i40e_q_vector *q_vector;
+
+	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
+				     GFP_KERNEL);
+	if (!adapter->q_vectors)
+		return -ENOMEM;
+
+	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+		q_vector = &adapter->q_vectors[q_idx];
+		q_vector->adapter = adapter;
+		q_vector->vsi = &adapter->vsi;
+		q_vector->v_idx = q_idx;
+		q_vector->reg_idx = q_idx;
+		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
+		netif_napi_add(adapter->netdev, &q_vector->napi,
+			       i40evf_napi_poll, NAPI_POLL_WEIGHT);
+	}
+
+	return 0;
+}
+
+/**
+ * i40evf_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
+{
+	int q_idx, num_q_vectors;
+	int napi_vectors;
+
+	if (!adapter->q_vectors)
+		return;
+
+	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+	napi_vectors = adapter->num_active_queues;
+
+	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+		struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx];
+		if (q_idx < napi_vectors)
+			netif_napi_del(&q_vector->napi);
+	}
+	kfree(adapter->q_vectors);
+	adapter->q_vectors = NULL;
+}
+
+/**
+ * i40evf_reset_interrupt_capability - Reset MSIX setup
+ * @adapter: board private structure
+ *
+ **/
+void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
+{
+	if (!adapter->msix_entries)
+		return;
+
+	pci_disable_msix(adapter->pdev);
+	kfree(adapter->msix_entries);
+	adapter->msix_entries = NULL;
+}
+
+/**
+ * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init
+ * @adapter: board private structure to initialize
+ *
+ **/
+int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
+{
+	int err;
+
+	err = i40evf_alloc_queues(adapter);
+	if (err) {
+		dev_err(&adapter->pdev->dev,
+			"Unable to allocate memory for queues\n");
+		goto err_alloc_queues;
+	}
+
+	rtnl_lock();
+	err = i40evf_set_interrupt_capability(adapter);
+	rtnl_unlock();
+	if (err) {
+		dev_err(&adapter->pdev->dev,
+			"Unable to setup interrupt capabilities\n");
+		goto err_set_interrupt;
+	}
+
+	err = i40evf_alloc_q_vectors(adapter);
+	if (err) {
+		dev_err(&adapter->pdev->dev,
+			"Unable to allocate memory for queue vectors\n");
+		goto err_alloc_q_vectors;
+	}
+
+	/* If we've made it this far with the ADq flag still set, we haven't
+	 * bailed out anywhere along the way, and ADq isn't merely enabled:
+	 * its resources were actually allocated in the reset path.
+	 * Now we can truly claim that ADq is enabled.
+	 */
+	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
+	    adapter->num_tc)
+		dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
+			 adapter->num_tc);
+
+	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
+		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
+		 adapter->num_active_queues);
+
+	return 0;
+err_alloc_q_vectors:
+	i40evf_reset_interrupt_capability(adapter);
+err_set_interrupt:
+	i40evf_free_queues(adapter);
+err_alloc_queues:
+	return err;
+}
+
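The error path above uses the usual cascading-label idiom: each failure jumps to a label that tears down only what was already set up, in reverse order. A compact, self-contained sketch of that idiom (the resources here are plain malloc'd buffers, not the driver's queues and vectors):

#include <stdlib.h>

struct ctx {
	void *queues;
	void *vectors;
};

static int setup(struct ctx *c)
{
	c->queues = malloc(64);
	if (!c->queues)
		goto err_alloc_queues;

	c->vectors = malloc(64);
	if (!c->vectors)
		goto err_alloc_vectors;

	return 0;

err_alloc_vectors:
	/* undo only what succeeded, most recent first */
	free(c->queues);
	c->queues = NULL;
err_alloc_queues:
	return -1;
}

int main(void)
{
	struct ctx c = { 0 };

	if (setup(&c))
		return 1;

	free(c.vectors);
	free(c.queues);
	return 0;
}
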
+/**
+ * i40evf_free_rss - Free memory used by RSS structs
+ * @adapter: board private structure
+ **/
+static void i40evf_free_rss(struct i40evf_adapter *adapter)
+{
+	kfree(adapter->rss_key);
+	adapter->rss_key = NULL;
+
+	kfree(adapter->rss_lut);
+	adapter->rss_lut = NULL;
+}
+
+/**
+ * i40evf_reinit_interrupt_scheme - Reallocate queues and vectors
+ * @adapter: board private structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_reinit_interrupt_scheme(struct i40evf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err;
+
+	if (netif_running(netdev))
+		i40evf_free_traffic_irqs(adapter);
+	i40evf_free_misc_irq(adapter);
+	i40evf_reset_interrupt_capability(adapter);
+	i40evf_free_q_vectors(adapter);
+	i40evf_free_queues(adapter);
+
+	err = i40evf_init_interrupt_scheme(adapter);
+	if (err)
+		goto err;
+
+	netif_tx_stop_all_queues(netdev);
+
+	err = i40evf_request_misc_irq(adapter);
+	if (err)
+		goto err;
+
+	set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
+
+	i40evf_map_rings_to_vectors(adapter);
+
+	if (RSS_AQ(adapter))
+		adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
+	else
+		err = i40evf_init_rss(adapter);
+err:
+	return err;
+}
+
+/**
+ * i40evf_watchdog_timer - Periodic call-back timer
+ * @t: pointer to the timer_list from which we recover the adapter
+ **/
+static void i40evf_watchdog_timer(struct timer_list *t)
+{
+	struct i40evf_adapter *adapter = from_timer(adapter, t,
+						    watchdog_timer);
+
+	schedule_work(&adapter->watchdog_task);
+	/* timer will be rescheduled in watchdog task */
+}
+
+/**
+ * i40evf_watchdog_task - Periodic call-back task
+ * @work: pointer to work_struct
+ **/
+static void i40evf_watchdog_task(struct work_struct *work)
+{
+	struct i40evf_adapter *adapter = container_of(work,
+						      struct i40evf_adapter,
+						      watchdog_task);
+	struct i40e_hw *hw = &adapter->hw;
+	u32 reg_val;
+
+	if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
+		goto restart_watchdog;
+
+	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
+		reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
+			  I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+		if ((reg_val == VIRTCHNL_VFR_VFACTIVE) ||
+		    (reg_val == VIRTCHNL_VFR_COMPLETED)) {
+			/* A chance for redemption! */
+			dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
+			adapter->state = __I40EVF_STARTUP;
+			adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
+			schedule_delayed_work(&adapter->init_task, 10);
+			clear_bit(__I40EVF_IN_CRITICAL_TASK,
+				  &adapter->crit_section);
+			/* Don't reschedule the watchdog, since we've restarted
+			 * the init task. When init_task contacts the PF and
+			 * gets everything set up again, it'll restart the
+			 * watchdog for us. Down, boy. Sit. Stay. Woof.
+			 */
+			return;
+		}
+		adapter->aq_required = 0;
+		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+		goto watchdog_done;
+	}
+
+	if ((adapter->state < __I40EVF_DOWN) ||
+	    (adapter->flags & I40EVF_FLAG_RESET_PENDING))
+		goto watchdog_done;
+
+	/* check for reset */
+	reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK;
+	if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) {
+		adapter->state = __I40EVF_RESETTING;
+		adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
+		schedule_work(&adapter->reset_task);
+		adapter->aq_required = 0;
+		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+		goto watchdog_done;
+	}
+
+	/* Process admin queue tasks. After init, everything gets done
+	 * here so we don't race on the admin queue.
+	 */
+	if (adapter->current_op) {
+		if (!i40evf_asq_done(hw)) {
+			dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
+			i40evf_send_api_ver(adapter);
+		}
+		goto watchdog_done;
+	}
+	if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
+		i40evf_send_vf_config_msg(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
+		i40evf_disable_queues(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
+		i40evf_map_queues(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) {
+		i40evf_add_ether_addrs(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) {
+		i40evf_add_vlans(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) {
+		i40evf_del_ether_addrs(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) {
+		i40evf_del_vlans(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
+		i40evf_enable_vlan_stripping(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
+		i40evf_disable_vlan_stripping(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
+		i40evf_configure_queues(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) {
+		i40evf_enable_queues(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) {
+		/* This message goes straight to the firmware, not the
+		 * PF, so we don't have to set current_op as we will
+		 * not get a response through the ARQ.
+		 */
+		i40evf_init_rss(adapter);
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
+		goto watchdog_done;
+	}
+	if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) {
+		i40evf_get_hena(adapter);
+		goto watchdog_done;
+	}
+	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) {
+		i40evf_set_hena(adapter);
+		goto watchdog_done;
+	}
+	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) {
+		i40evf_set_rss_key(adapter);
+		goto watchdog_done;
+	}
+	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) {
+		i40evf_set_rss_lut(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
+		i40evf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
+				       FLAG_VF_MULTICAST_PROMISC);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
+		i40evf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
+		goto watchdog_done;
+	}
+
+	if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) &&
+	    (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) {
+		i40evf_set_promiscuous(adapter, 0);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_CHANNELS) {
+		i40evf_enable_channels(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_CHANNELS) {
+		i40evf_disable_channels(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_CLOUD_FILTER) {
+		i40evf_add_cloud_filter(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_CLOUD_FILTER) {
+		i40evf_del_cloud_filter(adapter);
+		goto watchdog_done;
+	}
+
+	schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
+
+	if (adapter->state == __I40EVF_RUNNING)
+		i40evf_request_stats(adapter);
+watchdog_done:
+	if (adapter->state == __I40EVF_RUNNING)
+		i40evf_detect_recover_hung(&adapter->vsi);
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+restart_watchdog:
+	if (adapter->state == __I40EVF_REMOVE)
+		return;
+	if (adapter->aq_required)
+		mod_timer(&adapter->watchdog_timer,
+			  jiffies + msecs_to_jiffies(20));
+	else
+		mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
+	schedule_work(&adapter->adminq_task);
+}
+
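The watchdog above services at most one pending aq_required flag per pass and then returns, relying on the timer to reschedule it quickly while work remains. A user-space sketch of that one-flag-per-pass dispatch (flag names are made up and, unlike the driver, the flags here are cleared inline rather than by the virtchnl send path):

#include <stdio.h>

#define AQ_ADD_MAC	(1u << 0)
#define AQ_DEL_MAC	(1u << 1)
#define AQ_CFG_RSS	(1u << 2)

static unsigned int aq_required = AQ_ADD_MAC | AQ_CFG_RSS;

/* Handle exactly one outstanding request, then return so the caller can
 * reschedule; the next pass picks up whatever is still set.
 */
static int watchdog_pass(void)
{
	if (aq_required & AQ_ADD_MAC) {
		aq_required &= ~AQ_ADD_MAC;
		puts("sent ADD_MAC");
		return 1;
	}
	if (aq_required & AQ_DEL_MAC) {
		aq_required &= ~AQ_DEL_MAC;
		puts("sent DEL_MAC");
		return 1;
	}
	if (aq_required & AQ_CFG_RSS) {
		aq_required &= ~AQ_CFG_RSS;
		puts("sent CFG_RSS");
		return 1;
	}
	return 0;	/* idle: caller would reschedule with the long interval */
}

int main(void)
{
	while (watchdog_pass())
		;	/* "reschedule" immediately while work is pending */
	return 0;
}
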
+static void i40evf_disable_vf(struct i40evf_adapter *adapter)
+{
+	struct i40evf_mac_filter *f, *ftmp;
+	struct i40evf_vlan_filter *fv, *fvtmp;
+	struct i40evf_cloud_filter *cf, *cftmp;
+
+	adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
+
+	/* We don't use netif_running() because it may be true prior to
+	 * ndo_open() returning, so we can't assume it means all our open
+	 * tasks have finished, since we're not holding the rtnl_lock here.
+	 */
+	if (adapter->state == __I40EVF_RUNNING) {
+		set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
+		netif_carrier_off(adapter->netdev);
+		netif_tx_disable(adapter->netdev);
+		adapter->link_up = false;
+		i40evf_napi_disable_all(adapter);
+		i40evf_irq_disable(adapter);
+		i40evf_free_traffic_irqs(adapter);
+		i40evf_free_all_tx_resources(adapter);
+		i40evf_free_all_rx_resources(adapter);
+	}
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+	/* Delete all of the filters */
+	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+		list_del(&f->list);
+		kfree(f);
+	}
+
+	list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
+		list_del(&fv->list);
+		kfree(fv);
+	}
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+	spin_lock_bh(&adapter->cloud_filter_list_lock);
+	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
+		list_del(&cf->list);
+		kfree(cf);
+		adapter->num_cloud_filters--;
+	}
+	spin_unlock_bh(&adapter->cloud_filter_list_lock);
+
+	i40evf_free_misc_irq(adapter);
+	i40evf_reset_interrupt_capability(adapter);
+	i40evf_free_queues(adapter);
+	i40evf_free_q_vectors(adapter);
+	kfree(adapter->vf_res);
+	i40evf_shutdown_adminq(&adapter->hw);
+	adapter->netdev->flags &= ~IFF_UP;
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+	adapter->state = __I40EVF_DOWN;
+	wake_up(&adapter->down_waitqueue);
+	dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
+}
+
+#define I40EVF_RESET_WAIT_MS 10
+#define I40EVF_RESET_WAIT_COUNT 500
+/**
+ * i40evf_reset_task - Call-back task to handle hardware reset
+ * @work: pointer to work_struct
+ *
+ * During reset we need to shut down and reinitialize the admin queue
+ * before we can use it to communicate with the PF again. We also clear
+ * and reinit the rings because that context is lost as well.
+ **/
+static void i40evf_reset_task(struct work_struct *work)
+{
+	struct i40evf_adapter *adapter = container_of(work,
+						      struct i40evf_adapter,
+						      reset_task);
+	struct virtchnl_vf_resource *vfres = adapter->vf_res;
+	struct net_device *netdev = adapter->netdev;
+	struct i40e_hw *hw = &adapter->hw;
+	struct i40evf_vlan_filter *vlf;
+	struct i40evf_cloud_filter *cf;
+	struct i40evf_mac_filter *f;
+	u32 reg_val;
+	int i = 0, err;
+	bool running;
+
+	/* When device is being removed it doesn't make sense to run the reset
+	 * task, just return in such a case.
+	 */
+	if (test_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section))
+		return;
+
+	while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
+				&adapter->crit_section))
+		usleep_range(500, 1000);
+	if (CLIENT_ENABLED(adapter)) {
+		adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN |
+				    I40EVF_FLAG_CLIENT_NEEDS_CLOSE |
+				    I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
+				    I40EVF_FLAG_SERVICE_CLIENT_REQUESTED);
+		cancel_delayed_work_sync(&adapter->client_task);
+		i40evf_notify_client_close(&adapter->vsi, true);
+	}
+	i40evf_misc_irq_disable(adapter);
+	if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
+		adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
+		/* Restart the AQ here. If we have been reset but didn't
+		 * detect it, or if the PF had to reinit, our AQ will be hosed.
+		 */
+		i40evf_shutdown_adminq(hw);
+		i40evf_init_adminq(hw);
+		i40evf_request_reset(adapter);
+	}
+	adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+
+	/* poll until we see the reset actually happen */
+	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
+		reg_val = rd32(hw, I40E_VF_ARQLEN1) &
+			  I40E_VF_ARQLEN1_ARQENABLE_MASK;
+		if (!reg_val)
+			break;
+		usleep_range(5000, 10000);
+	}
+	if (i == I40EVF_RESET_WAIT_COUNT) {
+		dev_info(&adapter->pdev->dev, "Never saw reset\n");
+		goto continue_reset; /* act like the reset happened */
+	}
+
+	/* wait until the reset is complete and the PF is responding to us */
+	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
+		/* sleep first to make sure a minimum wait time is met */
+		msleep(I40EVF_RESET_WAIT_MS);
+
+		reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
+			  I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+		if (reg_val == VIRTCHNL_VFR_VFACTIVE)
+			break;
+	}
+
+	pci_set_master(adapter->pdev);
+
+	if (i == I40EVF_RESET_WAIT_COUNT) {
+		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
+			reg_val);
+		i40evf_disable_vf(adapter);
+		clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+		return; /* Do not attempt to reinit. It's dead, Jim. */
+	}
+
+continue_reset:
+	/* We don't use netif_running() because it may be true prior to
+	 * ndo_open() returning, so we can't assume it means all our open
+	 * tasks have finished, since we're not holding the rtnl_lock here.
+	 */
+	running = ((adapter->state == __I40EVF_RUNNING) ||
+		   (adapter->state == __I40EVF_RESETTING));
+
+	if (running) {
+		netif_carrier_off(netdev);
+		netif_tx_stop_all_queues(netdev);
+		adapter->link_up = false;
+		i40evf_napi_disable_all(adapter);
+	}
+	i40evf_irq_disable(adapter);
+
+	adapter->state = __I40EVF_RESETTING;
+	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+
+	/* free the Tx/Rx rings and descriptors, might be better to just
+	 * re-use them sometime in the future
+	 */
+	i40evf_free_all_rx_resources(adapter);
+	i40evf_free_all_tx_resources(adapter);
+
+	adapter->flags |= I40EVF_FLAG_QUEUES_DISABLED;
+	/* kill and reinit the admin queue */
+	i40evf_shutdown_adminq(hw);
+	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+	err = i40evf_init_adminq(hw);
+	if (err)
+		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
+			 err);
+	adapter->aq_required = 0;
+
+	if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) {
+		err = i40evf_reinit_interrupt_scheme(adapter);
+		if (err)
+			goto reset_err;
+	}
+
+	adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG;
+	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+	/* re-add all MAC filters */
+	list_for_each_entry(f, &adapter->mac_filter_list, list) {
+		f->add = true;
+	}
+	/* re-add all VLAN filters */
+	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
+		vlf->add = true;
+	}
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+	/* check if TCs are running and re-add all cloud filters */
+	spin_lock_bh(&adapter->cloud_filter_list_lock);
+	if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
+	    adapter->num_tc) {
+		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
+			cf->add = true;
+		}
+	}
+	spin_unlock_bh(&adapter->cloud_filter_list_lock);
+
+	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
+	i40evf_misc_irq_enable(adapter);
+
+	mod_timer(&adapter->watchdog_timer, jiffies + 2);
+
+	/* We were running when the reset started, so we need to restore some
+	 * state here.
+	 */
+	if (running) {
+		/* allocate transmit descriptors */
+		err = i40evf_setup_all_tx_resources(adapter);
+		if (err)
+			goto reset_err;
+
+		/* allocate receive descriptors */
+		err = i40evf_setup_all_rx_resources(adapter);
+		if (err)
+			goto reset_err;
+
+		if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) {
+			err = i40evf_request_traffic_irqs(adapter,
+							  netdev->name);
+			if (err)
+				goto reset_err;
+
+			adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
+		}
+
+		i40evf_configure(adapter);
+
+		i40evf_up_complete(adapter);
+
+		i40evf_irq_enable(adapter, true);
+	} else {
+		adapter->state = __I40EVF_DOWN;
+		wake_up(&adapter->down_waitqueue);
+	}
+	clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+	return;
+reset_err:
+	clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
+	i40evf_close(netdev);
+}
+
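Both wait loops in the reset task above follow the same bounded-poll shape: read a status, break when it changes, sleep, and give up after a fixed retry budget. A minimal standalone version of that shape, with a stubbed status read and arbitrary counts:

#include <stdio.h>
#include <unistd.h>

#define RESET_WAIT_COUNT	500
#define RESET_WAIT_US		10000

/* stand-in for the register read; pretends the device reports "active"
 * after a few polls
 */
static int read_status(void)
{
	static int calls;

	return ++calls >= 3;
}

int main(void)
{
	int i;

	for (i = 0; i < RESET_WAIT_COUNT; i++) {
		if (read_status())
			break;
		usleep(RESET_WAIT_US);
	}

	if (i == RESET_WAIT_COUNT) {
		fprintf(stderr, "reset never finished\n");
		return 1;
	}

	printf("device active after %d polls\n", i + 1);
	return 0;
}
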
+/**
+ * i40evf_adminq_task - worker thread to clean the admin queue
+ * @work: pointer to work_struct containing our data
+ **/
+static void i40evf_adminq_task(struct work_struct *work)
+{
+	struct i40evf_adapter *adapter =
+		container_of(work, struct i40evf_adapter, adminq_task);
+	struct i40e_hw *hw = &adapter->hw;
+	struct i40e_arq_event_info event;
+	enum virtchnl_ops v_op;
+	i40e_status ret, v_ret;
+	u32 val, oldval;
+	u16 pending;
+
+	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+		goto out;
+
+	event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
+	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
+	if (!event.msg_buf)
+		goto out;
+
+	do {
+		ret = i40evf_clean_arq_element(hw, &event, &pending);
+		v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
+		v_ret = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+
+		if (ret || !v_op)
+			break; /* No event to process or error cleaning ARQ */
+
+		i40evf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
+					   event.msg_len);
+		if (pending != 0)
+			memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
+	} while (pending);
+
+	if ((adapter->flags &
+	     (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) ||
+	    adapter->state == __I40EVF_RESETTING)
+		goto freedom;
+
+	/* check for error indications */
+	val = rd32(hw, hw->aq.arq.len);
+	if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
+		goto freedom;
+	oldval = val;
+	if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
+		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
+		val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
+	}
+	if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
+		dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
+		val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
+	}
+	if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
+		dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
+		val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
+	}
+	if (oldval != val)
+		wr32(hw, hw->aq.arq.len, val);
+
+	val = rd32(hw, hw->aq.asq.len);
+	oldval = val;
+	if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) {
+		dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
+		val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
+	}
+	if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
+		dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
+		val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
+	}
+	if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
+		dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
+		val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
+	}
+	if (oldval != val)
+		wr32(hw, hw->aq.asq.len, val);
+
+freedom:
+	kfree(event.msg_buf);
+out:
+	/* re-enable Admin queue interrupt cause */
+	i40evf_misc_irq_enable(adapter);
+}
+
+/**
+ * i40evf_client_task - worker thread to perform client work
+ * @work: pointer to work_struct containing our data
+ *
+ * This task handles client interactions. Because client calls can be
+ * reentrant, we can't handle them in the watchdog.
+ **/
+static void i40evf_client_task(struct work_struct *work)
+{
+	struct i40evf_adapter *adapter =
+		container_of(work, struct i40evf_adapter, client_task.work);
+
+	/* If we can't get the client bit, just give up. We'll be rescheduled
+	 * later.
+	 */
+
+	if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section))
+		return;
+
+	if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) {
+		i40evf_client_subtask(adapter);
+		adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+		goto out;
+	}
+	if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
+		i40evf_notify_client_l2_params(&adapter->vsi);
+		adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
+		goto out;
+	}
+	if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
+		i40evf_notify_client_close(&adapter->vsi, false);
+		adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
+		goto out;
+	}
+	if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
+		i40evf_notify_client_open(&adapter->vsi);
+		adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
+	}
+out:
+	clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+}
+
+/**
+ * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
+{
+	int i;
+
+	if (!adapter->tx_rings)
+		return;
+
+	for (i = 0; i < adapter->num_active_queues; i++)
+		if (adapter->tx_rings[i].desc)
+			i40evf_free_tx_resources(&adapter->tx_rings[i]);
+}
+
+/**
+ * i40evf_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_active_queues; i++) {
+		adapter->tx_rings[i].count = adapter->tx_desc_count;
+		err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]);
+		if (!err)
+			continue;
+		dev_err(&adapter->pdev->dev,
+			"Allocation for Tx Queue %u failed\n", i);
+		break;
+	}
+
+	return err;
+}
+
+/**
+ * i40evf_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_active_queues; i++) {
+		adapter->rx_rings[i].count = adapter->rx_desc_count;
+		err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]);
+		if (!err)
+			continue;
+		dev_err(&adapter->pdev->dev,
+			"Allocation for Rx Queue %u failed\n", i);
+		break;
+	}
+	return err;
+}
+
+/**
+ * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
+{
+	int i;
+
+	if (!adapter->rx_rings)
+		return;
+
+	for (i = 0; i < adapter->num_active_queues; i++)
+		if (adapter->rx_rings[i].desc)
+			i40evf_free_rx_resources(&adapter->rx_rings[i]);
+}
+
+/**
+ * i40evf_validate_tx_bandwidth - validate the max Tx bandwidth
+ * @adapter: board private structure
+ * @max_tx_rate: max Tx bw for a tc
+ **/
+static int i40evf_validate_tx_bandwidth(struct i40evf_adapter *adapter,
+					u64 max_tx_rate)
+{
+	int speed = 0, ret = 0;
+
+	switch (adapter->link_speed) {
+	case I40E_LINK_SPEED_40GB:
+		speed = 40000;
+		break;
+	case I40E_LINK_SPEED_25GB:
+		speed = 25000;
+		break;
+	case I40E_LINK_SPEED_20GB:
+		speed = 20000;
+		break;
+	case I40E_LINK_SPEED_10GB:
+		speed = 10000;
+		break;
+	case I40E_LINK_SPEED_1GB:
+		speed = 1000;
+		break;
+	case I40E_LINK_SPEED_100MB:
+		speed = 100;
+		break;
+	default:
+		break;
+	}
+
+	if (max_tx_rate > speed) {
+		dev_err(&adapter->pdev->dev,
+			"Invalid tx rate specified\n");
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/**
+ * i40evf_validate_ch_config - validate queue mapping info
+ * @adapter: board private structure
+ * @mqprio_qopt: queue parameters
+ *
+ * This function validates if the config provided by the user to
+ * configure queue channels is valid or not. Returns 0 on a valid
+ * config.
+ **/
+static int i40evf_validate_ch_config(struct i40evf_adapter *adapter,
+				     struct tc_mqprio_qopt_offload *mqprio_qopt)
+{
+	u64 total_max_rate = 0;
+	int i, num_qps = 0;
+	u64 tx_rate = 0;
+	int ret = 0;
+
+	if (mqprio_qopt->qopt.num_tc > I40EVF_MAX_TRAFFIC_CLASS ||
+	    mqprio_qopt->qopt.num_tc < 1)
+		return -EINVAL;
+
+	for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
+		if (!mqprio_qopt->qopt.count[i] ||
+		    mqprio_qopt->qopt.offset[i] != num_qps)
+			return -EINVAL;
+		if (mqprio_qopt->min_rate[i]) {
+			dev_err(&adapter->pdev->dev,
+				"Invalid min tx rate (greater than 0) specified\n");
+			return -EINVAL;
+		}
+		/* convert to Mbps */
+		tx_rate = div_u64(mqprio_qopt->max_rate[i],
+				  I40EVF_MBPS_DIVISOR);
+		total_max_rate += tx_rate;
+		num_qps += mqprio_qopt->qopt.count[i];
+	}
+	if (num_qps > I40EVF_MAX_REQ_QUEUES)
+		return -EINVAL;
+
+	ret = i40evf_validate_tx_bandwidth(adapter, total_max_rate);
+	return ret;
+}
+
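The validator above insists that traffic classes describe contiguous, in-order queue ranges: each TC's offset must equal the running total of the previous counts. A standalone version of just that invariant check, with made-up limits and a sample layout:

#include <stdio.h>

#define MAX_TC		4
#define MAX_QUEUES	16

static int validate_layout(const unsigned int *count,
			   const unsigned int *offset, int num_tc)
{
	unsigned int num_qps = 0;
	int i;

	if (num_tc < 1 || num_tc > MAX_TC)
		return -1;

	for (i = 0; i < num_tc; i++) {
		if (!count[i] || offset[i] != num_qps)
			return -1;	/* hole or overlap in the layout */
		num_qps += count[i];
	}

	return num_qps > MAX_QUEUES ? -1 : 0;
}

int main(void)
{
	unsigned int count[]  = { 4, 4, 2 };
	unsigned int offset[] = { 0, 4, 8 };

	printf("layout %s\n",
	       validate_layout(count, offset, 3) ? "rejected" : "accepted");
	return 0;
}
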
+/**
+ * i40evf_del_all_cloud_filters - delete all cloud filters on the traffic classes
+ * @adapter: board private structure
+ **/
+static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter)
+{
+	struct i40evf_cloud_filter *cf, *cftmp;
+
+	spin_lock_bh(&adapter->cloud_filter_list_lock);
+	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
+				 list) {
+		list_del(&cf->list);
+		kfree(cf);
+		adapter->num_cloud_filters--;
+	}
+	spin_unlock_bh(&adapter->cloud_filter_list_lock);
+}
+
+/**
+ * __i40evf_setup_tc - configure multiple traffic classes
+ * @netdev: network interface device structure
+ * @type_data: tc offload data
+ *
+ * This function processes the config information provided by the
+ * user to configure traffic classes/queue channels and packages the
+ * information to request the PF to setup traffic classes.
+ *
+ * Returns 0 on success.
+ **/
+static int __i40evf_setup_tc(struct net_device *netdev, void *type_data)
+{
+	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct virtchnl_vf_resource *vfres = adapter->vf_res;
+	u8 num_tc = 0, total_qps = 0;
+	int ret = 0, netdev_tc = 0;
+	u64 max_tx_rate;
+	u16 mode;
+	int i;
+
+	num_tc = mqprio_qopt->qopt.num_tc;
+	mode = mqprio_qopt->mode;
+
+	/* delete queue_channel */
+	if (!mqprio_qopt->qopt.hw) {
+		if (adapter->ch_config.state == __I40EVF_TC_RUNNING) {
+			/* reset the tc configuration */
+			netdev_reset_tc(netdev);
+			adapter->num_tc = 0;
+			netif_tx_stop_all_queues(netdev);
+			netif_tx_disable(netdev);
+			i40evf_del_all_cloud_filters(adapter);
+			adapter->aq_required = I40EVF_FLAG_AQ_DISABLE_CHANNELS;
+			goto exit;
+		} else {
+			return -EINVAL;
+		}
+	}
+
+	/* add queue channel */
+	if (mode == TC_MQPRIO_MODE_CHANNEL) {
+		if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
+			dev_err(&adapter->pdev->dev, "ADq not supported\n");
+			return -EOPNOTSUPP;
+		}
+		if (adapter->ch_config.state != __I40EVF_TC_INVALID) {
+			dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
+			return -EINVAL;
+		}
+
+		ret = i40evf_validate_ch_config(adapter, mqprio_qopt);
+		if (ret)
+			return ret;
+		/* Return if same TC config is requested */
+		if (adapter->num_tc == num_tc)
+			return 0;
+		adapter->num_tc = num_tc;
+
+		for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) {
+			if (i < num_tc) {
+				adapter->ch_config.ch_info[i].count =
+					mqprio_qopt->qopt.count[i];
+				adapter->ch_config.ch_info[i].offset =
+					mqprio_qopt->qopt.offset[i];
+				total_qps += mqprio_qopt->qopt.count[i];
+				max_tx_rate = mqprio_qopt->max_rate[i];
+				/* convert to Mbps */
+				max_tx_rate = div_u64(max_tx_rate,
+						      I40EVF_MBPS_DIVISOR);
+				adapter->ch_config.ch_info[i].max_tx_rate =
+					max_tx_rate;
+			} else {
+				adapter->ch_config.ch_info[i].count = 1;
+				adapter->ch_config.ch_info[i].offset = 0;
+			}
+		}
+		adapter->ch_config.total_qps = total_qps;
+		netif_tx_stop_all_queues(netdev);
+		netif_tx_disable(netdev);
+		adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_CHANNELS;
+		netdev_reset_tc(netdev);
+		/* Report the tc mapping up the stack */
+		netdev_set_num_tc(adapter->netdev, num_tc);
+		for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) {
+			u16 qcount = mqprio_qopt->qopt.count[i];
+			u16 qoffset = mqprio_qopt->qopt.offset[i];
+
+			if (i < num_tc)
+				netdev_set_tc_queue(netdev, netdev_tc++, qcount,
+						    qoffset);
+		}
+	}
+exit:
+	return ret;
+}
+
+/**
+ * i40evf_parse_cls_flower - Parse tc flower filters provided by kernel
+ * @adapter: board private structure
+ * @f: pointer to struct tc_cls_flower_offload
+ * @filter: pointer to cloud filter structure
+ */
+static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter,
+				   struct tc_cls_flower_offload *f,
+				   struct i40evf_cloud_filter *filter)
+{
+	u16 n_proto_mask = 0;
+	u16 n_proto_key = 0;
+	u8 field_flags = 0;
+	u16 addr_type = 0;
+	u16 n_proto = 0;
+	int i = 0;
+	struct virtchnl_filter *vf = &filter->f;
+
+	if (f->dissector->used_keys &
+	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
+	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
+	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
+	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
+		dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
+			f->dissector->used_keys);
+		return -EOPNOTSUPP;
+	}
+
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+		struct flow_dissector_key_keyid *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_ENC_KEYID,
+						  f->mask);
+
+		if (mask->keyid != 0)
+			field_flags |= I40EVF_CLOUD_FIELD_TEN_ID;
+	}
+
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+		struct flow_dissector_key_basic *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_BASIC,
+						  f->key);
+
+		struct flow_dissector_key_basic *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_BASIC,
+						  f->mask);
+		n_proto_key = ntohs(key->n_proto);
+		n_proto_mask = ntohs(mask->n_proto);
+
+		if (n_proto_key == ETH_P_ALL) {
+			n_proto_key = 0;
+			n_proto_mask = 0;
+		}
+		n_proto = n_proto_key & n_proto_mask;
+		if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
+			return -EINVAL;
+		if (n_proto == ETH_P_IPV6) {
+			/* specify flow type as TCP IPv6 */
+			vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
+		}
+
+		if (key->ip_proto != IPPROTO_TCP) {
+			dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
+			return -EINVAL;
+		}
+	}
+
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+		struct flow_dissector_key_eth_addrs *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
+						  f->key);
+
+		struct flow_dissector_key_eth_addrs *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
+						  f->mask);
+		/* use is_broadcast and is_zero to check for all 0xf or 0 */
+		if (!is_zero_ether_addr(mask->dst)) {
+			if (is_broadcast_ether_addr(mask->dst)) {
+				field_flags |= I40EVF_CLOUD_FIELD_OMAC;
+			} else {
+				dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
+					mask->dst);
+				return I40E_ERR_CONFIG;
+			}
+		}
+
+		if (!is_zero_ether_addr(mask->src)) {
+			if (is_broadcast_ether_addr(mask->src)) {
+				field_flags |= I40EVF_CLOUD_FIELD_IMAC;
+			} else {
+				dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
+					mask->src);
+				return I40E_ERR_CONFIG;
+			}
+		}
+
+		if (!is_zero_ether_addr(key->dst))
+			if (is_valid_ether_addr(key->dst) ||
+			    is_multicast_ether_addr(key->dst)) {
+				/* set the mask if a valid dst_mac address */
+				for (i = 0; i < ETH_ALEN; i++)
+					vf->mask.tcp_spec.dst_mac[i] |= 0xff;
+				ether_addr_copy(vf->data.tcp_spec.dst_mac,
+						key->dst);
+			}
+
+		if (!is_zero_ether_addr(key->src))
+			if (is_valid_ether_addr(key->src) ||
+			    is_multicast_ether_addr(key->src)) {
+				/* set the mask if a valid src_mac address */
+				for (i = 0; i < ETH_ALEN; i++)
+					vf->mask.tcp_spec.src_mac[i] |= 0xff;
+				ether_addr_copy(vf->data.tcp_spec.src_mac,
+						key->src);
+			}
+	}
+
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+		struct flow_dissector_key_vlan *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_VLAN,
+						  f->key);
+		struct flow_dissector_key_vlan *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_VLAN,
+						  f->mask);
+
+		if (mask->vlan_id) {
+			if (mask->vlan_id == VLAN_VID_MASK) {
+				field_flags |= I40EVF_CLOUD_FIELD_IVLAN;
+			} else {
+				dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
+					mask->vlan_id);
+				return I40E_ERR_CONFIG;
+			}
+		}
+		vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
+		vf->data.tcp_spec.vlan_id = cpu_to_be16(key->vlan_id);
+	}
+
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
+		struct flow_dissector_key_control *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_CONTROL,
+						  f->key);
+
+		addr_type = key->addr_type;
+	}
+
+	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+		struct flow_dissector_key_ipv4_addrs *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+						  f->key);
+		struct flow_dissector_key_ipv4_addrs *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+						  f->mask);
+
+		if (mask->dst) {
+			if (mask->dst == cpu_to_be32(0xffffffff)) {
+				field_flags |= I40EVF_CLOUD_FIELD_IIP;
+			} else {
+				dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
+					be32_to_cpu(mask->dst));
+				return I40E_ERR_CONFIG;
+			}
+		}
+
+		if (mask->src) {
+			if (mask->src == cpu_to_be32(0xffffffff)) {
+				field_flags |= I40EVF_CLOUD_FIELD_IIP;
+			} else {
+				dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
+					be32_to_cpu(mask->src));
+				return I40E_ERR_CONFIG;
+			}
+		}
+
+		if (field_flags & I40EVF_CLOUD_FIELD_TEN_ID) {
+			dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
+			return I40E_ERR_CONFIG;
+		}
+		if (key->dst) {
+			vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
+			vf->data.tcp_spec.dst_ip[0] = key->dst;
+		}
+		if (key->src) {
+			vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
+			vf->data.tcp_spec.src_ip[0] = key->src;
+		}
+	}
+
+	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+		struct flow_dissector_key_ipv6_addrs *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+						  f->key);
+		struct flow_dissector_key_ipv6_addrs *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+						  f->mask);
+
+		/* validate mask, make sure it is not IPV6_ADDR_ANY */
+		if (ipv6_addr_any(&mask->dst)) {
+			dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
+				IPV6_ADDR_ANY);
+			return I40E_ERR_CONFIG;
+		}
+
+		/* src and dest IPv6 address should not be LOOPBACK
+		 * (0:0:0:0:0:0:0:1) which can be represented as ::1
+		 */
+		if (ipv6_addr_loopback(&key->dst) ||
+		    ipv6_addr_loopback(&key->src)) {
+			dev_err(&adapter->pdev->dev,
+				"ipv6 addr should not be loopback\n");
+			return I40E_ERR_CONFIG;
+		}
+		if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
+			field_flags |= I40EVF_CLOUD_FIELD_IIP;
+
+		for (i = 0; i < 4; i++)
+			vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
+		memcpy(&vf->data.tcp_spec.dst_ip, &key->dst.s6_addr32,
+		       sizeof(vf->data.tcp_spec.dst_ip));
+		for (i = 0; i < 4; i++)
+			vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
+		memcpy(&vf->data.tcp_spec.src_ip, &key->src.s6_addr32,
+		       sizeof(vf->data.tcp_spec.src_ip));
+	}
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+		struct flow_dissector_key_ports *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_PORTS,
+						  f->key);
+		struct flow_dissector_key_ports *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_PORTS,
+						  f->mask);
+
+		if (mask->src) {
+			if (mask->src == cpu_to_be16(0xffff)) {
+				field_flags |= I40EVF_CLOUD_FIELD_IIP;
+			} else {
+				dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
+					be16_to_cpu(mask->src));
+				return I40E_ERR_CONFIG;
+			}
+		}
+
+		if (mask->dst) {
+			if (mask->dst == cpu_to_be16(0xffff)) {
+				field_flags |= I40EVF_CLOUD_FIELD_IIP;
+			} else {
+				dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
+					be16_to_cpu(mask->dst));
+				return I40E_ERR_CONFIG;
+			}
+		}
+		if (key->dst) {
+			vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
+			vf->data.tcp_spec.dst_port = key->dst;
+		}
+
+		if (key->src) {
+			vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
+			vf->data.tcp_spec.src_port = key->src;
+		}
+	}
+	vf->field_flags = field_flags;
+
+	return 0;
+}
+
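The parser above starts by rejecting any flower filter that uses dissector keys outside its allow-list, with a single mask-and-complement test. A minimal sketch of that gate; the key bits here are placeholders rather than the kernel's flow-dissector enums:

#include <stdio.h>

#define KEY_BASIC	(1u << 0)
#define KEY_ETH_ADDRS	(1u << 1)
#define KEY_PORTS	(1u << 2)
#define KEY_MPLS	(1u << 3)	/* not in the supported set below */

#define SUPPORTED_KEYS	(KEY_BASIC | KEY_ETH_ADDRS | KEY_PORTS)

/* Reject the whole filter if any used key falls outside the allow-list */
static int check_keys(unsigned int used_keys)
{
	if (used_keys & ~SUPPORTED_KEYS) {
		fprintf(stderr, "unsupported key used: 0x%x\n", used_keys);
		return -1;
	}
	return 0;
}

int main(void)
{
	check_keys(KEY_BASIC | KEY_PORTS);	/* accepted */
	check_keys(KEY_BASIC | KEY_MPLS);	/* rejected */
	return 0;
}
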
+/**
+ * i40evf_handle_tclass - Forward to a traffic class on the device
+ * @adapter: board private structure
+ * @tc: traffic class index on the device
+ * @filter: pointer to cloud filter structure
+ */
+static int i40evf_handle_tclass(struct i40evf_adapter *adapter, u32 tc,
+				struct i40evf_cloud_filter *filter)
+{
+	if (tc == 0)
+		return 0;
+	if (tc < adapter->num_tc) {
+		if (!filter->f.data.tcp_spec.dst_port) {
+			dev_err(&adapter->pdev->dev,
+				"Specify destination port to redirect to traffic class other than TC0\n");
+			return -EINVAL;
+		}
+	}
+	/* redirect to a traffic class on the same device */
+	filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
+	filter->f.action_meta = tc;
+	return 0;
+}
+
+/**
+ * i40evf_configure_clsflower - Add tc flower filters
+ * @adapter: board private structure
+ * @cls_flower: Pointer to struct tc_cls_flower_offload
+ */
+static int i40evf_configure_clsflower(struct i40evf_adapter *adapter,
+				      struct tc_cls_flower_offload *cls_flower)
+{
+	int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
+	struct i40evf_cloud_filter *filter = NULL;
+	int err = -EINVAL, count = 50;
+
+	if (tc < 0) {
+		dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
+		return -EINVAL;
+	}
+
+	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+	if (!filter)
+		return -ENOMEM;
+
+	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+				&adapter->crit_section)) {
+		if (--count == 0)
+			goto err;
+		udelay(1);
+	}
+
+	filter->cookie = cls_flower->cookie;
+
+	/* set the mask to all zeroes to begin with */
+	memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
+	/* start out with flow type and eth type IPv4 to begin with */
+	filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
+	err = i40evf_parse_cls_flower(adapter, cls_flower, filter);
+	if (err < 0)
+		goto err;
+
+	err = i40evf_handle_tclass(adapter, tc, filter);
+	if (err < 0)
+		goto err;
+
+	/* add filter to the list */
+	spin_lock_bh(&adapter->cloud_filter_list_lock);
+	list_add_tail(&filter->list, &adapter->cloud_filter_list);
+	adapter->num_cloud_filters++;
+	filter->add = true;
+	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
+	spin_unlock_bh(&adapter->cloud_filter_list_lock);
+err:
+	if (err)
+		kfree(filter);
+
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	return err;
+}
+
+/**
+ * i40evf_find_cf - Find the cloud filter in the list
+ * @adapter: Board private structure
+ * @cookie: filter specific cookie
+ *
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * cloud_filter_list_lock.
+ */
+static struct i40evf_cloud_filter *i40evf_find_cf(struct i40evf_adapter *adapter,
+						  unsigned long *cookie)
+{
+	struct i40evf_cloud_filter *filter = NULL;
+
+	if (!cookie)
+		return NULL;
+
+	list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
+		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
+			return filter;
+	}
+	return NULL;
+}
+
+/**
+ * i40evf_delete_clsflower - Remove tc flower filters
+ * @adapter: board private structure
+ * @cls_flower: Pointer to struct tc_cls_flower_offload
+ */
+static int i40evf_delete_clsflower(struct i40evf_adapter *adapter,
+				   struct tc_cls_flower_offload *cls_flower)
+{
+	struct i40evf_cloud_filter *filter = NULL;
+	int err = 0;
+
+	spin_lock_bh(&adapter->cloud_filter_list_lock);
+	filter = i40evf_find_cf(adapter, &cls_flower->cookie);
+	if (filter) {
+		filter->del = true;
+		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
+	} else {
+		err = -EINVAL;
+	}
+	spin_unlock_bh(&adapter->cloud_filter_list_lock);
+
+	return err;
+}
+
+/**
+ * i40evf_setup_tc_cls_flower - flower classifier offloads
+ * @adapter: board private structure
+ * @cls_flower: pointer to struct tc_cls_flower_offload
+ */
+static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter,
+				      struct tc_cls_flower_offload *cls_flower)
+{
+	if (cls_flower->common.chain_index)
+		return -EOPNOTSUPP;
+
+	switch (cls_flower->command) {
+	case TC_CLSFLOWER_REPLACE:
+		return i40evf_configure_clsflower(adapter, cls_flower);
+	case TC_CLSFLOWER_DESTROY:
+		return i40evf_delete_clsflower(adapter, cls_flower);
+	case TC_CLSFLOWER_STATS:
+		return -EOPNOTSUPP;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/**
+ * i40evf_setup_tc_block_cb - block callback for tc
+ * @type: type of offload
+ * @type_data: offload data
+ * @cb_priv: board private structure registered as the callback's private data
+ *
+ * This function is the block callback for traffic classes
+ **/
+static int i40evf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+				    void *cb_priv)
+{
+	switch (type) {
+	case TC_SETUP_CLSFLOWER:
+		return i40evf_setup_tc_cls_flower(cb_priv, type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/**
+ * i40evf_setup_tc_block - register callbacks for tc
+ * @dev: network interface device structure
+ * @f: tc offload data
+ *
+ * This function registers block callbacks for tc
+ * offloads
+ **/
+static int i40evf_setup_tc_block(struct net_device *dev,
+				 struct tc_block_offload *f)
+{
+	struct i40evf_adapter *adapter = netdev_priv(dev);
+
+	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+		return -EOPNOTSUPP;
+
+	switch (f->command) {
+	case TC_BLOCK_BIND:
+		return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb,
+					     adapter, adapter, f->extack);
+	case TC_BLOCK_UNBIND:
+		tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb,
+					adapter);
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/**
+ * i40evf_setup_tc - configure multiple traffic classes
+ * @netdev: network interface device structure
+ * @type: type of offload
+ * @type_data: tc offload data
+ *
+ * This function is the callback to ndo_setup_tc in the
+ * netdev_ops.
+ *
+ * Returns 0 on success
+ **/
+static int i40evf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+			   void *type_data)
+{
+	switch (type) {
+	case TC_SETUP_QDISC_MQPRIO:
+		return __i40evf_setup_tc(netdev, type_data);
+	case TC_SETUP_BLOCK:
+		return i40evf_setup_tc_block(netdev, type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
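+
+/* Illustrative userspace commands that exercise the offloads above, e.g.:
+ *   tc qdisc add dev <vf> root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
+ *	queues 4@0 4@4 hw 1 mode channel
+ *   tc qdisc add dev <vf> ingress
+ *   tc filter add dev <vf> protocol ip parent ffff: prio 1 flower \
+ *	dst_ip 192.168.1.10/32 ip_proto tcp dst_port 80 skip_sw hw_tc 1
+ * The mqprio command is handled by __i40evf_setup_tc(), and the flower
+ * filter by i40evf_configure_clsflower() via the block callback above.
+ */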
+
+/**
+ * i40evf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int i40evf_open(struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	int err;
+
+	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
+		dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
+		return -EIO;
+	}
+
+	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+				&adapter->crit_section))
+		usleep_range(500, 1000);
+
+	if (adapter->state != __I40EVF_DOWN) {
+		err = -EBUSY;
+		goto err_unlock;
+	}
+
+	/* allocate transmit descriptors */
+	err = i40evf_setup_all_tx_resources(adapter);
+	if (err)
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+	err = i40evf_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_setup_rx;
+
+	/* clear any pending interrupts, may auto mask */
+	err = i40evf_request_traffic_irqs(adapter, netdev->name);
+	if (err)
+		goto err_req_irq;
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+	i40evf_add_filter(adapter, adapter->hw.mac.addr);
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+	i40evf_configure(adapter);
+
+	i40evf_up_complete(adapter);
+
+	i40evf_irq_enable(adapter, true);
+
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+	return 0;
+
+err_req_irq:
+	i40evf_down(adapter);
+	i40evf_free_traffic_irqs(adapter);
+err_setup_rx:
+	i40evf_free_all_rx_resources(adapter);
+err_setup_tx:
+	i40evf_free_all_tx_resources(adapter);
+err_unlock:
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+	return err;
+}
+
+/**
+ * i40evf_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the drivers control, but
+ * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
+ * are freed, along with all transmit and receive resources.
+ **/
+static int i40evf_close(struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	int status;
+
+	if (adapter->state <= __I40EVF_DOWN_PENDING)
+		return 0;
+
+	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+				&adapter->crit_section))
+		usleep_range(500, 1000);
+
+	set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
+	if (CLIENT_ENABLED(adapter))
+		adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
+
+	i40evf_down(adapter);
+	adapter->state = __I40EVF_DOWN_PENDING;
+	i40evf_free_traffic_irqs(adapter);
+
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+	/* We explicitly don't free resources here because the hardware is
+	 * still active and can DMA into memory. Resources are cleared in
+	 * i40evf_virtchnl_completion() after we get confirmation from the PF
+	 * driver that the rings have been stopped.
+	 *
+	 * Also, we wait for state to transition to __I40EVF_DOWN before
+	 * returning. State change occurs in i40evf_virtchnl_completion() after
+	 * VF resources are released (which occurs after PF driver processes and
+	 * responds to admin queue commands).
+	 */
+
+	status = wait_event_timeout(adapter->down_waitqueue,
+				    adapter->state == __I40EVF_DOWN,
+				    msecs_to_jiffies(200));
+	if (!status)
+		netdev_warn(netdev, "Device resources not yet released\n");
+	return 0;
+}
+
+/**
+ * i40evf_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	netdev->mtu = new_mtu;
+	if (CLIENT_ENABLED(adapter)) {
+		i40evf_notify_client_l2_params(&adapter->vsi);
+		adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+	}
+	adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+	schedule_work(&adapter->reset_task);
+
+	return 0;
+}
+
+/**
+ * i40evf_set_features - set the netdev feature flags
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ * Note: expects to be called while under rtnl_lock()
+ **/
+static int i40evf_set_features(struct net_device *netdev,
+			       netdev_features_t features)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	/* Don't allow changing VLAN_RX flag when adapter is not capable
+	 * of VLAN offload
+	 */
+	if (!VLAN_ALLOWED(adapter)) {
+		if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX)
+			return -EINVAL;
+	} else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
+		if (features & NETIF_F_HW_VLAN_CTAG_RX)
+			adapter->aq_required |=
+				I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
+		else
+			adapter->aq_required |=
+				I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
+	}
+
+	return 0;
+}
+
+/**
+ * i40evf_features_check - Validate encapsulated packet conforms to limits
+ * @skb: skb buff
+ * @dev: This physical port's netdev
+ * @features: Offload features that the stack believes apply
+ **/
+static netdev_features_t i40evf_features_check(struct sk_buff *skb,
+					       struct net_device *dev,
+					       netdev_features_t features)
+{
+	size_t len;
+
+	/* No point in doing any of this if neither checksum nor GSO are
+	 * being requested for this frame.  We can rule out both by just
+	 * checking for CHECKSUM_PARTIAL
+	 */
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return features;
+
+	/* We cannot support GSO if the MSS is going to be less than
+	 * 64 bytes.  If it is then we need to drop support for GSO.
+	 */
+	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
+		features &= ~NETIF_F_GSO_MASK;
+
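+	/* The limits below mirror the width of the header-length fields in
+	 * the Tx descriptor: MACLEN is programmed in 2-byte words and the
+	 * IP/tunnel lengths in 4-byte dwords, so each offset must fit the
+	 * field's range (e.g. 63 words = 126 bytes of L2 header) and be a
+	 * whole number of words/dwords, which is what the masks check.
+	 */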
+	/* MACLEN can support at most 63 words */
+	len = skb_network_header(skb) - skb->data;
+	if (len & ~(63 * 2))
+		goto out_err;
+
+	/* IPLEN and EIPLEN can support at most 127 dwords */
+	len = skb_transport_header(skb) - skb_network_header(skb);
+	if (len & ~(127 * 4))
+		goto out_err;
+
+	if (skb->encapsulation) {
+		/* L4TUNLEN can support 127 words */
+		len = skb_inner_network_header(skb) - skb_transport_header(skb);
+		if (len & ~(127 * 2))
+			goto out_err;
+
+		/* IPLEN can support at most 127 dwords */
+		len = skb_inner_transport_header(skb) -
+		      skb_inner_network_header(skb);
+		if (len & ~(127 * 4))
+			goto out_err;
+	}
+
+	/* No need to validate L4LEN as TCP is the only protocol with a
+	 * flexible value and we support all possible values supported
+	 * by TCP, which is at most 15 dwords
+	 */
+
+	return features;
+out_err:
+	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
+/**
+ * i40evf_fix_features - fix up the netdev feature bits
+ * @netdev: our net device
+ * @features: desired feature bits
+ *
+ * Returns fixed-up features bits
+ **/
+static netdev_features_t i40evf_fix_features(struct net_device *netdev,
+					     netdev_features_t features)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	if (adapter->vf_res &&
+	    !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
+		features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
+			      NETIF_F_HW_VLAN_CTAG_RX |
+			      NETIF_F_HW_VLAN_CTAG_FILTER);
+
+	return features;
+}
+
+static const struct net_device_ops i40evf_netdev_ops = {
+	.ndo_open		= i40evf_open,
+	.ndo_stop		= i40evf_close,
+	.ndo_start_xmit		= i40evf_xmit_frame,
+	.ndo_set_rx_mode	= i40evf_set_rx_mode,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= i40evf_set_mac,
+	.ndo_change_mtu		= i40evf_change_mtu,
+	.ndo_tx_timeout		= i40evf_tx_timeout,
+	.ndo_vlan_rx_add_vid	= i40evf_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= i40evf_vlan_rx_kill_vid,
+	.ndo_features_check	= i40evf_features_check,
+	.ndo_fix_features	= i40evf_fix_features,
+	.ndo_set_features	= i40evf_set_features,
+	.ndo_setup_tc		= i40evf_setup_tc,
+};
+
+/**
+ * i40evf_check_reset_complete - check that VF reset is complete
+ * @hw: pointer to hw struct
+ *
+ * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
+ **/
+static int i40evf_check_reset_complete(struct i40e_hw *hw)
+{
+	u32 rstat;
+	int i;
+
+	for (i = 0; i < 100; i++) {
+		rstat = rd32(hw, I40E_VFGEN_RSTAT) &
+			    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+		if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
+		    (rstat == VIRTCHNL_VFR_COMPLETED))
+			return 0;
+		usleep_range(10, 20);
+	}
+	return -EBUSY;
+}
+
+/**
+ * i40evf_process_config - Process the config information we got from the PF
+ * @adapter: board private structure
+ *
+ * Verify that we have a valid config struct, and set up our netdev features
+ * and our VSI struct.
+ **/
+int i40evf_process_config(struct i40evf_adapter *adapter)
+{
+	struct virtchnl_vf_resource *vfres = adapter->vf_res;
+	int i, num_req_queues = adapter->num_req_queues;
+	struct net_device *netdev = adapter->netdev;
+	struct i40e_vsi *vsi = &adapter->vsi;
+	netdev_features_t hw_enc_features;
+	netdev_features_t hw_features;
+
+	/* got VF config message back from PF, now we can parse it */
+	for (i = 0; i < vfres->num_vsis; i++) {
+		if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
+			adapter->vsi_res = &vfres->vsi_res[i];
+	}
+	if (!adapter->vsi_res) {
+		dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
+		return -ENODEV;
+	}
+
+	if (num_req_queues &&
+	    num_req_queues != adapter->vsi_res->num_queue_pairs) {
+		/* Problem.  The PF gave us fewer queues than what we had
+		 * negotiated in our request.  Need a reset to see if we can't
+		 * get back to a working state.
+		 */
+		dev_err(&adapter->pdev->dev,
+			"Requested %d queues, but PF only gave us %d.\n",
+			num_req_queues,
+			adapter->vsi_res->num_queue_pairs);
+		adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
+		adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
+		i40evf_schedule_reset(adapter);
+		return -ENODEV;
+	}
+	adapter->num_req_queues = 0;
+
+	hw_enc_features = NETIF_F_SG			|
+			  NETIF_F_IP_CSUM		|
+			  NETIF_F_IPV6_CSUM		|
+			  NETIF_F_HIGHDMA		|
+			  NETIF_F_SOFT_FEATURES	|
+			  NETIF_F_TSO			|
+			  NETIF_F_TSO_ECN		|
+			  NETIF_F_TSO6			|
+			  NETIF_F_SCTP_CRC		|
+			  NETIF_F_RXHASH		|
+			  NETIF_F_RXCSUM		|
+			  0;
+
+	/* advertise to stack only if offloads for encapsulated packets is
+	 * supported
+	 */
+	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
+		hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL	|
+				   NETIF_F_GSO_GRE		|
+				   NETIF_F_GSO_GRE_CSUM		|
+				   NETIF_F_GSO_IPXIP4		|
+				   NETIF_F_GSO_IPXIP6		|
+				   NETIF_F_GSO_UDP_TUNNEL_CSUM	|
+				   NETIF_F_GSO_PARTIAL		|
+				   0;
+
+		if (!(vfres->vf_cap_flags &
+		      VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
+			netdev->gso_partial_features |=
+				NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+		netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+		netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+		netdev->hw_enc_features |= hw_enc_features;
+	}
+	/* record features VLANs can make use of */
+	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
+
+	/* Write features and hw_features separately to avoid polluting
+	 * with, or dropping, features that are set when we registered.
+	 */
+	hw_features = hw_enc_features;
+
+	/* Enable VLAN features if supported */
+	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
+		hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
+				NETIF_F_HW_VLAN_CTAG_RX);
+	/* Enable cloud filter if ADQ is supported */
+	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
+		hw_features |= NETIF_F_HW_TC;
+
+	netdev->hw_features |= hw_features;
+
+	netdev->features |= hw_features;
+
+	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
+		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+	netdev->priv_flags |= IFF_UNICAST_FLT;
+
+	/* Do not turn on offloads when they are requested to be turned off.
+	 * TSO needs minimum 576 bytes to work correctly.
+	 */
+	if (netdev->wanted_features) {
+		if (!(netdev->wanted_features & NETIF_F_TSO) ||
+		    netdev->mtu < 576)
+			netdev->features &= ~NETIF_F_TSO;
+		if (!(netdev->wanted_features & NETIF_F_TSO6) ||
+		    netdev->mtu < 576)
+			netdev->features &= ~NETIF_F_TSO6;
+		if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
+			netdev->features &= ~NETIF_F_TSO_ECN;
+		if (!(netdev->wanted_features & NETIF_F_GRO))
+			netdev->features &= ~NETIF_F_GRO;
+		if (!(netdev->wanted_features & NETIF_F_GSO))
+			netdev->features &= ~NETIF_F_GSO;
+	}
+
+	adapter->vsi.id = adapter->vsi_res->vsi_id;
+
+	adapter->vsi.back = adapter;
+	adapter->vsi.base_vector = 1;
+	adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
+	vsi->netdev = adapter->netdev;
+	vsi->qs_handle = adapter->vsi_res->qset_handle;
+	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+		adapter->rss_key_size = vfres->rss_key_size;
+		adapter->rss_lut_size = vfres->rss_lut_size;
+	} else {
+		adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE;
+		adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE;
+	}
+
+	return 0;
+}
+
+/**
+ * i40evf_init_task - worker thread to perform delayed initialization
+ * @work: pointer to work_struct containing our data
+ *
+ * This task completes the work that was begun in probe. Due to the nature
+ * of VF-PF communications, we may need to wait tens of milliseconds to get
+ * responses back from the PF. Rather than busy-wait in probe and bog down the
+ * whole system, we'll do it in a task so we can sleep.
+ * This task only runs during driver init. Once we've established
+ * communications with the PF driver and set up our netdev, the watchdog
+ * takes over.
+ **/
+static void i40evf_init_task(struct work_struct *work)
+{
+	struct i40evf_adapter *adapter = container_of(work,
+						      struct i40evf_adapter,
+						      init_task.work);
+	struct net_device *netdev = adapter->netdev;
+	struct i40e_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	int err, bufsz;
+
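+	/* Each run of this task handles one init state and, on success,
+	 * reschedules itself: __I40EVF_STARTUP sends the API version,
+	 * __I40EVF_INIT_VERSION_CHECK verifies it and requests the VF config,
+	 * and __I40EVF_INIT_GET_RESOURCES parses the reply before falling
+	 * through to the software init below.
+	 */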
+	switch (adapter->state) {
+	case __I40EVF_STARTUP:
+		/* driver loaded, probe complete */
+		adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
+		adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+		err = i40e_set_mac_type(hw);
+		if (err) {
+			dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
+				err);
+			goto err;
+		}
+		err = i40evf_check_reset_complete(hw);
+		if (err) {
+			dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
+				 err);
+			goto err;
+		}
+		hw->aq.num_arq_entries = I40EVF_AQ_LEN;
+		hw->aq.num_asq_entries = I40EVF_AQ_LEN;
+		hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
+		hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
+
+		err = i40evf_init_adminq(hw);
+		if (err) {
+			dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
+				err);
+			goto err;
+		}
+		err = i40evf_send_api_ver(adapter);
+		if (err) {
+			dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
+			i40evf_shutdown_adminq(hw);
+			goto err;
+		}
+		adapter->state = __I40EVF_INIT_VERSION_CHECK;
+		goto restart;
+	case __I40EVF_INIT_VERSION_CHECK:
+		if (!i40evf_asq_done(hw)) {
+			dev_err(&pdev->dev, "Admin queue command never completed\n");
+			i40evf_shutdown_adminq(hw);
+			adapter->state = __I40EVF_STARTUP;
+			goto err;
+		}
+
+		/* aq msg sent, awaiting reply */
+		err = i40evf_verify_api_ver(adapter);
+		if (err) {
+			if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+				err = i40evf_send_api_ver(adapter);
+			else
+				dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
+					adapter->pf_version.major,
+					adapter->pf_version.minor,
+					VIRTCHNL_VERSION_MAJOR,
+					VIRTCHNL_VERSION_MINOR);
+			goto err;
+		}
+		err = i40evf_send_vf_config_msg(adapter);
+		if (err) {
+			dev_err(&pdev->dev, "Unable to send config request (%d)\n",
+				err);
+			goto err;
+		}
+		adapter->state = __I40EVF_INIT_GET_RESOURCES;
+		goto restart;
+	case __I40EVF_INIT_GET_RESOURCES:
+		/* aq msg sent, awaiting reply */
+		if (!adapter->vf_res) {
+			bufsz = sizeof(struct virtchnl_vf_resource) +
+				(I40E_MAX_VF_VSI *
+				 sizeof(struct virtchnl_vsi_resource));
+			adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
+			if (!adapter->vf_res)
+				goto err;
+		}
+		err = i40evf_get_vf_config(adapter);
+		if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
+			err = i40evf_send_vf_config_msg(adapter);
+			goto err;
+		} else if (err == I40E_ERR_PARAM) {
+			/* We only get ERR_PARAM if the device is in a very bad
+			 * state or if we've been disabled for previous bad
+			 * behavior. Either way, we're done now.
+			 */
+			i40evf_shutdown_adminq(hw);
+			dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
+			return;
+		}
+		if (err) {
+			dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
+				err);
+			goto err_alloc;
+		}
+		adapter->state = __I40EVF_INIT_SW;
+		break;
+	default:
+		goto err_alloc;
+	}
+
+	if (i40evf_process_config(adapter))
+		goto err_alloc;
+	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+
+	adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
+
+	netdev->netdev_ops = &i40evf_netdev_ops;
+	i40evf_set_ethtool_ops(netdev);
+	netdev->watchdog_timeo = 5 * HZ;
+
+	/* MTU range: 68 - 9710 */
+	netdev->min_mtu = ETH_MIN_MTU;
+	netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
+
+	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
+		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
+			 adapter->hw.mac.addr);
+		eth_hw_addr_random(netdev);
+		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
+	} else {
+		adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF;
+		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
+	}
+
+	timer_setup(&adapter->watchdog_timer, i40evf_watchdog_timer, 0);
+	mod_timer(&adapter->watchdog_timer, jiffies + 1);
+
+	adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
+	adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
+	err = i40evf_init_interrupt_scheme(adapter);
+	if (err)
+		goto err_sw_init;
+	i40evf_map_rings_to_vectors(adapter);
+	if (adapter->vf_res->vf_cap_flags &
+	    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+		adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
+
+	err = i40evf_request_misc_irq(adapter);
+	if (err)
+		goto err_sw_init;
+
+	netif_carrier_off(netdev);
+	adapter->link_up = false;
+
+	if (!adapter->netdev_registered) {
+		err = register_netdev(netdev);
+		if (err)
+			goto err_register;
+	}
+
+	adapter->netdev_registered = true;
+
+	netif_tx_stop_all_queues(netdev);
+	if (CLIENT_ALLOWED(adapter)) {
+		err = i40evf_lan_add_device(adapter);
+		if (err)
+			dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
+				 err);
+	}
+
+	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
+	if (netdev->features & NETIF_F_GRO)
+		dev_info(&pdev->dev, "GRO is enabled\n");
+
+	adapter->state = __I40EVF_DOWN;
+	set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
+	i40evf_misc_irq_enable(adapter);
+	wake_up(&adapter->down_waitqueue);
+
+	adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
+	adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
+	if (!adapter->rss_key || !adapter->rss_lut)
+		goto err_mem;
+
+	if (RSS_AQ(adapter)) {
+		adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
+		mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+	} else {
+		i40evf_init_rss(adapter);
+	}
+	return;
+restart:
+	schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
+	return;
+err_mem:
+	i40evf_free_rss(adapter);
+err_register:
+	i40evf_free_misc_irq(adapter);
+err_sw_init:
+	i40evf_reset_interrupt_capability(adapter);
+err_alloc:
+	kfree(adapter->vf_res);
+	adapter->vf_res = NULL;
+err:
+	/* Things went into the weeds, so try again later */
+	if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
+		dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
+		adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
+		i40evf_shutdown_adminq(hw);
+		adapter->state = __I40EVF_STARTUP;
+		schedule_delayed_work(&adapter->init_task, HZ * 5);
+		return;
+	}
+	schedule_delayed_work(&adapter->init_task, HZ);
+}
+
+/**
+ * i40evf_shutdown - Shutdown the device in preparation for a reboot
+ * @pdev: pci device structure
+ **/
+static void i40evf_shutdown(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev))
+		i40evf_close(netdev);
+
+	/* Prevent the watchdog from running. */
+	adapter->state = __I40EVF_REMOVE;
+	adapter->aq_required = 0;
+
+#ifdef CONFIG_PM
+	pci_save_state(pdev);
+
+#endif
+	pci_disable_device(pdev);
+}
+
+/**
+ * i40evf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in i40evf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * i40evf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct net_device *netdev;
+	struct i40evf_adapter *adapter = NULL;
+	struct i40e_hw *hw = NULL;
+	int err;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (err) {
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev,
+				"DMA configuration failed: 0x%x\n", err);
+			goto err_dma;
+		}
+	}
+
+	err = pci_request_regions(pdev, i40evf_driver_name);
+	if (err) {
+		dev_err(&pdev->dev,
+			"pci_request_regions failed 0x%x\n", err);
+		goto err_pci_reg;
+	}
+
+	pci_enable_pcie_error_reporting(pdev);
+
+	pci_set_master(pdev);
+
+	netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter),
+				   I40EVF_MAX_REQ_QUEUES);
+	if (!netdev) {
+		err = -ENOMEM;
+		goto err_alloc_etherdev;
+	}
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	pci_set_drvdata(pdev, netdev);
+	adapter = netdev_priv(netdev);
+
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+
+	hw = &adapter->hw;
+	hw->back = adapter;
+
+	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+	adapter->state = __I40EVF_STARTUP;
+
+	/* Call save state here because it relies on the adapter struct. */
+	pci_save_state(pdev);
+
+	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+			      pci_resource_len(pdev, 0));
+	if (!hw->hw_addr) {
+		err = -EIO;
+		goto err_ioremap;
+	}
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_device_id = pdev->subsystem_device;
+	hw->bus.device = PCI_SLOT(pdev->devfn);
+	hw->bus.func = PCI_FUNC(pdev->devfn);
+	hw->bus.bus_id = pdev->bus->number;
+
+	/* set up the locks for the AQ, do this only once in probe
+	 * and destroy them only once in remove
+	 */
+	mutex_init(&hw->aq.asq_mutex);
+	mutex_init(&hw->aq.arq_mutex);
+
+	spin_lock_init(&adapter->mac_vlan_list_lock);
+	spin_lock_init(&adapter->cloud_filter_list_lock);
+
+	INIT_LIST_HEAD(&adapter->mac_filter_list);
+	INIT_LIST_HEAD(&adapter->vlan_filter_list);
+	INIT_LIST_HEAD(&adapter->cloud_filter_list);
+
+	INIT_WORK(&adapter->reset_task, i40evf_reset_task);
+	INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
+	INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
+	INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task);
+	INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
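+	/* Stagger the first run of the init task by VF function number so
+	 * that multiple VFs probed together don't hit the PF's admin queue
+	 * at the same instant.
+	 */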
+	schedule_delayed_work(&adapter->init_task,
+			      msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
+
+	/* Setup the wait queue for indicating transition to down status */
+	init_waitqueue_head(&adapter->down_waitqueue);
+
+	return 0;
+
+err_ioremap:
+	free_netdev(netdev);
+err_alloc_etherdev:
+	pci_disable_pcie_error_reporting(pdev);
+	pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
+	return err;
+}
+
+#ifdef CONFIG_PM
+/**
+ * i40evf_suspend - Power management suspend routine
+ * @pdev: PCI device information struct
+ * @state: unused
+ *
+ * Called when the system (VM) is entering sleep/suspend.
+ **/
+static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	int retval = 0;
+
+	netif_device_detach(netdev);
+
+	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+				&adapter->crit_section))
+		usleep_range(500, 1000);
+
+	if (netif_running(netdev)) {
+		rtnl_lock();
+		i40evf_down(adapter);
+		rtnl_unlock();
+	}
+	i40evf_free_misc_irq(adapter);
+	i40evf_reset_interrupt_capability(adapter);
+
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+
+	pci_disable_device(pdev);
+
+	return 0;
+}
+
+/**
+ * i40evf_resume - Power management resume routine
+ * @pdev: PCI device information struct
+ *
+ * Called when the system (VM) is resumed from sleep/suspend.
+ **/
+static int i40evf_resume(struct pci_dev *pdev)
+{
+	struct i40evf_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+	u32 err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	/* pci_restore_state clears dev->state_saved so call
+	 * pci_save_state to restore it.
+	 */
+	pci_save_state(pdev);
+
+	err = pci_enable_device_mem(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	rtnl_lock();
+	err = i40evf_set_interrupt_capability(adapter);
+	if (err) {
+		rtnl_unlock();
+		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
+		return err;
+	}
+	err = i40evf_request_misc_irq(adapter);
+	rtnl_unlock();
+	if (err) {
+		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
+		return err;
+	}
+
+	schedule_work(&adapter->reset_task);
+
+	netif_device_attach(netdev);
+
+	return err;
+}
+
+#endif /* CONFIG_PM */
+/**
+ * i40evf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * i40evf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void i40evf_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct i40evf_vlan_filter *vlf, *vlftmp;
+	struct i40evf_mac_filter *f, *ftmp;
+	struct i40evf_cloud_filter *cf, *cftmp;
+	struct i40e_hw *hw = &adapter->hw;
+	int err;
+	/* Indicate we are in remove and not to run reset_task */
+	set_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section);
+	cancel_delayed_work_sync(&adapter->init_task);
+	cancel_work_sync(&adapter->reset_task);
+	cancel_delayed_work_sync(&adapter->client_task);
+	if (adapter->netdev_registered) {
+		unregister_netdev(netdev);
+		adapter->netdev_registered = false;
+	}
+	if (CLIENT_ALLOWED(adapter)) {
+		err = i40evf_lan_del_device(adapter);
+		if (err)
+			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
+				 err);
+	}
+
+	/* Shut down all the garbage mashers on the detention level */
+	adapter->state = __I40EVF_REMOVE;
+	adapter->aq_required = 0;
+	adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
+	i40evf_request_reset(adapter);
+	msleep(50);
+	/* If the FW isn't responding, kick it once, but only once. */
+	if (!i40evf_asq_done(hw)) {
+		i40evf_request_reset(adapter);
+		msleep(50);
+	}
+	i40evf_free_all_tx_resources(adapter);
+	i40evf_free_all_rx_resources(adapter);
+	i40evf_misc_irq_disable(adapter);
+	i40evf_free_misc_irq(adapter);
+	i40evf_reset_interrupt_capability(adapter);
+	i40evf_free_q_vectors(adapter);
+
+	if (adapter->watchdog_timer.function)
+		del_timer_sync(&adapter->watchdog_timer);
+
+	cancel_work_sync(&adapter->adminq_task);
+
+	i40evf_free_rss(adapter);
+
+	if (hw->aq.asq.count)
+		i40evf_shutdown_adminq(hw);
+
+	/* destroy the locks only once, here */
+	mutex_destroy(&hw->aq.arq_mutex);
+	mutex_destroy(&hw->aq.asq_mutex);
+
+	iounmap(hw->hw_addr);
+	pci_release_regions(pdev);
+	i40evf_free_all_tx_resources(adapter);
+	i40evf_free_all_rx_resources(adapter);
+	i40evf_free_queues(adapter);
+	kfree(adapter->vf_res);
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+	/* If we got removed before an up/down sequence, we've got a filter
+	 * hanging out there that we need to get rid of.
+	 */
+	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+		list_del(&f->list);
+		kfree(f);
+	}
+	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
+				 list) {
+		list_del(&vlf->list);
+		kfree(vlf);
+	}
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+	spin_lock_bh(&adapter->cloud_filter_list_lock);
+	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
+		list_del(&cf->list);
+		kfree(cf);
+	}
+	spin_unlock_bh(&adapter->cloud_filter_list_lock);
+
+	free_netdev(netdev);
+
+	pci_disable_pcie_error_reporting(pdev);
+
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver i40evf_driver = {
+	.name     = i40evf_driver_name,
+	.id_table = i40evf_pci_tbl,
+	.probe    = i40evf_probe,
+	.remove   = i40evf_remove,
+#ifdef CONFIG_PM
+	.suspend  = i40evf_suspend,
+	.resume   = i40evf_resume,
+#endif
+	.shutdown = i40evf_shutdown,
+};
+
+/**
+ * i40evf_init_module - Driver Registration Routine
+ *
+ * i40evf_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init i40evf_init_module(void)
+{
+	int ret;
+
+	pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
+		i40evf_driver_version);
+
+	pr_info("%s\n", i40evf_copyright);
+
+	i40evf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
+				    i40evf_driver_name);
+	if (!i40evf_wq) {
+		pr_err("%s: Failed to create workqueue\n", i40evf_driver_name);
+		return -ENOMEM;
+	}
+	ret = pci_register_driver(&i40evf_driver);
+	return ret;
+}
+
+module_init(i40evf_init_module);
+
+/**
+ * i40evf_exit_module - Driver Exit Cleanup Routine
+ *
+ * i40evf_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit i40evf_exit_module(void)
+{
+	pci_unregister_driver(&i40evf_driver);
+	destroy_workqueue(i40evf_wq);
+}
+
+module_exit(i40evf_exit_module);
+
+/* i40evf_main.c */
diff --git a/drivers/net/ethernet/intel/iavf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/iavf/i40evf_virtchnl.c
new file mode 100644
index 000000000000..6579dabab78c
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40evf_virtchnl.c
@@ -0,0 +1,1465 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+#include "i40evf_client.h"
+
+/* busy wait delay in msec */
+#define I40EVF_BUSY_WAIT_DELAY 10
+#define I40EVF_BUSY_WAIT_COUNT 50
+
+/**
+ * i40evf_send_pf_msg
+ * @adapter: adapter structure
+ * @op: virtual channel opcode
+ * @msg: pointer to message buffer
+ * @len: message length
+ *
+ * Send message to PF and print status if failure.
+ **/
+static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
+			      enum virtchnl_ops op, u8 *msg, u16 len)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	i40e_status err;
+
+	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+		return 0; /* nothing to see here, move along */
+
+	err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
+	if (err)
+		dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
+			op, i40evf_stat_str(hw, err),
+			i40evf_aq_str(hw, hw->aq.asq_last_status));
+	return err;
+}
+
+/**
+ * i40evf_send_api_ver
+ * @adapter: adapter structure
+ *
+ * Send API version admin queue message to the PF. The reply is not checked
+ * in this function. Returns 0 if the message was successfully
+ * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+ **/
+int i40evf_send_api_ver(struct i40evf_adapter *adapter)
+{
+	struct virtchnl_version_info vvi;
+
+	vvi.major = VIRTCHNL_VERSION_MAJOR;
+	vvi.minor = VIRTCHNL_VERSION_MINOR;
+
+	return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
+				  sizeof(vvi));
+}
+
+/**
+ * i40evf_verify_api_ver
+ * @adapter: adapter structure
+ *
+ * Compare API versions with the PF. Must be called after admin queue is
+ * initialized. Returns 0 if API versions match, -EIO if they do not,
+ * I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
+ * from the firmware are propagated.
+ **/
+int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
+{
+	struct virtchnl_version_info *pf_vvi;
+	struct i40e_hw *hw = &adapter->hw;
+	struct i40e_arq_event_info event;
+	enum virtchnl_ops op;
+	i40e_status err;
+
+	event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
+	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
+	if (!event.msg_buf) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	while (1) {
+		err = i40evf_clean_arq_element(hw, &event, NULL);
+		/* When the AQ is empty, i40evf_clean_arq_element will return
+		 * nonzero and this loop will terminate.
+		 */
+		if (err)
+			goto out_alloc;
+		op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
+		if (op == VIRTCHNL_OP_VERSION)
+			break;
+	}
+
+	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+	if (err)
+		goto out_alloc;
+
+	if (op != VIRTCHNL_OP_VERSION) {
+		dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
+			op);
+		err = -EIO;
+		goto out_alloc;
+	}
+
+	pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
+	adapter->pf_version = *pf_vvi;
+
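+	/* The PF must not report an API version newer than ours: reject a
+	 * higher major, or the same major with a higher minor.
+	 */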
+	if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
+	    ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
+	     (pf_vvi->minor > VIRTCHNL_VERSION_MINOR)))
+		err = -EIO;
+
+out_alloc:
+	kfree(event.msg_buf);
+out:
+	return err;
+}
+
+/**
+ * i40evf_send_vf_config_msg
+ * @adapter: adapter structure
+ *
+ * Send VF configuration request admin queue message to the PF. The reply
+ * is not checked in this function. Returns 0 if the message was
+ * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+ **/
+int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
+{
+	u32 caps;
+
+	caps = VIRTCHNL_VF_OFFLOAD_L2 |
+	       VIRTCHNL_VF_OFFLOAD_RSS_PF |
+	       VIRTCHNL_VF_OFFLOAD_RSS_AQ |
+	       VIRTCHNL_VF_OFFLOAD_RSS_REG |
+	       VIRTCHNL_VF_OFFLOAD_VLAN |
+	       VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
+	       VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
+	       VIRTCHNL_VF_OFFLOAD_ENCAP |
+	       VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
+	       VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
+	       VIRTCHNL_VF_OFFLOAD_ADQ;
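+	/* These capability flags advertise everything this VF driver can use;
+	 * the PF replies with the subset it supports in vf_res->vf_cap_flags.
+	 */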
+
+	adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
+	if (PF_IS_V11(adapter))
+		return i40evf_send_pf_msg(adapter,
+					  VIRTCHNL_OP_GET_VF_RESOURCES,
+					  (u8 *)&caps, sizeof(caps));
+	else
+		return i40evf_send_pf_msg(adapter,
+					  VIRTCHNL_OP_GET_VF_RESOURCES,
+					  NULL, 0);
+}
+
+/**
+ * i40evf_validate_num_queues
+ * @adapter: adapter structure
+ *
+ * Validate that the number of queues the PF has sent in
+ * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle.
+ **/
+static void i40evf_validate_num_queues(struct i40evf_adapter *adapter)
+{
+	if (adapter->vf_res->num_queue_pairs > I40EVF_MAX_REQ_QUEUES) {
+		struct virtchnl_vsi_resource *vsi_res;
+		int i;
+
+		dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n",
+			 adapter->vf_res->num_queue_pairs,
+			 I40EVF_MAX_REQ_QUEUES);
+		dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n",
+			 I40EVF_MAX_REQ_QUEUES);
+		adapter->vf_res->num_queue_pairs = I40EVF_MAX_REQ_QUEUES;
+		for (i = 0; i < adapter->vf_res->num_vsis; i++) {
+			vsi_res = &adapter->vf_res->vsi_res[i];
+			vsi_res->num_queue_pairs = I40EVF_MAX_REQ_QUEUES;
+		}
+	}
+}
+
+/**
+ * i40evf_get_vf_config
+ * @adapter: private adapter structure
+ *
+ * Get VF configuration from PF and populate hw structure. Must be called after
+ * admin queue is initialized. Busy waits until response is received from PF,
+ * with maximum timeout. Response from PF is returned in the buffer for further
+ * processing by the caller.
+ **/
+int i40evf_get_vf_config(struct i40evf_adapter *adapter)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	struct i40e_arq_event_info event;
+	enum virtchnl_ops op;
+	i40e_status err;
+	u16 len;
+
+	len =  sizeof(struct virtchnl_vf_resource) +
+		I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
+	event.buf_len = len;
+	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
+	if (!event.msg_buf) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	while (1) {
+		/* When the AQ is empty, i40evf_clean_arq_element will return
+		 * nonzero and this loop will terminate.
+		 */
+		err = i40evf_clean_arq_element(hw, &event, NULL);
+		if (err)
+			goto out_alloc;
+		op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
+		if (op == VIRTCHNL_OP_GET_VF_RESOURCES)
+			break;
+	}
+
+	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
+
+	/* some PFs send more queues than we should have so validate that
+	 * we aren't getting too many queues
+	 */
+	if (!err)
+		i40evf_validate_num_queues(adapter);
+	i40e_vf_parse_hw_config(hw, adapter->vf_res);
+out_alloc:
+	kfree(event.msg_buf);
+out:
+	return err;
+}
+
+/**
+ * i40evf_configure_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF set up our (previously allocated) queues.
+ **/
+void i40evf_configure_queues(struct i40evf_adapter *adapter)
+{
+	struct virtchnl_vsi_queue_config_info *vqci;
+	struct virtchnl_queue_pair_info *vqpi;
+	int pairs = adapter->num_active_queues;
+	int i, len, max_frame = I40E_MAX_RXBUFFER;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+	adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+	len = sizeof(struct virtchnl_vsi_queue_config_info) +
+		       (sizeof(struct virtchnl_queue_pair_info) * pairs);
+	vqci = kzalloc(len, GFP_KERNEL);
+	if (!vqci)
+		return;
+
+	/* Limit maximum frame size when jumbo frames is not enabled */
+	if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX) &&
+	    (adapter->netdev->mtu <= ETH_DATA_LEN))
+		max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+
+	vqci->vsi_id = adapter->vsi_res->vsi_id;
+	vqci->num_queue_pairs = pairs;
+	vqpi = vqci->qpair;
+	/* Size check is not needed here - HW max is 16 queue pairs, and we
+	 * can fit info for 31 of them into the AQ buffer before it overflows.
+	 */
+	for (i = 0; i < pairs; i++) {
+		vqpi->txq.vsi_id = vqci->vsi_id;
+		vqpi->txq.queue_id = i;
+		vqpi->txq.ring_len = adapter->tx_rings[i].count;
+		vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
+		vqpi->rxq.vsi_id = vqci->vsi_id;
+		vqpi->rxq.queue_id = i;
+		vqpi->rxq.ring_len = adapter->rx_rings[i].count;
+		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
+		vqpi->rxq.max_pkt_size = max_frame;
+		vqpi->rxq.databuffer_size =
+			ALIGN(adapter->rx_rings[i].rx_buf_len,
+			      BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
+		vqpi++;
+	}
+
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+			   (u8 *)vqci, len);
+	kfree(vqci);
+}
+
+/**
+ * i40evf_enable_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF enable all of our queues.
+ **/
+void i40evf_enable_queues(struct i40evf_adapter *adapter)
+{
+	struct virtchnl_queue_select vqs;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+	adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
+	vqs.vsi_id = adapter->vsi_res->vsi_id;
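+	/* BIT(n) - 1 selects a contiguous bitmap of queues 0..n-1 */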
+	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
+	vqs.rx_queues = vqs.tx_queues;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
+	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
+			   (u8 *)&vqs, sizeof(vqs));
+}
+
+/**
+ * i40evf_disable_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF disable all of our queues.
+ **/
+void i40evf_disable_queues(struct i40evf_adapter *adapter)
+{
+	struct virtchnl_queue_select vqs;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+	adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
+	vqs.vsi_id = adapter->vsi_res->vsi_id;
+	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
+	vqs.rx_queues = vqs.tx_queues;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
+	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
+			   (u8 *)&vqs, sizeof(vqs));
+}
+
+/**
+ * i40evf_map_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF map queues to interrupt vectors. Misc causes, including
+ * admin queue, are always mapped to vector 0.
+ **/
+void i40evf_map_queues(struct i40evf_adapter *adapter)
+{
+	struct virtchnl_irq_map_info *vimi;
+	struct virtchnl_vector_map *vecmap;
+	int v_idx, q_vectors, len;
+	struct i40e_q_vector *q_vector;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+	adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
+
+	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+	len = sizeof(struct virtchnl_irq_map_info) +
+	      (adapter->num_msix_vectors *
+		sizeof(struct virtchnl_vector_map));
+	vimi = kzalloc(len, GFP_KERNEL);
+	if (!vimi)
+		return;
+
+	vimi->num_vectors = adapter->num_msix_vectors;
+	/* Queue vectors first */
+	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+		q_vector = &adapter->q_vectors[v_idx];
+		vecmap = &vimi->vecmap[v_idx];
+
+		vecmap->vsi_id = adapter->vsi_res->vsi_id;
+		vecmap->vector_id = v_idx + NONQ_VECS;
+		vecmap->txq_map = q_vector->ring_mask;
+		vecmap->rxq_map = q_vector->ring_mask;
+		vecmap->rxitr_idx = I40E_RX_ITR;
+		vecmap->txitr_idx = I40E_TX_ITR;
+	}
+	/* Misc vector last - this is only for AdminQ messages */
+	vecmap = &vimi->vecmap[v_idx];
+	vecmap->vsi_id = adapter->vsi_res->vsi_id;
+	vecmap->vector_id = 0;
+	vecmap->txq_map = 0;
+	vecmap->rxq_map = 0;
+
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
+	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
+			   (u8 *)vimi, len);
+	kfree(vimi);
+}
+
+/**
+ * i40evf_request_queues
+ * @adapter: adapter structure
+ * @num: number of requested queues
+ *
+ * We get a default number of queues from the PF.  This enables us to request a
+ * different number.  Returns 0 on success, negative on failure
+ **/
+int i40evf_request_queues(struct i40evf_adapter *adapter, int num)
+{
+	struct virtchnl_vf_res_request vfres;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot request queues, command %d pending\n",
+			adapter->current_op);
+		return -EBUSY;
+	}
+
+	vfres.num_queue_pairs = num;
+
+	adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES;
+	adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
+	return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES,
+				  (u8 *)&vfres, sizeof(vfres));
+}
+
+/**
+ * i40evf_add_ether_addrs
+ * @adapter: adapter structure
+ *
+ * Request that the PF add one or more addresses to our filters.
+ **/
+void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
+{
+	struct virtchnl_ether_addr_list *veal;
+	int len, i = 0, count = 0;
+	struct i40evf_mac_filter *f;
+	bool more = false;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+	list_for_each_entry(f, &adapter->mac_filter_list, list) {
+		if (f->add)
+			count++;
+	}
+	if (!count) {
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
+		return;
+	}
+	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
+
+	len = sizeof(struct virtchnl_ether_addr_list) +
+	      (count * sizeof(struct virtchnl_ether_addr));
+	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
+		count = (I40EVF_MAX_AQ_BUF_SIZE -
+			 sizeof(struct virtchnl_ether_addr_list)) /
+			sizeof(struct virtchnl_ether_addr);
+		len = sizeof(struct virtchnl_ether_addr_list) +
+		      (count * sizeof(struct virtchnl_ether_addr));
+		more = true;
+	}
+
+	veal = kzalloc(len, GFP_ATOMIC);
+	if (!veal) {
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
+		return;
+	}
+
+	veal->vsi_id = adapter->vsi_res->vsi_id;
+	veal->num_elements = count;
+	list_for_each_entry(f, &adapter->mac_filter_list, list) {
+		if (f->add) {
+			ether_addr_copy(veal->list[i].addr, f->macaddr);
+			i++;
+			f->add = false;
+			if (i == count)
+				break;
+		}
+	}
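+	/* If the request had to be truncated, leave the AQ flag set so the
+	 * remaining filters are sent in a follow-up request.
+	 */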
+	if (!more)
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR,
+			   (u8 *)veal, len);
+	kfree(veal);
+}
+
+/**
+ * i40evf_del_ether_addrs
+ * @adapter: adapter structure
+ *
+ * Request that the PF remove one or more addresses from our filters.
+ **/
+void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
+{
+	struct virtchnl_ether_addr_list *veal;
+	struct i40evf_mac_filter *f, *ftmp;
+	int len, i = 0, count = 0;
+	bool more = false;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+	list_for_each_entry(f, &adapter->mac_filter_list, list) {
+		if (f->remove)
+			count++;
+	}
+	if (!count) {
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
+		return;
+	}
+	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
+
+	len = sizeof(struct virtchnl_ether_addr_list) +
+	      (count * sizeof(struct virtchnl_ether_addr));
+	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
+		count = (I40EVF_MAX_AQ_BUF_SIZE -
+			 sizeof(struct virtchnl_ether_addr_list)) /
+			sizeof(struct virtchnl_ether_addr);
+		len = sizeof(struct virtchnl_ether_addr_list) +
+		      (count * sizeof(struct virtchnl_ether_addr));
+		more = true;
+	}
+	veal = kzalloc(len, GFP_ATOMIC);
+	if (!veal) {
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
+		return;
+	}
+
+	veal->vsi_id = adapter->vsi_res->vsi_id;
+	veal->num_elements = count;
+	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+		if (f->remove) {
+			ether_addr_copy(veal->list[i].addr, f->macaddr);
+			i++;
+			list_del(&f->list);
+			kfree(f);
+			if (i == count)
+				break;
+		}
+	}
+	if (!more)
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR,
+			   (u8 *)veal, len);
+	kfree(veal);
+}
+
+/**
+ * i40evf_add_vlans
+ * @adapter: adapter structure
+ *
+ * Request that the PF add one or more VLAN filters to our VSI.
+ **/
+void i40evf_add_vlans(struct i40evf_adapter *adapter)
+{
+	struct virtchnl_vlan_filter_list *vvfl;
+	int len, i = 0, count = 0;
+	struct i40evf_vlan_filter *f;
+	bool more = false;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+		if (f->add)
+			count++;
+	}
+	if (!count) {
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
+		return;
+	}
+	adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
+
+	len = sizeof(struct virtchnl_vlan_filter_list) +
+	      (count * sizeof(u16));
+	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+		dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
+		count = (I40EVF_MAX_AQ_BUF_SIZE -
+			 sizeof(struct virtchnl_vlan_filter_list)) /
+			sizeof(u16);
+		len = sizeof(struct virtchnl_vlan_filter_list) +
+		      (count * sizeof(u16));
+		more = true;
+	}
+	vvfl = kzalloc(len, GFP_ATOMIC);
+	if (!vvfl) {
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
+		return;
+	}
+
+	vvfl->vsi_id = adapter->vsi_res->vsi_id;
+	vvfl->num_elements = count;
+	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+		if (f->add) {
+			vvfl->vlan_id[i] = f->vlan;
+			i++;
+			f->add = false;
+			if (i == count)
+				break;
+		}
+	}
+	if (!more)
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
+	kfree(vvfl);
+}
+
+/**
+ * i40evf_del_vlans
+ * @adapter: adapter structure
+ *
+ * Request that the PF remove one or more VLAN filters from our VSI.
+ **/
+void i40evf_del_vlans(struct i40evf_adapter *adapter)
+{
+	struct virtchnl_vlan_filter_list *vvfl;
+	struct i40evf_vlan_filter *f, *ftmp;
+	int len, i = 0, count = 0;
+	bool more = false;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+		if (f->remove)
+			count++;
+	}
+	if (!count) {
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
+		return;
+	}
+	adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
+
+	len = sizeof(struct virtchnl_vlan_filter_list) +
+	      (count * sizeof(u16));
+	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+		dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
+		count = (I40EVF_MAX_AQ_BUF_SIZE -
+			 sizeof(struct virtchnl_vlan_filter_list)) /
+			sizeof(u16);
+		len = sizeof(struct virtchnl_vlan_filter_list) +
+		      (count * sizeof(u16));
+		more = true;
+	}
+	vvfl = kzalloc(len, GFP_ATOMIC);
+	if (!vvfl) {
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
+		return;
+	}
+
+	vvfl->vsi_id = adapter->vsi_res->vsi_id;
+	vvfl->num_elements = count;
+	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+		if (f->remove) {
+			vvfl->vlan_id[i] = f->vlan;
+			i++;
+			list_del(&f->list);
+			kfree(f);
+			if (i == count)
+				break;
+		}
+	}
+	if (!more)
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
+	kfree(vvfl);
+}
+
+/**
+ * i40evf_set_promiscuous
+ * @adapter: adapter structure
+ * @flags: bitmask to control unicast/multicast promiscuous.
+ *
+ * Request that the PF enable promiscuous mode for our VSI.
+ **/
+void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
+{
+	struct virtchnl_promisc_info vpi;
+	int promisc_all;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+
+	promisc_all = FLAG_VF_UNICAST_PROMISC |
+		      FLAG_VF_MULTICAST_PROMISC;
+	if ((flags & promisc_all) == promisc_all) {
+		adapter->flags |= I40EVF_FLAG_PROMISC_ON;
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC;
+		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
+	}
+
+	if (flags & FLAG_VF_MULTICAST_PROMISC) {
+		adapter->flags |= I40EVF_FLAG_ALLMULTI_ON;
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
+		dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
+	}
+
+	if (!flags) {
+		adapter->flags &= ~(I40EVF_FLAG_PROMISC_ON |
+				    I40EVF_FLAG_ALLMULTI_ON);
+		adapter->aq_required &= ~(I40EVF_FLAG_AQ_RELEASE_PROMISC |
+					  I40EVF_FLAG_AQ_RELEASE_ALLMULTI);
+		dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+	}
+
+	adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+	vpi.vsi_id = adapter->vsi_res->vsi_id;
+	vpi.flags = flags;
+	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+			   (u8 *)&vpi, sizeof(vpi));
+}
+
+/**
+ * i40evf_request_stats
+ * @adapter: adapter structure
+ *
+ * Request VSI statistics from PF.
+ **/
+void i40evf_request_stats(struct i40evf_adapter *adapter)
+{
+	struct virtchnl_queue_select vqs;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* no error message, this isn't crucial */
+		return;
+	}
+	adapter->current_op = VIRTCHNL_OP_GET_STATS;
+	vqs.vsi_id = adapter->vsi_res->vsi_id;
+	/* queue maps are ignored for this message - only the vsi is used */
+	if (i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS,
+			       (u8 *)&vqs, sizeof(vqs)))
+		/* if the request failed, don't lock out others */
+		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+}
+
+/**
+ * i40evf_get_hena
+ * @adapter: adapter structure
+ *
+ * Request hash enable capabilities from PF
+ **/
+void i40evf_get_hena(struct i40evf_adapter *adapter)
+{
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+	adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA;
+	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+			   NULL, 0);
+}
+
+/**
+ * i40evf_set_hena
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS hash capabilities
+ **/
+void i40evf_set_hena(struct i40evf_adapter *adapter)
+{
+	struct virtchnl_rss_hena vrh;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+	vrh.hena = adapter->hena;
+	adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA;
+	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA,
+			   (u8 *)&vrh, sizeof(vrh));
+}
+
+/**
+ * i40evf_set_rss_key
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS hash key
+ **/
+void i40evf_set_rss_key(struct i40evf_adapter *adapter)
+{
+	struct virtchnl_rss_key *vrk;
+	int len;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
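+	/* struct virtchnl_rss_key already contains one key byte, hence -1 */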
+	len = sizeof(struct virtchnl_rss_key) +
+	      (adapter->rss_key_size * sizeof(u8)) - 1;
+	vrk = kzalloc(len, GFP_KERNEL);
+	if (!vrk)
+		return;
+	vrk->vsi_id = adapter->vsi.id;
+	vrk->key_len = adapter->rss_key_size;
+	memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
+
+	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY;
+	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY,
+			   (u8 *)vrk, len);
+	kfree(vrk);
+}
+
+/**
+ * i40evf_set_rss_lut
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS lookup table
+ **/
+void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
+{
+	struct virtchnl_rss_lut *vrl;
+	int len;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+	len = sizeof(struct virtchnl_rss_lut) +
+	      (adapter->rss_lut_size * sizeof(u8)) - 1;
+	vrl = kzalloc(len, GFP_KERNEL);
+	if (!vrl)
+		return;
+	vrl->vsi_id = adapter->vsi.id;
+	vrl->lut_entries = adapter->rss_lut_size;
+	memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
+	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT;
+	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT,
+			   (u8 *)vrl, len);
+	kfree(vrl);
+}
+
+/**
+ * i40evf_enable_vlan_stripping
+ * @adapter: adapter structure
+ *
+ * Request VLAN header stripping to be enabled
+ **/
+void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter)
+{
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+	adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
+	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+			   NULL, 0);
+}
+
+/**
+ * i40evf_disable_vlan_stripping
+ * @adapter: adapter structure
+ *
+ * Request VLAN header stripping to be disabled
+ **/
+void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter)
+{
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+	adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
+	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+			   NULL, 0);
+}
+
+/**
+ * i40evf_print_link_message - print link up or down
+ * @adapter: adapter structure
+ *
+ * Log a message telling the world of our wondrous link status
+ */
+static void i40evf_print_link_message(struct i40evf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	char *speed = "Unknown ";
+
+	if (!adapter->link_up) {
+		netdev_info(netdev, "NIC Link is Down\n");
+		return;
+	}
+
+	switch (adapter->link_speed) {
+	case I40E_LINK_SPEED_40GB:
+		speed = "40 G";
+		break;
+	case I40E_LINK_SPEED_25GB:
+		speed = "25 G";
+		break;
+	case I40E_LINK_SPEED_20GB:
+		speed = "20 G";
+		break;
+	case I40E_LINK_SPEED_10GB:
+		speed = "10 G";
+		break;
+	case I40E_LINK_SPEED_1GB:
+		speed = "1000 M";
+		break;
+	case I40E_LINK_SPEED_100MB:
+		speed = "100 M";
+		break;
+	default:
+		break;
+	}
+
+	netdev_info(netdev, "NIC Link is Up %sbps Full Duplex\n", speed);
+}
+
+/**
+ * i40evf_enable_channels
+ * @adapter: adapter structure
+ *
+ * Request that the PF enable channels as specified by
+ * the user via tc tool.
+ **/
+void i40evf_enable_channels(struct i40evf_adapter *adapter)
+{
+	struct virtchnl_tc_info *vti = NULL;
+	u16 len;
+	int i;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+
+	len = (adapter->num_tc * sizeof(struct virtchnl_channel_info)) +
+	       sizeof(struct virtchnl_tc_info);
+
+	vti = kzalloc(len, GFP_KERNEL);
+	if (!vti)
+		return;
+	vti->num_tc = adapter->num_tc;
+	for (i = 0; i < vti->num_tc; i++) {
+		vti->list[i].count = adapter->ch_config.ch_info[i].count;
+		vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
+		vti->list[i].pad = 0;
+		vti->list[i].max_tx_rate =
+				adapter->ch_config.ch_info[i].max_tx_rate;
+	}
+
+	adapter->ch_config.state = __I40EVF_TC_RUNNING;
+	adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
+	adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_CHANNELS;
+	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS,
+			   (u8 *)vti, len);
+	kfree(vti);
+}
+
+/**
+ * i40evf_disable_channels
+ * @adapter: adapter structure
+ *
+ * Request that the PF disable channels that are configured
+ **/
+void i40evf_disable_channels(struct i40evf_adapter *adapter)
+{
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+
+	adapter->ch_config.state = __I40EVF_TC_INVALID;
+	adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
+	adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_CHANNELS;
+	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS,
+			   NULL, 0);
+}
+
+/**
+ * i40evf_print_cloud_filter
+ * @adapter: adapter structure
+ * @f: cloud filter to print
+ *
+ * Print the cloud filter
+ **/
+static void i40evf_print_cloud_filter(struct i40evf_adapter *adapter,
+				      struct virtchnl_filter *f)
+{
+	switch (f->flow_type) {
+	case VIRTCHNL_TCP_V4_FLOW:
+		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
+			 &f->data.tcp_spec.dst_mac,
+			 &f->data.tcp_spec.src_mac,
+			 ntohs(f->data.tcp_spec.vlan_id),
+			 &f->data.tcp_spec.dst_ip[0],
+			 &f->data.tcp_spec.src_ip[0],
+			 ntohs(f->data.tcp_spec.dst_port),
+			 ntohs(f->data.tcp_spec.src_port));
+		break;
+	case VIRTCHNL_TCP_V6_FLOW:
+		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
+			 &f->data.tcp_spec.dst_mac,
+			 &f->data.tcp_spec.src_mac,
+			 ntohs(f->data.tcp_spec.vlan_id),
+			 &f->data.tcp_spec.dst_ip,
+			 &f->data.tcp_spec.src_ip,
+			 ntohs(f->data.tcp_spec.dst_port),
+			 ntohs(f->data.tcp_spec.src_port));
+		break;
+	}
+}
+
+/**
+ * i40evf_add_cloud_filter
+ * @adapter: adapter structure
+ *
+ * Request that the PF add cloud filters as specified
+ * by the user via tc tool.
+ **/
+void i40evf_add_cloud_filter(struct i40evf_adapter *adapter)
+{
+	struct i40evf_cloud_filter *cf;
+	struct virtchnl_filter *f;
+	int len = 0, count = 0;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
+		if (cf->add) {
+			count++;
+			break;
+		}
+	}
+	if (!count) {
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
+		return;
+	}
+	adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;
+
+	len = sizeof(struct virtchnl_filter);
+	f = kzalloc(len, GFP_KERNEL);
+	if (!f)
+		return;
+
+	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
+		if (cf->add) {
+			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
+			cf->add = false;
+			cf->state = __I40EVF_CF_ADD_PENDING;
+			i40evf_send_pf_msg(adapter,
+					   VIRTCHNL_OP_ADD_CLOUD_FILTER,
+					   (u8 *)f, len);
+		}
+	}
+	kfree(f);
+}
+
+/**
+ * i40evf_del_cloud_filter
+ * @adapter: adapter structure
+ *
+ * Request that the PF delete cloud filters as specified
+ * by the user via tc tool.
+ **/
+void i40evf_del_cloud_filter(struct i40evf_adapter *adapter)
+{
+	struct i40evf_cloud_filter *cf, *cftmp;
+	struct virtchnl_filter *f;
+	int len = 0, count = 0;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
+		if (cf->del) {
+			count++;
+			break;
+		}
+	}
+	if (!count) {
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
+		return;
+	}
+	adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER;
+
+	len = sizeof(struct virtchnl_filter);
+	f = kzalloc(len, GFP_KERNEL);
+	if (!f)
+		return;
+
+	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
+		if (cf->del) {
+			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
+			cf->del = false;
+			cf->state = __I40EVF_CF_DEL_PENDING;
+			i40evf_send_pf_msg(adapter,
+					   VIRTCHNL_OP_DEL_CLOUD_FILTER,
+					   (u8 *)f, len);
+		}
+	}
+	kfree(f);
+}
+
+/**
+ * i40evf_request_reset
+ * @adapter: adapter structure
+ *
+ * Request that the PF reset this VF. No response is expected.
+ **/
+void i40evf_request_reset(struct i40evf_adapter *adapter)
+{
+	/* Don't check CURRENT_OP - this is always higher priority */
+	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
+	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+}
+
+/**
+ * i40evf_virtchnl_completion
+ * @adapter: adapter structure
+ * @v_opcode: opcode sent by PF
+ * @v_retval: retval sent by PF
+ * @msg: message sent by PF
+ * @msglen: message length
+ *
+ * Asynchronous completion function for admin queue messages. Rather than busy
+ * wait, we fire off our requests and assume that no errors will be returned.
+ * This function handles the reply messages.
+ **/
+void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
+				enum virtchnl_ops v_opcode,
+				i40e_status v_retval,
+				u8 *msg, u16 msglen)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	if (v_opcode == VIRTCHNL_OP_EVENT) {
+		struct virtchnl_pf_event *vpe =
+			(struct virtchnl_pf_event *)msg;
+		bool link_up = vpe->event_data.link_event.link_status;
+		switch (vpe->event) {
+		case VIRTCHNL_EVENT_LINK_CHANGE:
+			adapter->link_speed =
+				vpe->event_data.link_event.link_speed;
+
+			/* we've already got the right link status, bail */
+			if (adapter->link_up == link_up)
+				break;
+
+			if (link_up) {
+				/* If we get link up message and start queues
+				/* If we get a link up message and start queues
+				 * trigger a TX hang. In that case, just ignore
+				 * the link status message; we'll get another one
+				 * after we enable queues and are actually prepared
+				 * to send traffic.
+				 */
+				if (adapter->state != __I40EVF_RUNNING)
+					break;
+
+				/* For ADq enabled VF, we reconfigure VSIs and
+				 * re-allocate queues. Hence wait till all
+				 * queues are enabled.
+				 */
+				if (adapter->flags &
+				    I40EVF_FLAG_QUEUES_DISABLED)
+					break;
+			}
+
+			adapter->link_up = link_up;
+			if (link_up) {
+				netif_tx_start_all_queues(netdev);
+				netif_carrier_on(netdev);
+			} else {
+				netif_tx_stop_all_queues(netdev);
+				netif_carrier_off(netdev);
+			}
+			i40evf_print_link_message(adapter);
+			break;
+		case VIRTCHNL_EVENT_RESET_IMPENDING:
+			dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n");
+			if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
+				adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
+				schedule_work(&adapter->reset_task);
+			}
+			break;
+		default:
+			dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
+				vpe->event);
+			break;
+		}
+		return;
+	}
+	if (v_retval) {
+		switch (v_opcode) {
+		case VIRTCHNL_OP_ADD_VLAN:
+			dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
+				i40evf_stat_str(&adapter->hw, v_retval));
+			break;
+		case VIRTCHNL_OP_ADD_ETH_ADDR:
+			dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
+				i40evf_stat_str(&adapter->hw, v_retval));
+			break;
+		case VIRTCHNL_OP_DEL_VLAN:
+			dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
+				i40evf_stat_str(&adapter->hw, v_retval));
+			break;
+		case VIRTCHNL_OP_DEL_ETH_ADDR:
+			dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
+				i40evf_stat_str(&adapter->hw, v_retval));
+			break;
+		case VIRTCHNL_OP_ENABLE_CHANNELS:
+			dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n",
+				i40evf_stat_str(&adapter->hw, v_retval));
+			adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
+			adapter->ch_config.state = __I40EVF_TC_INVALID;
+			netdev_reset_tc(netdev);
+			netif_tx_start_all_queues(netdev);
+			break;
+		case VIRTCHNL_OP_DISABLE_CHANNELS:
+			dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n",
+				i40evf_stat_str(&adapter->hw, v_retval));
+			adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
+			adapter->ch_config.state = __I40EVF_TC_RUNNING;
+			netif_tx_start_all_queues(netdev);
+			break;
+		case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
+			struct i40evf_cloud_filter *cf, *cftmp;
+
+			list_for_each_entry_safe(cf, cftmp,
+						 &adapter->cloud_filter_list,
+						 list) {
+				if (cf->state == __I40EVF_CF_ADD_PENDING) {
+					cf->state = __I40EVF_CF_INVALID;
+					dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n",
+						 i40evf_stat_str(&adapter->hw,
+								 v_retval));
+					i40evf_print_cloud_filter(adapter,
+								  &cf->f);
+					list_del(&cf->list);
+					kfree(cf);
+					adapter->num_cloud_filters--;
+				}
+			}
+			}
+			break;
+		case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
+			struct i40evf_cloud_filter *cf;
+
+			list_for_each_entry(cf, &adapter->cloud_filter_list,
+					    list) {
+				if (cf->state == __I40EVF_CF_DEL_PENDING) {
+					cf->state = __I40EVF_CF_ACTIVE;
+					dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n",
+						 i40evf_stat_str(&adapter->hw,
+								 v_retval));
+					i40evf_print_cloud_filter(adapter,
+								  &cf->f);
+				}
+			}
+			}
+			break;
+		default:
+			dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
+				v_retval,
+				i40evf_stat_str(&adapter->hw, v_retval),
+				v_opcode);
+		}
+	}
+	switch (v_opcode) {
+	case VIRTCHNL_OP_GET_STATS: {
+		struct i40e_eth_stats *stats =
+			(struct i40e_eth_stats *)msg;
+		netdev->stats.rx_packets = stats->rx_unicast +
+					   stats->rx_multicast +
+					   stats->rx_broadcast;
+		netdev->stats.tx_packets = stats->tx_unicast +
+					   stats->tx_multicast +
+					   stats->tx_broadcast;
+		netdev->stats.rx_bytes = stats->rx_bytes;
+		netdev->stats.tx_bytes = stats->tx_bytes;
+		netdev->stats.tx_errors = stats->tx_errors;
+		netdev->stats.rx_dropped = stats->rx_discards;
+		netdev->stats.tx_dropped = stats->tx_discards;
+		adapter->current_stats = *stats;
+		}
+		break;
+	case VIRTCHNL_OP_GET_VF_RESOURCES: {
+		u16 len = sizeof(struct virtchnl_vf_resource) +
+			  I40E_MAX_VF_VSI *
+			  sizeof(struct virtchnl_vsi_resource);
+		memcpy(adapter->vf_res, msg, min(msglen, len));
+		i40evf_validate_num_queues(adapter);
+		i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
+		if (is_zero_ether_addr(adapter->hw.mac.addr)) {
+			/* restore current mac address */
+			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
+		} else {
+			/* refresh current mac address if changed */
+			ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+			ether_addr_copy(netdev->perm_addr,
+					adapter->hw.mac.addr);
+		}
+		i40evf_process_config(adapter);
+		}
+		break;
+	case VIRTCHNL_OP_ENABLE_QUEUES:
+		/* enable transmits */
+		i40evf_irq_enable(adapter, true);
+		adapter->flags &= ~I40EVF_FLAG_QUEUES_DISABLED;
+		break;
+	case VIRTCHNL_OP_DISABLE_QUEUES:
+		i40evf_free_all_tx_resources(adapter);
+		i40evf_free_all_rx_resources(adapter);
+		if (adapter->state == __I40EVF_DOWN_PENDING) {
+			adapter->state = __I40EVF_DOWN;
+			wake_up(&adapter->down_waitqueue);
+		}
+		break;
+	case VIRTCHNL_OP_VERSION:
+	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+		/* Don't display an error if we get these out of sequence.
+		 * If the firmware needed to get kicked, we'll get these and
+		 * it's no problem.
+		 */
+		if (v_opcode != adapter->current_op)
+			return;
+		break;
+	case VIRTCHNL_OP_IWARP:
+		/* Gobble zero-length replies from the PF. They indicate that
+		 * a previous message was received OK, and the client doesn't
+		 * care about that.
+		 */
+		if (msglen && CLIENT_ENABLED(adapter))
+			i40evf_notify_client_message(&adapter->vsi,
+						     msg, msglen);
+		break;
+
+	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+		adapter->client_pending &=
+				~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
+		break;
+	case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
+		struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
+		if (msglen == sizeof(*vrh))
+			adapter->hena = vrh->hena;
+		else
+			dev_warn(&adapter->pdev->dev,
+				 "Invalid message %d from PF\n", v_opcode);
+		}
+		break;
+	case VIRTCHNL_OP_REQUEST_QUEUES: {
+		struct virtchnl_vf_res_request *vfres =
+			(struct virtchnl_vf_res_request *)msg;
+		if (vfres->num_queue_pairs != adapter->num_req_queues) {
+			dev_info(&adapter->pdev->dev,
+				 "Requested %d queues, PF can support %d\n",
+				 adapter->num_req_queues,
+				 vfres->num_queue_pairs);
+			adapter->num_req_queues = 0;
+			adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
+		}
+		}
+		break;
+	case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
+		struct i40evf_cloud_filter *cf;
+
+		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
+			if (cf->state == __I40EVF_CF_ADD_PENDING)
+				cf->state = __I40EVF_CF_ACTIVE;
+		}
+		}
+		break;
+	case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
+		struct i40evf_cloud_filter *cf, *cftmp;
+
+		list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
+					 list) {
+			if (cf->state == __I40EVF_CF_DEL_PENDING) {
+				cf->state = __I40EVF_CF_INVALID;
+				list_del(&cf->list);
+				kfree(cf);
+				adapter->num_cloud_filters--;
+			}
+		}
+		}
+		break;
+	default:
+		if (adapter->current_op && (v_opcode != adapter->current_op))
+			dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
+				 adapter->current_op, v_opcode);
+		break;
+	} /* switch v_opcode */
+	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+}
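Every request helper in this new virtchnl file follows the same serialization pattern: only one admin-queue operation may be outstanding, so each helper bails out when adapter->current_op is not VIRTCHNL_OP_UNKNOWN, records the new opcode, clears its aq_required work bit, and sends the message; the completion handler resets current_op once the PF replies. The standalone C sketch below illustrates just that single-outstanding-command guard; the names (vf_ctx, vf_send_request, OP_NONE, ...) are invented for the illustration and are not the driver's API.

#include <stdio.h>

/* Hypothetical opcodes; OP_NONE plays the role of VIRTCHNL_OP_UNKNOWN. */
enum vf_op { OP_NONE = 0, OP_GET_STATS, OP_SET_RSS_KEY };

struct vf_ctx {
	enum vf_op current_op;		/* the single outstanding request */
	unsigned int pending_flags;	/* deferred work bits, like aq_required */
};

/* Returns 0 if the request was issued, -1 if another one is pending. */
static int vf_send_request(struct vf_ctx *vf, enum vf_op op, unsigned int flag)
{
	if (vf->current_op != OP_NONE) {
		fprintf(stderr, "cannot send %d, command %d pending\n",
			op, vf->current_op);
		return -1;
	}
	vf->current_op = op;		/* claim the channel */
	vf->pending_flags &= ~flag;	/* this work item is now in flight */
	/* ... build and send the message to the PF here ... */
	return 0;
}

/* Completion handler: releases the channel for the next request. */
static void vf_complete(struct vf_ctx *vf)
{
	vf->current_op = OP_NONE;
}

int main(void)
{
	struct vf_ctx vf = { .current_op = OP_NONE, .pending_flags = 0x3 };

	vf_send_request(&vf, OP_GET_STATS, 0x1);	/* succeeds */
	vf_send_request(&vf, OP_SET_RSS_KEY, 0x2);	/* rejected: pending */
	vf_complete(&vf);
	vf_send_request(&vf, OP_SET_RSS_KEY, 0x2);	/* succeeds */
	return 0;
}

A caller that loses the race leaves its work bit set and simply retries later, which is why the real helpers only clear the aq_required bit on the path that actually queues the message.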
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 87f98170ac93..6f9d563deb6b 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3700,9 +3700,7 @@ static void igb_remove(struct pci_dev *pdev)
 	igb_release_hw_control(adapter);
 
 #ifdef CONFIG_PCI_IOV
-	rtnl_lock();
 	igb_disable_sriov(pdev);
-	rtnl_unlock();
 #endif
 
 	unregister_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index df827c254162..70f5f28bfd9e 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1070,7 +1070,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
 			  igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
 			  netdev);
 	if (err)
-		goto out;
+		goto free_irq_tx;
 
 	adapter->rx_ring->itr_register = E1000_EITR(vector);
 	adapter->rx_ring->itr_val = adapter->current_itr;
@@ -1079,10 +1079,14 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
 	err = request_irq(adapter->msix_entries[vector].vector,
 			  igbvf_msix_other, 0, netdev->name, netdev);
 	if (err)
-		goto out;
+		goto free_irq_rx;
 
 	igbvf_configure_msix(adapter);
 	return 0;
+free_irq_rx:
+	free_irq(adapter->msix_entries[--vector].vector, netdev);
+free_irq_tx:
+	free_irq(adapter->msix_entries[--vector].vector, netdev);
 out:
 	return err;
 }
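The igbvf_request_msix() change above replaces the shared "goto out" with staged error labels, so a failure requesting the other-cause vector frees the RX vector and a failure at the RX vector frees the TX vector, unwinding in the reverse order of acquisition. A minimal, self-contained sketch of that idiom, with made-up acquire()/release() helpers standing in for request_irq()/free_irq():

#include <stdio.h>

/* Hypothetical resources; acquire() fails when told to. */
static int acquire(const char *name, int fail)
{
	if (fail) {
		fprintf(stderr, "acquiring %s failed\n", name);
		return -1;
	}
	printf("acquired %s\n", name);
	return 0;
}

static void release(const char *name)
{
	printf("released %s\n", name);
}

/* Acquire tx, rx, other in order; on error, release what was acquired. */
static int setup(int fail_at)
{
	int err;

	err = acquire("tx", fail_at == 0);
	if (err)
		goto out;
	err = acquire("rx", fail_at == 1);
	if (err)
		goto free_tx;
	err = acquire("other", fail_at == 2);
	if (err)
		goto free_rx;
	return 0;

free_rx:
	release("rx");
free_tx:
	release("tx");
out:
	return err;
}

int main(void)
{
	return setup(2) ? 1 : 0;	/* exercises the full unwind path */
}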
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index b8ba3f94c363..a47a2e3e548c 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright(c) 2009 - 2018 Intel Corporation. */
 
+#include <linux/etherdevice.h>
+
 #include "vf.h"
 
 static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
@@ -131,11 +133,16 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
 		/* set our "perm_addr" based on info provided by PF */
 		ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
 		if (!ret_val) {
-			if (msgbuf[0] == (E1000_VF_RESET |
-					  E1000_VT_MSGTYPE_ACK))
+			switch (msgbuf[0]) {
+			case E1000_VF_RESET | E1000_VT_MSGTYPE_ACK:
 				memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
-			else
+				break;
+			case E1000_VF_RESET | E1000_VT_MSGTYPE_NACK:
+				eth_zero_addr(hw->mac.perm_addr);
+				break;
+			default:
 				ret_val = -E1000_ERR_MAC_INIT;
+			}
 		}
 	}
 
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index fd1311681200..f1a4b11ce0d1 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -542,6 +542,20 @@ struct mvneta_rx_desc {
 };
 #endif
 
+enum mvneta_tx_buf_type {
+	MVNETA_TYPE_SKB,
+	MVNETA_TYPE_XDP_TX,
+	MVNETA_TYPE_XDP_NDO,
+};
+
+struct mvneta_tx_buf {
+	enum mvneta_tx_buf_type type;
+	union {
+		struct xdp_frame *xdpf;
+		struct sk_buff *skb;
+	};
+};
+
 struct mvneta_tx_queue {
 	/* Number of this TX queue, in the range 0-7 */
 	u8 id;
@@ -557,8 +571,8 @@ struct mvneta_tx_queue {
 	int tx_stop_threshold;
 	int tx_wake_threshold;
 
-	/* Array of transmitted skb */
-	struct sk_buff **tx_skb;
+	/* Array of transmitted buffers */
+	struct mvneta_tx_buf *buf;
 
 	/* Index of last TX DMA descriptor that was inserted */
 	int txq_put_index;
@@ -1767,14 +1781,9 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
 	int i;
 
 	for (i = 0; i < num; i++) {
+		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
 		struct mvneta_tx_desc *tx_desc = txq->descs +
 			txq->txq_get_index;
-		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
-
-		if (skb) {
-			bytes_compl += skb->len;
-			pkts_compl++;
-		}
 
 		mvneta_txq_inc_get(txq);
 
@@ -1782,9 +1791,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
 			dma_unmap_single(pp->dev->dev.parent,
 					 tx_desc->buf_phys_addr,
 					 tx_desc->data_size, DMA_TO_DEVICE);
-		if (!skb)
+		if (!buf->skb)
 			continue;
-		dev_kfree_skb_any(skb);
+
+		bytes_compl += buf->skb->len;
+		pkts_compl++;
+		dev_kfree_skb_any(buf->skb);
 	}
 
 	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
@@ -2238,16 +2250,19 @@ static inline void
 mvneta_tso_put_hdr(struct sk_buff *skb,
 		   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
 {
-	struct mvneta_tx_desc *tx_desc;
 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+	struct mvneta_tx_desc *tx_desc;
 
-	txq->tx_skb[txq->txq_put_index] = NULL;
 	tx_desc = mvneta_txq_next_desc_get(txq);
 	tx_desc->data_size = hdr_len;
 	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
 	tx_desc->command |= MVNETA_TXD_F_DESC;
 	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
 				 txq->txq_put_index * TSO_HEADER_SIZE;
+	buf->type = MVNETA_TYPE_SKB;
+	buf->skb = NULL;
+
 	mvneta_txq_inc_put(txq);
 }
 
@@ -2256,6 +2271,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
 		    struct sk_buff *skb, char *data, int size,
 		    bool last_tcp, bool is_last)
 {
+	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
 	struct mvneta_tx_desc *tx_desc;
 
 	tx_desc = mvneta_txq_next_desc_get(txq);
@@ -2269,7 +2285,8 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
 	}
 
 	tx_desc->command = 0;
-	txq->tx_skb[txq->txq_put_index] = NULL;
+	buf->type = MVNETA_TYPE_SKB;
+	buf->skb = NULL;
 
 	if (last_tcp) {
 		/* last descriptor in the TCP packet */
@@ -2277,7 +2294,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
 
 		/* last descriptor in SKB */
 		if (is_last)
-			txq->tx_skb[txq->txq_put_index] = skb;
+			buf->skb = skb;
 	}
 	mvneta_txq_inc_put(txq);
 	return 0;
@@ -2362,6 +2379,7 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
 	int i, nr_frags = skb_shinfo(skb)->nr_frags;
 
 	for (i = 0; i < nr_frags; i++) {
+		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 		void *addr = page_address(frag->page.p) + frag->page_offset;
 
@@ -2381,12 +2399,13 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
 		if (i == nr_frags - 1) {
 			/* Last descriptor */
 			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
-			txq->tx_skb[txq->txq_put_index] = skb;
+			buf->skb = skb;
 		} else {
 			/* Descriptor in the middle: Not First, Not Last */
 			tx_desc->command = 0;
-			txq->tx_skb[txq->txq_put_index] = NULL;
+			buf->skb = NULL;
 		}
+		buf->type = MVNETA_TYPE_SKB;
 		mvneta_txq_inc_put(txq);
 	}
 
@@ -2414,6 +2433,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 	struct mvneta_port *pp = netdev_priv(dev);
 	u16 txq_id = skb_get_queue_mapping(skb);
 	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
+	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
 	struct mvneta_tx_desc *tx_desc;
 	int len = skb->len;
 	int frags = 0;
@@ -2446,16 +2466,17 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 		goto out;
 	}
 
+	buf->type = MVNETA_TYPE_SKB;
 	if (frags == 1) {
 		/* First and Last descriptor */
 		tx_cmd |= MVNETA_TXD_FLZ_DESC;
 		tx_desc->command = tx_cmd;
-		txq->tx_skb[txq->txq_put_index] = skb;
+		buf->skb = skb;
 		mvneta_txq_inc_put(txq);
 	} else {
 		/* First but not Last */
 		tx_cmd |= MVNETA_TXD_F_DESC;
-		txq->tx_skb[txq->txq_put_index] = NULL;
+		buf->skb = NULL;
 		mvneta_txq_inc_put(txq);
 		tx_desc->command = tx_cmd;
 		/* Continue with other skb fragments */
@@ -3000,9 +3021,8 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
 
 	txq->last_desc = txq->size - 1;
 
-	txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
-				    GFP_KERNEL);
-	if (!txq->tx_skb) {
+	txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
+	if (!txq->buf) {
 		dma_free_coherent(pp->dev->dev.parent,
 				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
 				  txq->descs, txq->descs_phys);
@@ -3014,7 +3034,7 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
 					   txq->size * TSO_HEADER_SIZE,
 					   &txq->tso_hdrs_phys, GFP_KERNEL);
 	if (!txq->tso_hdrs) {
-		kfree(txq->tx_skb);
+		kfree(txq->buf);
 		dma_free_coherent(pp->dev->dev.parent,
 				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
 				  txq->descs, txq->descs_phys);
@@ -3069,7 +3089,7 @@ static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
 {
 	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
 
-	kfree(txq->tx_skb);
+	kfree(txq->buf);
 
 	if (txq->tso_hdrs)
 		dma_free_coherent(pp->dev->dev.parent,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 722998d68564..6f1f53f91ed8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -109,12 +109,14 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
 	if (!MLX5_CAP_GEN(priv->mdev, ets))
 		return -EOPNOTSUPP;
 
-	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
-	for (i = 0; i < ets->ets_cap; i++) {
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
 		err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
 		if (err)
 			return err;
+	}
 
+	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
+	for (i = 0; i < ets->ets_cap; i++) {
 		err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
 		if (err)
 			return err;
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 69282f31d519..fe54bcab705f 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -255,7 +255,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
 	 */
 
 	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
-	if (!laddr) {
+	if (dma_mapping_error(lp->device, laddr)) {
 		pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
@@ -473,7 +473,7 @@ static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
 
 	*new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
 				   SONIC_RBSIZE, DMA_FROM_DEVICE);
-	if (!*new_addr) {
+	if (dma_mapping_error(lp->device, *new_addr)) {
 		dev_kfree_skb(*new_skb);
 		*new_skb = NULL;
 		return false;
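Both sonic.c hunks swap "if (!laddr)" style checks for dma_mapping_error(): zero can be a perfectly valid bus address, so the only reliable failure test is the one the DMA API itself provides. The sketch below models the idea in plain userspace C with a hypothetical mapping layer that reports failure through a dedicated sentinel rather than through 0; the names are invented and only illustrate why testing the handle against zero is wrong.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t bus_addr_t;

/* Hypothetical: the mapping layer reserves one sentinel for failure. */
#define MAPPING_ERROR ((bus_addr_t)-1)

static bus_addr_t map_buffer(const void *cpu_addr, unsigned int len, int force_fail)
{
	if (force_fail)
		return MAPPING_ERROR;
	/* A real IOMMU may legitimately hand back bus address 0. */
	return ((bus_addr_t)(uintptr_t)cpu_addr + len) & 0xfffff;
}

static int mapping_error(bus_addr_t addr)
{
	return addr == MAPPING_ERROR;
}

int main(void)
{
	char buf[64];
	bus_addr_t addr = map_buffer(buf, sizeof(buf), 0);

	if (mapping_error(addr)) {	/* correct: ask the API */
		fprintf(stderr, "mapping failed\n");
		return 1;
	}
	/* The wrong test, "if (!addr)", would misread a valid address of 0
	 * as a failure and would never catch the real sentinel. */
	printf("mapped at %#llx\n", (unsigned long long)addr);
	return 0;
}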
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 402c1c3d84ce..5c8eaded6b30 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -4403,6 +4403,9 @@ qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
 	}
 
 	vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
+	if (!vf)
+		return -EINVAL;
+
 	vport_id = vf->vport_id;
 
 	return qed_configure_vport_wfq(cdev, vport_id, rate);
@@ -5142,7 +5145,7 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
 
 		/* Validate that the VF has a configured vport */
 		vf = qed_iov_get_vf_info(hwfn, i, true);
-		if (!vf->vport_instance)
+		if (!vf || !vf->vport_instance)
 			continue;
 
 		memset(&params, 0, sizeof(params));
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 76a9b37c8680..3c764c28d5db 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -752,9 +752,15 @@ static int emac_remove(struct platform_device *pdev)
 	struct net_device *netdev = dev_get_drvdata(&pdev->dev);
 	struct emac_adapter *adpt = netdev_priv(netdev);
 
+	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
+
 	unregister_netdev(netdev);
 	netif_napi_del(&adpt->rx_q.napi);
 
+	free_irq(adpt->irq.irq, &adpt->irq);
+	cancel_work_sync(&adpt->work_thread);
+
 	emac_clks_teardown(adpt);
 
 	put_device(&adpt->phydev->mdio.dev);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 75237c81c63d..572294678faf 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -330,15 +330,17 @@ static int gelic_card_init_chain(struct gelic_card *card,
 
 	/* set up the hardware pointers in each descriptor */
 	for (i = 0; i < no; i++, descr++) {
+		dma_addr_t cpu_addr;
+
 		gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
-		descr->bus_addr =
-			dma_map_single(ctodev(card), descr,
-				       GELIC_DESCR_SIZE,
-				       DMA_BIDIRECTIONAL);
 
-		if (!descr->bus_addr)
+		cpu_addr = dma_map_single(ctodev(card), descr,
+					  GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL);
+
+		if (dma_mapping_error(ctodev(card), cpu_addr))
 			goto iommu_error;
 
+		descr->bus_addr = cpu_to_be32(cpu_addr);
 		descr->next = descr + 1;
 		descr->prev = descr - 1;
 	}
@@ -378,28 +380,30 @@ static int gelic_card_init_chain(struct gelic_card *card,
  *
  * allocates a new rx skb, iommu-maps it and attaches it to the descriptor.
  * Activate the descriptor state-wise
+ *
+ * Gelic RX sk_buffs must be aligned to GELIC_NET_RXBUF_ALIGN and the length
+ * must be a multiple of GELIC_NET_RXBUF_ALIGN.
  */
 static int gelic_descr_prepare_rx(struct gelic_card *card,
 				  struct gelic_descr *descr)
 {
+	static const unsigned int rx_skb_size =
+		ALIGN(GELIC_NET_MAX_FRAME, GELIC_NET_RXBUF_ALIGN) +
+		GELIC_NET_RXBUF_ALIGN - 1;
+	dma_addr_t cpu_addr;
 	int offset;
-	unsigned int bufsize;
 
 	if (gelic_descr_get_status(descr) !=  GELIC_DESCR_DMA_NOT_IN_USE)
 		dev_info(ctodev(card), "%s: ERROR status\n", __func__);
-	/* we need to round up the buffer size to a multiple of 128 */
-	bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);
 
-	/* and we need to have it 128 byte aligned, therefore we allocate a
-	 * bit more */
-	descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1);
+	descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
 	if (!descr->skb) {
 		descr->buf_addr = 0; /* tell DMAC don't touch memory */
 		dev_info(ctodev(card),
 			 "%s:allocate skb failed !!\n", __func__);
 		return -ENOMEM;
 	}
-	descr->buf_size = cpu_to_be32(bufsize);
+	descr->buf_size = cpu_to_be32(rx_skb_size);
 	descr->dmac_cmd_status = 0;
 	descr->result_size = 0;
 	descr->valid_size = 0;
@@ -410,11 +414,10 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
 	if (offset)
 		skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset);
 	/* io-mmu-map the skb */
-	descr->buf_addr = cpu_to_be32(dma_map_single(ctodev(card),
-						     descr->skb->data,
-						     GELIC_NET_MAX_MTU,
-						     DMA_FROM_DEVICE));
-	if (!descr->buf_addr) {
+	cpu_addr = dma_map_single(ctodev(card), descr->skb->data,
+				  GELIC_NET_MAX_FRAME, DMA_FROM_DEVICE);
+	descr->buf_addr = cpu_to_be32(cpu_addr);
+	if (dma_mapping_error(ctodev(card), cpu_addr)) {
 		dev_kfree_skb_any(descr->skb);
 		descr->skb = NULL;
 		dev_info(ctodev(card),
@@ -794,7 +797,7 @@ static int gelic_descr_prepare_tx(struct gelic_card *card,
 
 	buf = dma_map_single(ctodev(card), skb->data, skb->len, DMA_TO_DEVICE);
 
-	if (!buf) {
+	if (dma_mapping_error(ctodev(card), buf)) {
 		dev_err(ctodev(card),
 			"dma map 2 failed (%p, %i). Dropping packet\n",
 			skb->data, skb->len);
@@ -930,7 +933,7 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr,
 	data_error = be32_to_cpu(descr->data_error);
 	/* unmap skb buffer */
 	dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr),
-			 GELIC_NET_MAX_MTU,
+			 GELIC_NET_MAX_FRAME,
 			 DMA_FROM_DEVICE);
 
 	skb_put(skb, be32_to_cpu(descr->valid_size)?
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index fbbf9b54b173..0e592fc19f6c 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -32,8 +32,9 @@
 #define GELIC_NET_RX_DESCRIPTORS        128 /* num of descriptors */
 #define GELIC_NET_TX_DESCRIPTORS        128 /* num of descriptors */
 
-#define GELIC_NET_MAX_MTU               VLAN_ETH_FRAME_LEN
-#define GELIC_NET_MIN_MTU               VLAN_ETH_ZLEN
+#define GELIC_NET_MAX_FRAME             2312
+#define GELIC_NET_MAX_MTU               2294
+#define GELIC_NET_MIN_MTU               64
 #define GELIC_NET_RXBUF_ALIGN           128
 #define GELIC_CARD_RX_CSUM_DEFAULT      1 /* hw chksum */
 #define GELIC_NET_WATCHDOG_TIMEOUT      5*HZ
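With the new constants above, gelic_descr_prepare_rx() sizes each receive skb as ALIGN(GELIC_NET_MAX_FRAME, GELIC_NET_RXBUF_ALIGN) plus GELIC_NET_RXBUF_ALIGN - 1 bytes of slack, then reserves just enough headroom to land the data pointer on a 128-byte boundary. The small standalone sketch below reproduces only that arithmetic; the fake data address is arbitrary.

#include <stdint.h>
#include <stdio.h>

#define GELIC_NET_MAX_FRAME	2312
#define GELIC_NET_RXBUF_ALIGN	128

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Buffer length: frame size rounded up, plus worst-case slack so the
	 * data pointer can always be moved onto a 128-byte boundary. */
	unsigned int rx_skb_size =
		ALIGN_UP(GELIC_NET_MAX_FRAME, GELIC_NET_RXBUF_ALIGN) +
		GELIC_NET_RXBUF_ALIGN - 1;

	/* Pretend this is the address of skb->data after allocation. */
	uintptr_t data = 0x1045;
	unsigned int offset = data & (GELIC_NET_RXBUF_ALIGN - 1);

	if (offset)	/* the same adjustment skb_reserve() performs */
		data += GELIC_NET_RXBUF_ALIGN - offset;

	printf("buffer size %u, aligned data at %#lx\n",
	       rx_skb_size, (unsigned long)data);
	return 0;
}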
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index fd5288ff53b5..e3438cef5f9c 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -503,6 +503,11 @@ static void
 xirc2ps_detach(struct pcmcia_device *link)
 {
     struct net_device *dev = link->priv;
+    struct local_info *local = netdev_priv(dev);
+
+    netif_carrier_off(dev);
+    netif_tx_disable(dev);
+    cancel_work_sync(&local->tx_timeout_task);
 
     dev_dbg(&link->dev, "detach\n");
 
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 917edb3d04b7..f75faec23cc9 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -1943,10 +1943,9 @@ static int ca8210_skb_tx(
 	struct ca8210_priv  *priv
 )
 {
-	int status;
 	struct ieee802154_hdr header = { };
 	struct secspec secspec;
-	unsigned int mac_len;
+	int mac_len, status;
 
 	dev_dbg(&priv->spi->dev, "%s called\n", __func__);
 
@@ -1954,6 +1953,8 @@ static int ca8210_skb_tx(
 	 * packet
 	 */
 	mac_len = ieee802154_hdr_peek_addrs(skb, &header);
+	if (mac_len < 0)
+		return mac_len;
 
 	secspec.security_level = header.sec.level;
 	secspec.key_id_mode = header.sec.key_id_mode;
diff --git a/drivers/net/phy/mdio-thunder.c b/drivers/net/phy/mdio-thunder.c
index c0c922eff760..959bf342133a 100644
--- a/drivers/net/phy/mdio-thunder.c
+++ b/drivers/net/phy/mdio-thunder.c
@@ -107,6 +107,7 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
 		if (i >= ARRAY_SIZE(nexus->buses))
 			break;
 	}
+	fwnode_handle_put(fwn);
 	return 0;
 
 err_release_regions:
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 5194b2ccd4b7..e61f02f7642c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -256,6 +256,9 @@ struct tun_struct {
 	struct tun_prog __rcu *steering_prog;
 	struct tun_prog __rcu *filter_prog;
 	struct ethtool_link_ksettings link_ksettings;
+	/* init args */
+	struct file *file;
+	struct ifreq *ifr;
 };
 
 struct veth {
@@ -281,6 +284,9 @@ void *tun_ptr_to_xdp(void *ptr)
 }
 EXPORT_SYMBOL(tun_ptr_to_xdp);
 
+static void tun_flow_init(struct tun_struct *tun);
+static void tun_flow_uninit(struct tun_struct *tun);
+
 static int tun_napi_receive(struct napi_struct *napi, int budget)
 {
 	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
@@ -1038,6 +1044,49 @@ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
 
 static const struct ethtool_ops tun_ethtool_ops;
 
+static int tun_net_init(struct net_device *dev)
+{
+	struct tun_struct *tun = netdev_priv(dev);
+	struct ifreq *ifr = tun->ifr;
+	int err;
+
+	tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
+	if (!tun->pcpu_stats)
+		return -ENOMEM;
+
+	spin_lock_init(&tun->lock);
+
+	err = security_tun_dev_alloc_security(&tun->security);
+	if (err < 0) {
+		free_percpu(tun->pcpu_stats);
+		return err;
+	}
+
+	tun_flow_init(tun);
+
+	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
+			   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
+			   NETIF_F_HW_VLAN_STAG_TX;
+	dev->features = dev->hw_features | NETIF_F_LLTX;
+	dev->vlan_features = dev->features &
+			     ~(NETIF_F_HW_VLAN_CTAG_TX |
+			       NETIF_F_HW_VLAN_STAG_TX);
+
+	tun->flags = (tun->flags & ~TUN_FEATURES) |
+		      (ifr->ifr_flags & TUN_FEATURES);
+
+	INIT_LIST_HEAD(&tun->disabled);
+	err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
+			 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
+	if (err < 0) {
+		tun_flow_uninit(tun);
+		security_tun_dev_free_security(tun->security);
+		free_percpu(tun->pcpu_stats);
+		return err;
+	}
+	return 0;
+}
+
 /* Net device detach from fd. */
 static void tun_net_uninit(struct net_device *dev)
 {
@@ -1268,6 +1317,7 @@ static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 }
 
 static const struct net_device_ops tun_netdev_ops = {
+	.ndo_init		= tun_net_init,
 	.ndo_uninit		= tun_net_uninit,
 	.ndo_open		= tun_net_open,
 	.ndo_stop		= tun_net_close,
@@ -1347,6 +1397,7 @@ static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
 }
 
 static const struct net_device_ops tap_netdev_ops = {
+	.ndo_init		= tun_net_init,
 	.ndo_uninit		= tun_net_uninit,
 	.ndo_open		= tun_net_open,
 	.ndo_stop		= tun_net_close,
@@ -1386,7 +1437,7 @@ static void tun_flow_uninit(struct tun_struct *tun)
 #define MAX_MTU 65535
 
 /* Initialize net device. */
-static void tun_net_init(struct net_device *dev)
+static void tun_net_initialize(struct net_device *dev)
 {
 	struct tun_struct *tun = netdev_priv(dev);
 
@@ -2658,9 +2709,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
 		if (!dev)
 			return -ENOMEM;
-		err = dev_get_valid_name(net, dev, name);
-		if (err < 0)
-			goto err_free_dev;
 
 		dev_net_set(dev, net);
 		dev->rtnl_link_ops = &tun_link_ops;
@@ -2679,41 +2727,16 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 		tun->rx_batched = 0;
 		RCU_INIT_POINTER(tun->steering_prog, NULL);
 
-		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
-		if (!tun->pcpu_stats) {
-			err = -ENOMEM;
-			goto err_free_dev;
-		}
-
-		spin_lock_init(&tun->lock);
-
-		err = security_tun_dev_alloc_security(&tun->security);
-		if (err < 0)
-			goto err_free_stat;
-
-		tun_net_init(dev);
-		tun_flow_init(tun);
-
-		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
-				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
-				   NETIF_F_HW_VLAN_STAG_TX;
-		dev->features = dev->hw_features | NETIF_F_LLTX;
-		dev->vlan_features = dev->features &
-				     ~(NETIF_F_HW_VLAN_CTAG_TX |
-				       NETIF_F_HW_VLAN_STAG_TX);
+		tun->ifr = ifr;
+		tun->file = file;
 
-		tun->flags = (tun->flags & ~TUN_FEATURES) |
-			      (ifr->ifr_flags & TUN_FEATURES);
-
-		INIT_LIST_HEAD(&tun->disabled);
-		err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
-				 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
-		if (err < 0)
-			goto err_free_flow;
+		tun_net_initialize(dev);
 
 		err = register_netdevice(tun->dev);
-		if (err < 0)
-			goto err_detach;
+		if (err < 0) {
+			free_netdev(dev);
+			return err;
+		}
 		/* free_netdev() won't check refcnt, to avoid race
 		 * with dev_put() we need publish tun after registration.
 		 */
@@ -2732,20 +2755,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
 	strcpy(ifr->ifr_name, tun->dev->name);
 	return 0;
-
-err_detach:
-	tun_detach_all(dev);
-	/* register_netdevice() already called tun_free_netdev() */
-	goto err_free_dev;
-
-err_free_flow:
-	tun_flow_uninit(tun);
-	security_tun_dev_free_security(tun->security);
-err_free_stat:
-	free_percpu(tun->pcpu_stats);
-err_free_dev:
-	free_netdev(dev);
-	return err;
 }
 
 static void tun_get_iff(struct net *net, struct tun_struct *tun,
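The tun rework moves the per-device allocations and tun_attach() into an .ndo_init hook (tun_net_init), which register_netdevice() invokes on our behalf: if the hook fails it undoes its own allocations and register_netdevice() returns the error, so tun_set_iff() only has to free_netdev() and the old err_detach/err_free_* ladder disappears. A rough userspace sketch of that "let the registration path own the cleanup" shape, with invented names and none of the real netdev API:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical mini "netdev" with an init hook; not the kernel API. */
struct mini_dev {
	int (*init)(struct mini_dev *dev);
	void *priv_resource;
};

static int mini_register(struct mini_dev *dev)
{
	int err = dev->init(dev);	/* like register_netdevice() -> ndo_init */

	if (err)
		return err;		/* init already cleaned up after itself */
	printf("registered\n");
	return 0;
}

/* The init hook allocates everything it needs and undoes it on failure. */
static int demo_init(struct mini_dev *dev)
{
	dev->priv_resource = malloc(64);
	if (!dev->priv_resource)
		return -1;
	/* ... further setup; on error, free priv_resource before returning ... */
	return 0;
}

int main(void)
{
	struct mini_dev *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return 1;
	dev->init = demo_init;
	if (mini_register(dev)) {	/* caller's only job: free the shell */
		free(dev);
		return 1;
	}
	free(dev->priv_resource);
	free(dev);
	return 0;
}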
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 41bac861ca99..72a93dc2df86 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -665,6 +665,11 @@ static const struct usb_device_id mbim_devs[] = {
 	  .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
 	},
 
+	/* Telit FE990 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1081, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+	  .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
+	},
+
 	/* default entry */
 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
 	  .driver_info = (unsigned long)&cdc_mbim_info_zlp,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 24ce49b311c4..5417932242e7 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1322,6 +1322,7 @@ static const struct usb_device_id products[] = {
 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)},	/* Telit FN980 */
 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)},	/* Telit LN920 */
 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)},	/* Telit FN990 */
+	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */
 	{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},	/* Telit ME910 */
 	{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},	/* Telit ME910 dual modem */
 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 4f29010e1aef..085048686413 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1950,6 +1950,12 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 		size = (u16)((header & RX_STS_FL_) >> 16);
 		align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4;
 
+		if (unlikely(size > skb->len)) {
+			netif_dbg(dev, rx_err, dev->net,
+				  "size err header=0x%08x\n", header);
+			return 0;
+		}
+
 		if (unlikely(header & RX_STS_ES_)) {
 			netif_dbg(dev, rx_err, dev->net,
 				  "Error header=0x%08x\n", header);
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 92d30ebdb111..2b984c5bae24 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -166,7 +166,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
 	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
 	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
 
-	struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
+	struct gnttab_copy tx_copy_ops[2 * MAX_PENDING_REQS];
 	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
 	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
 	/* passed to gnttab_[un]map_refs with pages under (un)mapping */
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index fc389f2bba7a..ed644b6824ce 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -327,6 +327,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 struct xenvif_tx_cb {
 	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
 	u8 copy_count;
+	u32 split_mask;
 };
 
 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
@@ -354,6 +355,8 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
 	struct sk_buff *skb =
 		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
 			  GFP_ATOMIC | __GFP_NOWARN);
+
+	BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
 	if (unlikely(skb == NULL))
 		return NULL;
 
@@ -389,11 +392,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 	nr_slots = shinfo->nr_frags + 1;
 
 	copy_count(skb) = 0;
+	XENVIF_TX_CB(skb)->split_mask = 0;
 
 	/* Create copy ops for exactly data_len bytes into the skb head. */
 	__skb_put(skb, data_len);
 	while (data_len > 0) {
 		int amount = data_len > txp->size ? txp->size : data_len;
+		bool split = false;
 
 		cop->source.u.ref = txp->gref;
 		cop->source.domid = queue->vif->domid;
@@ -406,6 +411,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
 				               - data_len);
 
+		/* Don't cross local page boundary! */
+		if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
+			amount = XEN_PAGE_SIZE - cop->dest.offset;
+			XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
+			split = true;
+		}
+
 		cop->len = amount;
 		cop->flags = GNTCOPY_source_gref;
 
@@ -413,7 +425,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 		pending_idx = queue->pending_ring[index];
 		callback_param(queue, pending_idx).ctx = NULL;
 		copy_pending_idx(skb, copy_count(skb)) = pending_idx;
-		copy_count(skb)++;
+		if (!split)
+			copy_count(skb)++;
 
 		cop++;
 		data_len -= amount;
@@ -434,7 +447,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 			nr_slots--;
 		} else {
 			/* The copy op partially covered the tx_request.
-			 * The remainder will be mapped.
+			 * The remainder will be mapped or copied in the next
+			 * iteration.
 			 */
 			txp->offset += amount;
 			txp->size -= amount;
@@ -532,6 +546,13 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 		pending_idx = copy_pending_idx(skb, i);
 
 		newerr = (*gopp_copy)->status;
+
+		/* Split copies need to be handled together. */
+		if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
+			(*gopp_copy)++;
+			if (!newerr)
+				newerr = (*gopp_copy)->status;
+		}
 		if (likely(!newerr)) {
 			/* The first frag might still have this slot mapped */
 			if (i < copy_count(skb) - 1 || !sharedslot)
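The xen-netback change doubles tx_copy_ops because a copy whose destination range straddles a local XEN_PAGE_SIZE boundary is now emitted as two grant-copy operations, and split_mask records which slots were split so xenvif_tx_check_gop() checks both statuses for that slot. The standalone sketch below shows only the boundary-splitting arithmetic under that assumption; the structure and helper are invented and do not mirror the netback data types.

#include <stdio.h>

#define PAGE_SIZE	4096u

struct copy_op {
	unsigned int dest_offset;	/* offset within the destination page */
	unsigned int len;
};

/*
 * Split a copy of 'len' bytes starting at destination offset 'offset'
 * into ops that never cross a PAGE_SIZE boundary. Returns the number
 * of ops emitted and records which slots were split in *split_mask.
 */
static int build_copy_ops(unsigned int offset, unsigned int len,
			  struct copy_op *ops, unsigned int *split_mask)
{
	int n = 0;

	*split_mask = 0;
	while (len) {
		unsigned int chunk = len;

		if ((offset & (PAGE_SIZE - 1)) + chunk > PAGE_SIZE) {
			chunk = PAGE_SIZE - (offset & (PAGE_SIZE - 1));
			*split_mask |= 1u << n;	/* this slot needed a split */
		}
		ops[n].dest_offset = offset & (PAGE_SIZE - 1);
		ops[n].len = chunk;
		offset += chunk;
		len -= chunk;
		n++;
	}
	return n;
}

int main(void)
{
	struct copy_op ops[4];
	unsigned int mask;
	int i, n = build_copy_ops(4000, 300, ops, &mask);	/* crosses a page */

	for (i = 0; i < n; i++)
		printf("op %d: offset %u len %u%s\n", i, ops[i].dest_offset,
		       ops[i].len, (mask & (1u << i)) ? " (split)" : "");
	return 0;
}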
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 89d88e447d44..5b883eb49ce9 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -1080,7 +1080,6 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
 		dev_err(dev, "can't add the irq domain\n");
 		return -ENODEV;
 	}
-	atmel_pioctrl->irq_domain->name = "atmel gpio";
 
 	for (i = 0; i < atmel_pioctrl->npins; i++) {
 		int irq = irq_create_mapping(atmel_pioctrl->irq_domain, i);
diff --git a/drivers/power/supply/da9150-charger.c b/drivers/power/supply/da9150-charger.c
index 60099815296e..b2d38eb32288 100644
--- a/drivers/power/supply/da9150-charger.c
+++ b/drivers/power/supply/da9150-charger.c
@@ -666,6 +666,7 @@ static int da9150_charger_remove(struct platform_device *pdev)
 
 	if (!IS_ERR_OR_NULL(charger->usb_phy))
 		usb_unregister_notifier(charger->usb_phy, &charger->otg_nb);
+	cancel_work_sync(&charger->otg_work);
 
 	power_supply_unregister(charger->battery);
 	power_supply_unregister(charger->usb);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 4cf7c3348bff..9be913c19a6e 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1050,10 +1050,12 @@ static int alua_activate(struct scsi_device *sdev,
 	rcu_read_unlock();
 	mutex_unlock(&h->init_mutex);
 
-	if (alua_rtpg_queue(pg, sdev, qdata, true))
+	if (alua_rtpg_queue(pg, sdev, qdata, true)) {
 		fn = NULL;
-	else
+	} else {
+		kfree(qdata);
 		err = SCSI_DH_DEV_OFFLINED;
+	}
 	kref_put(&pg->kref, release_port_group);
 out:
 	if (fn)
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index bdb12bf0d5c7..b400167f9ad4 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -4367,7 +4367,7 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
 	devhandle = megasas_get_tm_devhandle(scmd->device);
 
 	if (devhandle == (u16)ULONG_MAX) {
-		ret = SUCCESS;
+		ret = FAILED;
 		sdev_printk(KERN_INFO, scmd->device,
 			"task abort issued for invalid devhandle\n");
 		mutex_unlock(&instance->reset_mutex);
@@ -4440,7 +4440,7 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
 	devhandle = megasas_get_tm_devhandle(scmd->device);
 
 	if (devhandle == (u16)ULONG_MAX) {
-		ret = SUCCESS;
+		ret = FAILED;
 		sdev_printk(KERN_INFO, scmd->device,
 			"target reset issued for invalid devhandle\n");
 		mutex_unlock(&instance->reset_mutex);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 6a2a413cc97e..d8557a00e1ec 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -232,6 +232,7 @@ static struct {
 	{"SGI", "RAID5", "*", BLIST_SPARSELUN},
 	{"SGI", "TP9100", "*", BLIST_REPORTLUN2},
 	{"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+	{"SKhynix", "H28U74301AMR", NULL, BLIST_SKIP_VPD_PAGES},
 	{"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
 	{"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
 	{"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index abc156cf05f6..b45cd6c98bad 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -8228,5 +8228,6 @@ EXPORT_SYMBOL_GPL(ufshcd_init);
 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@...sung.com>");
 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@...sung.com>");
 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
+MODULE_SOFTDEP("pre: governor_simpleondemand");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 29a37b242d30..01f93de93c8c 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1270,18 +1270,20 @@ static struct iscsi_param *iscsi_check_key(
 		return param;
 
 	if (!(param->phase & phase)) {
-		pr_err("Key \"%s\" may not be negotiated during ",
-				param->name);
+		char *phase_name;
+
 		switch (phase) {
 		case PHASE_SECURITY:
-			pr_debug("Security phase.\n");
+			phase_name = "Security";
 			break;
 		case PHASE_OPERATIONAL:
-			pr_debug("Operational phase.\n");
+			phase_name = "Operational";
 			break;
 		default:
-			pr_debug("Unknown phase.\n");
+			phase_name = "Unknown";
 		}
+		pr_err("Key \"%s\" may not be negotiated during %s phase.\n",
+				param->name, phase_name);
 		return NULL;
 	}
 
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 384623c49cfe..d22c7216d68c 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -38,7 +38,7 @@
 
 #define NHI_MAILBOX_TIMEOUT	500 /* ms */
 
-static int ring_interrupt_index(struct tb_ring *ring)
+static int ring_interrupt_index(const struct tb_ring *ring)
 {
 	int bit = ring->hop;
 	if (!ring->is_tx)
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 47ffb485ff34..59d85bdd132b 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -43,6 +43,7 @@ struct xencons_info {
 	int irq;
 	int vtermno;
 	grant_ref_t gntref;
+	spinlock_t ring_lock;
 };
 
 static LIST_HEAD(xenconsoles);
@@ -89,12 +90,15 @@ static int __write_console(struct xencons_info *xencons,
 	XENCONS_RING_IDX cons, prod;
 	struct xencons_interface *intf = xencons->intf;
 	int sent = 0;
+	unsigned long flags;
 
+	spin_lock_irqsave(&xencons->ring_lock, flags);
 	cons = intf->out_cons;
 	prod = intf->out_prod;
 	mb();			/* update queue values before going on */
 
 	if ((prod - cons) > sizeof(intf->out)) {
+		spin_unlock_irqrestore(&xencons->ring_lock, flags);
 		pr_err_once("xencons: Illegal ring page indices");
 		return -EINVAL;
 	}
@@ -104,6 +108,7 @@ static int __write_console(struct xencons_info *xencons,
 
 	wmb();			/* write ring before updating pointer */
 	intf->out_prod = prod;
+	spin_unlock_irqrestore(&xencons->ring_lock, flags);
 
 	if (sent)
 		notify_daemon(xencons);
@@ -146,16 +151,19 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
 	int recv = 0;
 	struct xencons_info *xencons = vtermno_to_xencons(vtermno);
 	unsigned int eoiflag = 0;
+	unsigned long flags;
 
 	if (xencons == NULL)
 		return -EINVAL;
 	intf = xencons->intf;
 
+	spin_lock_irqsave(&xencons->ring_lock, flags);
 	cons = intf->in_cons;
 	prod = intf->in_prod;
 	mb();			/* get pointers before reading ring */
 
 	if ((prod - cons) > sizeof(intf->in)) {
+		spin_unlock_irqrestore(&xencons->ring_lock, flags);
 		pr_err_once("xencons: Illegal ring page indices");
 		return -EINVAL;
 	}
@@ -179,10 +187,13 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
 		xencons->out_cons = intf->out_cons;
 		xencons->out_cons_same = 0;
 	}
+	if (!recv && xencons->out_cons_same++ > 1) {
+		eoiflag = XEN_EOI_FLAG_SPURIOUS;
+	}
+	spin_unlock_irqrestore(&xencons->ring_lock, flags);
+
 	if (recv) {
 		notify_daemon(xencons);
-	} else if (xencons->out_cons_same++ > 1) {
-		eoiflag = XEN_EOI_FLAG_SPURIOUS;
 	}
 
 	xen_irq_lateeoi(xencons->irq, eoiflag);
@@ -239,6 +250,7 @@ static int xen_hvm_console_init(void)
 		info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
 		if (!info)
 			return -ENOMEM;
+		spin_lock_init(&info->ring_lock);
 	} else if (info->intf != NULL) {
 		/* already configured */
 		return 0;
@@ -275,6 +287,7 @@ static int xen_hvm_console_init(void)
 
 static int xencons_info_pv_init(struct xencons_info *info, int vtermno)
 {
+	spin_lock_init(&info->ring_lock);
 	info->evtchn = xen_start_info->console.domU.evtchn;
 	/* GFN == MFN for PV guest */
 	info->intf = gfn_to_virt(xen_start_info->console.domU.mfn);
@@ -325,6 +338,7 @@ static int xen_initial_domain_console_init(void)
 		info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
 		if (!info)
 			return -ENOMEM;
+		spin_lock_init(&info->ring_lock);
 	}
 
 	info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
@@ -482,6 +496,7 @@ static int xencons_probe(struct xenbus_device *dev,
 	info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
+	spin_lock_init(&info->ring_lock);
 	dev_set_drvdata(&dev->dev, info);
 	info->xbdev = dev;
 	info->vtermno = xenbus_devid_to_vtermno(devid);
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 6a2cc5cd0281..d0e9f3265f5a 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -202,6 +202,7 @@ struct hw_bank {
  * @in_lpm: if the core in low power mode
  * @wakeup_int: if wakeup interrupt occur
  * @rev: The revision number for controller
+ * @mutex: protect code from concurrent running when doing role switch
  */
 struct ci_hdrc {
 	struct device			*dev;
@@ -254,6 +255,7 @@ struct ci_hdrc {
 	bool				in_lpm;
 	bool				wakeup_int;
 	enum ci_revision		rev;
+	struct mutex                    mutex;
 };
 
 static inline struct ci_role_driver *ci_role(struct ci_hdrc *ci)
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index c13f9a153a5c..3fd1073a345d 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -872,9 +872,16 @@ static ssize_t role_store(struct device *dev,
 			     strlen(ci->roles[role]->name)))
 			break;
 
-	if (role == CI_ROLE_END || role == ci->role)
+	if (role == CI_ROLE_END)
 		return -EINVAL;
 
+	mutex_lock(&ci->mutex);
+
+	if (role == ci->role) {
+		mutex_unlock(&ci->mutex);
+		return n;
+	}
+
 	pm_runtime_get_sync(dev);
 	disable_irq(ci->irq);
 	ci_role_stop(ci);
@@ -883,6 +890,7 @@ static ssize_t role_store(struct device *dev,
 		ci_handle_vbus_change(ci);
 	enable_irq(ci->irq);
 	pm_runtime_put_sync(dev);
+	mutex_unlock(&ci->mutex);
 
 	return (ret == 0) ? n : ret;
 }
@@ -921,6 +929,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	spin_lock_init(&ci->lock);
+	mutex_init(&ci->mutex);
 	ci->dev = dev;
 	ci->platdata = dev_get_platdata(dev);
 	ci->imx28_write_fix = !!(ci->platdata->flags &
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index f25d4827fd49..a714cf3f0ab7 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -164,8 +164,10 @@ static int hw_wait_vbus_lower_bsv(struct ci_hdrc *ci)
 
 static void ci_handle_id_switch(struct ci_hdrc *ci)
 {
-	enum ci_role role = ci_otg_role(ci);
+	enum ci_role role;
 
+	mutex_lock(&ci->mutex);
+	role = ci_otg_role(ci);
 	if (role != ci->role) {
 		dev_dbg(ci->dev, "switching from %s to %s\n",
 			ci_role(ci)->name, ci->roles[role]->name);
@@ -188,6 +190,7 @@ static void ci_handle_id_switch(struct ci_hdrc *ci)
 		if (role == CI_ROLE_GADGET)
 			ci_handle_vbus_change(ci);
 	}
+	mutex_unlock(&ci->mutex);
 }
 /**
  * ci_otg_work - perform otg (vbus/id) event handle
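
The chipidea change above serialises role switches that can be requested concurrently from the sysfs "role" attribute and from the ID-pin event worker; a request for the role that is already active now returns success without doing any work. Stripped of the driver specifics, the idea is sketched below with a pthread mutex in place of struct mutex; all names are invented for illustration.

#include <pthread.h>
#include <stdio.h>

enum role { ROLE_HOST, ROLE_GADGET };

static pthread_mutex_t role_mutex = PTHREAD_MUTEX_INITIALIZER;
static enum role current_role = ROLE_HOST;

static int set_role(enum role new_role)
{
	pthread_mutex_lock(&role_mutex);
	if (new_role == current_role) {	/* already there: success, no work */
		pthread_mutex_unlock(&role_mutex);
		return 0;
	}
	/* stop the old role, start the new one ... */
	current_role = new_role;
	pthread_mutex_unlock(&role_mutex);
	return 0;
}

int main(void)
{
	set_role(ROLE_GADGET);	/* e.g. a sysfs write */
	set_role(ROLE_GADGET);	/* e.g. the ID event racing in: harmless no-op */
	printf("role = %d\n", current_role);
	return 0;
}
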
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 168303f21bf4..3136a239e782 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -626,7 +626,7 @@ void g_audio_cleanup(struct g_audio *g_audio)
 	uac = g_audio->uac;
 	card = uac->card;
 	if (card)
-		snd_card_free(card);
+		snd_card_free_when_closed(card);
 
 	kfree(uac->p_prm.ureq);
 	kfree(uac->c_prm.ureq);
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 3e2474959735..7679fb583e41 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -148,7 +148,7 @@ static int pxa27x_ohci_select_pmm(struct pxa27x_ohci *pxa_ohci, int mode)
 		uhcrhda |= RH_A_NPS;
 		break;
 	case PMM_GLOBAL_MODE:
-		uhcrhda &= ~(RH_A_NPS & RH_A_PSM);
+		uhcrhda &= ~(RH_A_NPS | RH_A_PSM);
 		break;
 	case PMM_PERPORT_MODE:
 		uhcrhda &= ~(RH_A_NPS);
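
The one-character ohci-pxa27x fix is the classic clear-multiple-bits mistake: to clear two flags the masks must be OR-ed before inverting, while AND-ing them first usually clears nothing. A tiny standalone demo with made-up flag values:

#include <stdio.h>

#define FLAG_A 0x1u
#define FLAG_B 0x2u

int main(void)
{
	unsigned int v = FLAG_A | FLAG_B;

	printf("%#x\n", v & ~(FLAG_A & FLAG_B));	/* 0x3: both flags survive */
	printf("%#x\n", v & ~(FLAG_A | FLAG_B));	/* 0x0: both flags cleared */
	return 0;
}
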
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index d4fa29b623ff..a4513dd931b2 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -111,6 +111,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_BROKEN_FUA),
 
+/* Reported by: Yaroslav Furman <yaro330@...il.com> */
+UNUSUAL_DEV(0x152d, 0x0583, 0x0000, 0x9999,
+		"JMicron",
+		"JMS583Gen 2",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_NO_REPORT_OPCODES),
+
 /* Reported-by: Thinh Nguyen <thinhn@...opsys.com> */
 UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
 		"PNY",
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index 3872ccef4cb2..f8e83a951918 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1039,6 +1039,9 @@ static int au1200fb_fb_check_var(struct fb_var_screeninfo *var,
 	u32 pixclock;
 	int screen_size, plane;
 
+	if (!var->pixclock)
+		return -EINVAL;
+
 	plane = fbdev->plane;
 
 	/* Make sure that the mode respect all LCD controller and
diff --git a/drivers/video/fbdev/geode/lxfb_core.c b/drivers/video/fbdev/geode/lxfb_core.c
index 138da6cb6cbc..4345246b4c79 100644
--- a/drivers/video/fbdev/geode/lxfb_core.c
+++ b/drivers/video/fbdev/geode/lxfb_core.c
@@ -247,6 +247,9 @@ static void get_modedb(struct fb_videomode **modedb, unsigned int *size)
 
 static int lxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 {
+	if (!var->pixclock)
+		return -EINVAL;
+
 	if (var->xres > 1920 || var->yres > 1440)
 		return -EINVAL;
 
diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
index d7463a2a5d83..c97c0c851480 100644
--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
+++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
@@ -1215,6 +1215,9 @@ static int intelfb_check_var(struct fb_var_screeninfo *var,
 
 	dinfo = GET_DINFO(info);
 
+	if (!var->pixclock)
+		return -EINVAL;
+
 	/* update the pitch */
 	if (intelfbhw_validate_mode(dinfo, var) != 0)
 		return -EINVAL;
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index fbeeed5afe35..aa502b3ba25a 100644
--- a/drivers/video/fbdev/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
@@ -766,6 +766,8 @@ static int nvidiafb_check_var(struct fb_var_screeninfo *var,
 	int pitch, err = 0;
 
 	NVTRACE_ENTER();
+	if (!var->pixclock)
+		return -EINVAL;
 
 	var->transp.offset = 0;
 	var->transp.length = 0;
diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
index 65ba9921506e..9d2912947eef 100644
--- a/drivers/video/fbdev/tgafb.c
+++ b/drivers/video/fbdev/tgafb.c
@@ -166,6 +166,9 @@ tgafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 {
 	struct tga_par *par = (struct tga_par *)info->par;
 
+	if (!var->pixclock)
+		return -EINVAL;
+
 	if (par->tga_type == TGA_TYPE_8PLANE) {
 		if (var->bits_per_pixel != 8)
 			return -EINVAL;
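
The fbdev hunks above all add the same guard: these check_var() implementations eventually divide by var->pixclock, so a zero value has to be rejected up front. A minimal, self-contained sketch of that shape (not taken from any of the drivers; struct and field names are illustrative):

#include <errno.h>
#include <stdio.h>

struct var_screeninfo {
	unsigned int pixclock;	/* pixel clock period in picoseconds */
	unsigned int xres, yres;
};

static int check_var(const struct var_screeninfo *var)
{
	if (!var->pixclock)
		return -EINVAL;	/* would otherwise divide by zero below */

	/* e.g. derive a dot clock in kHz from the period */
	unsigned long clock_khz = 1000000000UL / var->pixclock;

	return clock_khz ? 0 : -EINVAL;
}

int main(void)
{
	struct var_screeninfo bad = { .pixclock = 0 };
	struct var_screeninfo ok  = { .pixclock = 39722 };	/* ~25 MHz */

	printf("%d %d\n", check_var(&bad), check_var(&ok));
	return 0;
}
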
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index f047e87871a1..c1d5daa4b351 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -121,7 +121,10 @@ extern const struct dentry_operations cifs_ci_dentry_ops;
 #ifdef CONFIG_CIFS_DFS_UPCALL
 extern struct vfsmount *cifs_dfs_d_automount(struct path *path);
 #else
-#define cifs_dfs_d_automount NULL
+static inline struct vfsmount *cifs_dfs_d_automount(struct path *path)
+{
+	return ERR_PTR(-EREMOTE);
+}
 #endif
 
 /* Functions related to symlinks */
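
The cifs change swaps a "#define ... NULL" stub for a static inline that returns an error pointer, so call sites stay unconditional when DFS support is compiled out. A rough user-space illustration of the pattern, with ERR_PTR()/IS_ERR() re-created locally purely for the example:

#include <stdio.h>

#define EREMOTE 66

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}
static inline long PTR_ERR(const void *p) { return (long)p; }

/* feature compiled out: the stub keeps the caller unconditional */
static inline void *dfs_automount(void)
{
	return ERR_PTR(-EREMOTE);
}

int main(void)
{
	void *mnt = dfs_automount();

	if (IS_ERR(mnt))
		printf("automount failed: %ld\n", PTR_ERR(mnt));
	return 0;
}
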
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index cb70f0c6aa1b..d16fd8d1f291 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -4895,8 +4895,13 @@ CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses,
 		return -ENODEV;
 
 getDFSRetry:
-	rc = smb_init(SMB_COM_TRANSACTION2, 15, ses->tcon_ipc, (void **) &pSMB,
-		      (void **) &pSMBr);
+	/*
+	 * Use smb_init_no_reconnect() instead of smb_init() as
+	 * CIFSGetDFSRefer() may be called from cifs_reconnect_tcon() and thus
+	 * causing an infinite recursion.
+	 */
+	rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, ses->tcon_ipc,
+				   (void **)&pSMB, (void **)&pSMBr);
 	if (rc)
 		return rc;
 
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index f906984eb25b..118bcb351af9 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -475,7 +475,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
 	if (rc == -EOPNOTSUPP) {
 		cifs_dbg(FYI,
 			 "server does not support query network interfaces\n");
-		goto out;
+		ret_data_len = 0;
 	} else if (rc != 0) {
 		cifs_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
 		goto out;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index e844d91c461b..7aaf4dafd3e7 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1428,7 +1428,8 @@ static int ext4_write_end(struct file *file,
 	int inline_data = ext4_has_inline_data(inode);
 
 	trace_ext4_write_end(inode, pos, len, copied);
-	if (inline_data) {
+	if (inline_data &&
+	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
 		ret = ext4_write_inline_data_end(inode, pos, len,
 						 copied, page);
 		if (ret < 0) {
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index c5390421cca2..d9866d89f2fb 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -480,8 +480,6 @@ int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
 		return error;
 
 	kaddr = kmap_atomic(page);
-	if (dsize > gfs2_max_stuffed_size(ip))
-		dsize = gfs2_max_stuffed_size(ip);
 	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
 	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
 	kunmap_atomic(kaddr);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 150cec85c416..ccafd45b63f6 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -72,9 +72,6 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
 		void *kaddr = kmap(page);
 		u64 dsize = i_size_read(inode);
  
-		if (dsize > gfs2_max_stuffed_size(ip))
-			dsize = gfs2_max_stuffed_size(ip);
-
 		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
 		memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
 		kunmap(page);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 20f08f4391c9..a7a423adf7c8 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -388,6 +388,9 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
 	ip->i_depth = (u8)depth;
 	ip->i_entries = be32_to_cpu(str->di_entries);
 
+	if (gfs2_is_stuffed(ip) && ip->i_inode.i_size > gfs2_max_stuffed_size(ip))
+		goto corrupt;
+
 	if (S_ISREG(ip->i_inode.i_mode))
 		gfs2_set_aops(&ip->i_inode);
 
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 708aa1b92036..dfb2083b8ce1 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -70,7 +70,7 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
 	if (argv->v_index > ~(__u64)0 - argv->v_nmembs)
 		return -EINVAL;
 
-	buf = (void *)__get_free_pages(GFP_NOFS, 0);
+	buf = (void *)get_zeroed_page(GFP_NOFS);
 	if (unlikely(!buf))
 		return -ENOMEM;
 	maxmembs = PAGE_SIZE / argv->v_size;
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index b6948813eb06..1353db3f7f48 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -2003,11 +2003,25 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
 	}
 
 	if (unlikely(copied < len) && wc->w_target_page) {
+		loff_t new_isize;
+
 		if (!PageUptodate(wc->w_target_page))
 			copied = 0;
 
-		ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
-				       start+len);
+		new_isize = max_t(loff_t, i_size_read(inode), pos + copied);
+		if (new_isize > page_offset(wc->w_target_page))
+			ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
+					       start+len);
+		else {
+			/*
+			 * When page is fully beyond new isize (data copy
+			 * failed), do not bother zeroing the page. Invalidate
+			 * it instead so that writeback does not get confused
+			 * it instead so that writeback does not get confused
+			 * and put page & buffer dirty bits into an inconsistent
+			 */
+			block_invalidatepage(wc->w_target_page, 0, PAGE_SIZE);
+		}
 	}
 	if (wc->w_target_page)
 		flush_dcache_page(wc->w_target_page);
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 934633a05d20..7f1478c26a33 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -40,14 +40,14 @@ static inline bool cpusets_enabled(void)
 
 static inline void cpuset_inc(void)
 {
-	static_branch_inc(&cpusets_pre_enable_key);
-	static_branch_inc(&cpusets_enabled_key);
+	static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
+	static_branch_inc_cpuslocked(&cpusets_enabled_key);
 }
 
 static inline void cpuset_dec(void)
 {
-	static_branch_dec(&cpusets_enabled_key);
-	static_branch_dec(&cpusets_pre_enable_key);
+	static_branch_dec_cpuslocked(&cpusets_enabled_key);
+	static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
 }
 
 extern int cpuset_init(void);
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 24e16538e4d7..285101772c75 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -603,7 +603,7 @@ static int __init bpf_jit_charge_init(void)
 {
 	/* Only used as heuristic here to derive limit. */
 	bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
-	bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
+	bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
 					    PAGE_SIZE), LONG_MAX);
 	return 0;
 }
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 61644976225a..c0ebb70808b6 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -13,6 +13,7 @@
 #include <linux/delayacct.h>
 #include <linux/pid_namespace.h>
 #include <linux/cgroupstats.h>
+#include <linux/cpu.h>
 
 #include <trace/events/cgroup.h>
 
@@ -55,6 +56,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 	int retval = 0;
 
 	mutex_lock(&cgroup_mutex);
+	get_online_cpus();
 	percpu_down_write(&cgroup_threadgroup_rwsem);
 	for_each_root(root) {
 		struct cgroup *from_cgrp;
@@ -71,6 +73,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 			break;
 	}
 	percpu_up_write(&cgroup_threadgroup_rwsem);
+	put_online_cpus();
 	mutex_unlock(&cgroup_mutex);
 
 	return retval;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index a892a99eb4bf..a8185cdb8587 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -55,6 +55,7 @@
 #include <linux/nsproxy.h>
 #include <linux/file.h>
 #include <linux/sched/cputime.h>
+#include <linux/cpu.h>
 #include <net/sock.h>
 
 #define CREATE_TRACE_POINTS
@@ -2209,6 +2210,45 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
 }
 EXPORT_SYMBOL_GPL(task_cgroup_path);
 
+/**
+ * cgroup_attach_lock - Lock for ->attach()
+ * @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem
+ *
+ * cgroup migration sometimes needs to stabilize threadgroups against forks and
+ * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
+ * implementations (e.g. cpuset), also need to disable CPU hotplug.
+ * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
+ * lead to deadlocks.
+ *
+ * Bringing up a CPU may involve creating and destroying tasks which requires
+ * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
+ * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while
+ * write-locking threadgroup_rwsem, the locking order is reversed and we end up
+ * waiting for an on-going CPU hotplug operation which in turn is waiting for
+ * the threadgroup_rwsem to be released to create new tasks. For more details:
+ *
+ *   http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
+ *
+ * Resolve the situation by always acquiring cpus_read_lock() before optionally
+ * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
+ * CPU hotplug is disabled on entry.
+ */
+static void cgroup_attach_lock(void)
+{
+	get_online_cpus();
+	percpu_down_write(&cgroup_threadgroup_rwsem);
+}
+
+/**
+ * cgroup_attach_unlock - Undo cgroup_attach_lock()
+ * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem
+ */
+static void cgroup_attach_unlock(void)
+{
+	percpu_up_write(&cgroup_threadgroup_rwsem);
+	put_online_cpus();
+}
+
 /**
  * cgroup_migrate_add_task - add a migration target task to a migration context
  * @task: target task
@@ -2694,7 +2734,7 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
 	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
 		return ERR_PTR(-EINVAL);
 
-	percpu_down_write(&cgroup_threadgroup_rwsem);
+	cgroup_attach_lock();
 
 	rcu_read_lock();
 	if (pid) {
@@ -2725,7 +2765,7 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
 	goto out_unlock_rcu;
 
 out_unlock_threadgroup:
-	percpu_up_write(&cgroup_threadgroup_rwsem);
+	cgroup_attach_unlock();
 out_unlock_rcu:
 	rcu_read_unlock();
 	return tsk;
@@ -2740,7 +2780,7 @@ void cgroup_procs_write_finish(struct task_struct *task)
 	/* release reference from cgroup_procs_write_start() */
 	put_task_struct(task);
 
-	percpu_up_write(&cgroup_threadgroup_rwsem);
+	cgroup_attach_unlock();
 	for_each_subsys(ss, ssid)
 		if (ss->post_attach)
 			ss->post_attach();
@@ -2799,7 +2839,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 
 	lockdep_assert_held(&cgroup_mutex);
 
-	percpu_down_write(&cgroup_threadgroup_rwsem);
+	cgroup_attach_lock();
 
 	/* look up all csses currently attached to @cgrp's subtree */
 	spin_lock_irq(&css_set_lock);
@@ -2830,7 +2870,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 	ret = cgroup_migrate_execute(&mgctx);
 out_finish:
 	cgroup_migrate_finish(&mgctx);
-	percpu_up_write(&cgroup_threadgroup_rwsem);
+	cgroup_attach_unlock();
 	return ret;
 }
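
The lock-ordering rule described in the cgroup_attach_lock() comment above boils down to "always take the cpus lock before the threadgroup rwsem". Reduced to two pthread mutexes, the toy model looks like this; it is purely illustrative and not kernel code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cpus_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t threadgroup_lock = PTHREAD_MUTEX_INITIALIZER;

static void attach_lock(void)	/* analogue of cgroup_attach_lock() */
{
	pthread_mutex_lock(&cpus_lock);		/* outer lock first */
	pthread_mutex_lock(&threadgroup_lock);	/* then the inner one */
}

static void attach_unlock(void)	/* analogue of cgroup_attach_unlock() */
{
	pthread_mutex_unlock(&threadgroup_lock);
	pthread_mutex_unlock(&cpus_lock);
}

int main(void)
{
	attach_lock();
	/* ->attach()-style work may now assume "hotplug" is excluded */
	attach_unlock();
	puts("consistent lock order, no ABBA deadlock possible");
	return 0;
}
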
 
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index dcd5755b1fe2..c6d412cebc43 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -830,8 +830,8 @@ static void rebuild_sched_domains_locked(void)
 	cpumask_var_t *doms;
 	int ndoms;
 
+	lockdep_assert_cpus_held();
 	lockdep_assert_held(&cpuset_mutex);
-	get_online_cpus();
 
 	/*
 	 * We have raced with CPU hotplug. Don't do anything to avoid
@@ -839,15 +839,13 @@ static void rebuild_sched_domains_locked(void)
 	 * Anyways, hotplug work item will rebuild sched domains.
 	 */
 	if (!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
-		goto out;
+		return;
 
 	/* Generate domain masks and attrs */
 	ndoms = generate_sched_domains(&doms, &attr);
 
 	/* Have scheduler rebuild the domains */
 	partition_sched_domains(ndoms, doms, attr);
-out:
-	put_online_cpus();
 }
 #else /* !CONFIG_SMP */
 static void rebuild_sched_domains_locked(void)
@@ -857,9 +855,11 @@ static void rebuild_sched_domains_locked(void)
 
 void rebuild_sched_domains(void)
 {
+	get_online_cpus();
 	mutex_lock(&cpuset_mutex);
 	rebuild_sched_domains_locked();
 	mutex_unlock(&cpuset_mutex);
+	put_online_cpus();
 }
 
 /**
@@ -1528,13 +1528,9 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 	cgroup_taskset_first(tset, &css);
 	cs = css_cs(css);
 
+	lockdep_assert_cpus_held();     /* see cgroup_attach_lock() */
 	mutex_lock(&cpuset_mutex);
 
-	/*
-	 * It should hold cpus lock because a cpu offline event can
-	 * cause set_cpus_allowed_ptr() failed.
-	 */
-	get_online_cpus();
 	/* prepare for attach */
 	if (cs == &top_cpuset)
 		cpumask_copy(cpus_attach, cpu_possible_mask);
@@ -1553,7 +1549,6 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 		cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
 		cpuset_update_task_spread_flag(cs, task);
 	}
-       put_online_cpus();
 
 	/*
 	 * Change mm for all threadgroup leaders. This is expensive and may
@@ -1617,6 +1612,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
 	cpuset_filetype_t type = cft->private;
 	int retval = 0;
 
+	get_online_cpus();
 	mutex_lock(&cpuset_mutex);
 	if (!is_cpuset_online(cs)) {
 		retval = -ENODEV;
@@ -1654,6 +1650,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
 	}
 out_unlock:
 	mutex_unlock(&cpuset_mutex);
+	put_online_cpus();
 	return retval;
 }
 
@@ -1664,6 +1661,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
 	cpuset_filetype_t type = cft->private;
 	int retval = -ENODEV;
 
+	get_online_cpus();
 	mutex_lock(&cpuset_mutex);
 	if (!is_cpuset_online(cs))
 		goto out_unlock;
@@ -1678,6 +1676,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
 	}
 out_unlock:
 	mutex_unlock(&cpuset_mutex);
+	put_online_cpus();
 	return retval;
 }
 
@@ -1716,6 +1715,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
 	kernfs_break_active_protection(of->kn);
 	flush_work(&cpuset_hotplug_work);
 
+	get_online_cpus();
 	mutex_lock(&cpuset_mutex);
 	if (!is_cpuset_online(cs))
 		goto out_unlock;
@@ -1741,6 +1741,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
 	free_trial_cpuset(trialcs);
 out_unlock:
 	mutex_unlock(&cpuset_mutex);
+	put_online_cpus();
 	kernfs_unbreak_active_protection(of->kn);
 	css_put(&cs->css);
 	flush_workqueue(cpuset_migrate_mm_wq);
@@ -1985,6 +1986,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
 	if (!parent)
 		return 0;
 
+	get_online_cpus();
 	mutex_lock(&cpuset_mutex);
 
 	set_bit(CS_ONLINE, &cs->flags);
@@ -2035,6 +2037,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
 	spin_unlock_irq(&callback_lock);
 out_unlock:
 	mutex_unlock(&cpuset_mutex);
+	put_online_cpus();
 	return 0;
 }
 
@@ -2048,6 +2051,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
 {
 	struct cpuset *cs = css_cs(css);
 
+	get_online_cpus();
 	mutex_lock(&cpuset_mutex);
 
 	if (is_sched_load_balance(cs))
@@ -2057,6 +2061,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
 	clear_bit(CS_ONLINE, &cs->flags);
 
 	mutex_unlock(&cpuset_mutex);
+	put_online_cpus();
 }
 
 static void cpuset_css_free(struct cgroup_subsys_state *css)
diff --git a/kernel/compat.c b/kernel/compat.c
index e4548a9e9c52..5f320b0db8d0 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -307,7 +307,7 @@ COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t,  pid, unsigned int, len,
 	if (len & (sizeof(compat_ulong_t)-1))
 		return -EINVAL;
 
-	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 		return -ENOMEM;
 
 	ret = sched_getaffinity(pid, mask);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 46227cc48124..8d5a9fa8a951 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -741,6 +741,9 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
+	if (task_on_rq_migrating(p))
+		flags |= ENQUEUE_MIGRATED;
+
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 
@@ -4950,14 +4953,14 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
 	if (len & (sizeof(unsigned long)-1))
 		return -EINVAL;
 
-	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 		return -ENOMEM;
 
 	ret = sched_getaffinity(pid, mask);
 	if (ret == 0) {
 		unsigned int retlen = min(len, cpumask_size());
 
-		if (copy_to_user(user_mask_ptr, mask, retlen))
+		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
 			ret = -EFAULT;
 		else
 			ret = retlen;
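
The sched_getaffinity() hunks switch to a zeroing allocation because only the low nr_cpu_ids bits of the mask are filled in, while up to cpumask_size() bytes are copied back to user space; anything beyond the written bits must therefore start out zeroed or stale kernel memory could leak. A small user-space analogue of that reasoning, with made-up sizes:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MASK_BYTES ((size_t)16)

static size_t get_affinity(unsigned char *dst, size_t len)
{
	unsigned char *mask = calloc(1, MASK_BYTES);	/* zalloc-style: zeroed */
	size_t retlen = len < MASK_BYTES ? len : MASK_BYTES;

	if (!mask)
		return 0;
	mask[0] = 0x0f;			/* pretend only CPUs 0-3 were filled in */
	memcpy(dst, mask, retlen);	/* copy_to_user() analogue */
	free(mask);
	return retlen;
}

int main(void)
{
	unsigned char buf[16];
	size_t n = get_affinity(buf, sizeof(buf));

	printf("copied %zu bytes, first byte %#x, last byte %#x\n",
	       n, buf[0], buf[15]);
	return 0;
}
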
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 84e7efda98da..eb67f42fb96b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3854,6 +3854,29 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
 #endif
 }
 
+static inline bool entity_is_long_sleeper(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq;
+	u64 sleep_time;
+
+	if (se->exec_start == 0)
+		return false;
+
+	cfs_rq = cfs_rq_of(se);
+
+	sleep_time = rq_clock_task(rq_of(cfs_rq));
+
+	/* Happen while migrating because of clock task divergence */
+	if (sleep_time <= se->exec_start)
+		return false;
+
+	sleep_time -= se->exec_start;
+	if (sleep_time > ((1ULL << 63) / scale_load_down(NICE_0_LOAD)))
+		return true;
+
+	return false;
+}
+
 static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
@@ -3882,8 +3905,29 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 		vruntime -= thresh;
 	}
 
-	/* ensure we never gain time by being placed backwards. */
-	se->vruntime = max_vruntime(se->vruntime, vruntime);
+	/*
+	 * Pull vruntime of the entity being placed to the base level of
+	 * cfs_rq, to prevent boosting it if placed backwards.
+	 * However, min_vruntime can advance much faster than real time, with
+	 * the extreme being when an entity with the minimal weight always runs
+	 * on the cfs_rq. If the waking entity slept for a long time, its
+	 * vruntime difference from min_vruntime may overflow s64 and their
+	 * comparison may get inversed, so ignore the entity's original
+	 * vruntime in that case.
+	 * The maximal vruntime speedup is given by the ratio of normal to
+	 * minimal weight: scale_load_down(NICE_0_LOAD) / MIN_SHARES.
+	 * When placing a migrated waking entity, its exec_start has been set
+	 * from a different rq. In order to take into account a possible
+	 * divergence between new and prev rq's clocks task because of irq and
+	 * stolen time, we take an additional margin.
+	 * So, cutting off on the sleep time of
+	 *     2^63 / scale_load_down(NICE_0_LOAD) ~ 104 days
+	 * should be safe.
+	 */
+	if (entity_is_long_sleeper(se))
+		se->vruntime = vruntime;
+	else
+		se->vruntime = max_vruntime(se->vruntime, vruntime);
 }
 
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
@@ -3978,6 +4022,9 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 
 	if (flags & ENQUEUE_WAKEUP)
 		place_entity(cfs_rq, se, 0);
+	/* Entity has migrated, no longer consider this task hot */
+	if (flags & ENQUEUE_MIGRATED)
+		se->exec_start = 0;
 
 	check_schedstat_required();
 	update_stats_enqueue(cfs_rq, se, flags);
@@ -6544,9 +6591,6 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
 	/* Tell new CPU we are migrated */
 	p->se.avg.last_update_time = 0;
 
-	/* We have migrated, no longer consider this task hot */
-	p->se.exec_start = 0;
-
 	update_scan_period(p, new_cpu);
 }
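
As a quick sanity check of the "~104 days" figure quoted in the place_entity() comment above, 2^63 ns divided by scale_load_down(NICE_0_LOAD) = 1024 works out as follows:

#include <stdio.h>

int main(void)
{
	unsigned long long limit_ns = (1ULL << 63) / 1024;	/* sleep-time cutoff */
	double days = (double)limit_ns / 1e9 / 3600 / 24;

	printf("cutoff = %llu ns = %.1f days\n", limit_ns, days);
	return 0;
}
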
 
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 74e555a22de7..61269cc2fa82 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -935,6 +935,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 
 			cf = op->frames + op->cfsiz * i;
 			err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
+			if (err < 0)
+				goto free_op;
 
 			if (op->flags & CAN_FD_FRAME) {
 				if (cf->len > 64)
@@ -944,12 +946,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 					err = -EINVAL;
 			}
 
-			if (err < 0) {
-				if (op->frames != &op->sframe)
-					kfree(op->frames);
-				kfree(op);
-				return err;
-			}
+			if (err < 0)
+				goto free_op;
 
 			if (msg_head->flags & TX_CP_CAN_ID) {
 				/* copy can_id into frame */
@@ -1020,6 +1018,12 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 		bcm_tx_start_timer(op);
 
 	return msg_head->nframes * op->cfsiz + MHSIZ;
+
+free_op:
+	if (op->frames != &op->sframe)
+		kfree(op->frames);
+	kfree(op);
+	return err;
 }
 
 /*
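
The net/can/bcm.c change is a plain error-path consolidation: every failure inside the loop now jumps to one cleanup label instead of repeating the kfree() logic at each exit point. The generic shape of the pattern, with illustrative names and a simulated failure:

#include <stdio.h>
#include <stdlib.h>

static int setup(int nframes)
{
	int err = 0;
	char *frames = malloc(nframes);

	if (!frames)
		return -1;

	for (int i = 0; i < nframes; i++) {
		if (i == 2) {		/* simulate a copy/validation failure */
			err = -1;
			goto free_op;	/* single cleanup path */
		}
		frames[i] = 0;
	}
	free(frames);
	return nframes;

free_op:
	free(frames);
	return err;
}

int main(void)
{
	printf("%d\n", setup(5));
	return 0;
}
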
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index c72432ce9bf5..898753328c17 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -603,7 +603,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 		truncate = true;
 	}
 
-	nhoff = skb_network_header(skb) - skb_mac_header(skb);
+	nhoff = skb_network_offset(skb);
 	if (skb->protocol == htons(ETH_P_IP) &&
 	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
 		truncate = true;
@@ -612,7 +612,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 		int thoff;
 
 		if (skb_transport_header_was_set(skb))
-			thoff = skb_transport_header(skb) - skb_mac_header(skb);
+			thoff = skb_transport_offset(skb);
 		else
 			thoff = nhoff + sizeof(struct ipv6hdr);
 		if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 00601bc4fdfa..166b7544e54a 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -961,7 +961,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 		truncate = true;
 	}
 
-	nhoff = skb_network_header(skb) - skb_mac_header(skb);
+	nhoff = skb_network_offset(skb);
 	if (skb->protocol == htons(ETH_P_IP) &&
 	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
 		truncate = true;
@@ -970,7 +970,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 		int thoff;
 
 		if (skb_transport_header_was_set(skb))
-			thoff = skb_transport_header(skb) - skb_mac_header(skb);
+			thoff = skb_transport_offset(skb);
 		else
 			thoff = nhoff + sizeof(struct ipv6hdr);
 		if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 0a76ad05e5ae..2974f7262f88 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -236,6 +236,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		result = tcf_classify(skb, fl, &res, true);
 		if (!fl || result < 0)
 			goto fallback;
+		if (result == TC_ACT_SHOT)
+			return NULL;
 
 		cl = (void *)res.class;
 		if (!cl) {
@@ -256,8 +258,6 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		case TC_ACT_TRAP:
 			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 			/* fall through */
-		case TC_ACT_SHOT:
-			return NULL;
 		case TC_ACT_RECLASSIFY:
 			return cbq_reclassify(skb, cl);
 		}
diff --git a/sound/pci/asihpi/hpi6205.c b/sound/pci/asihpi/hpi6205.c
index 2864698436a5..6a49f897c4d9 100644
--- a/sound/pci/asihpi/hpi6205.c
+++ b/sound/pci/asihpi/hpi6205.c
@@ -441,7 +441,7 @@ void HPI_6205(struct hpi_message *phm, struct hpi_response *phr)
 		pao = hpi_find_adapter(phm->adapter_index);
 	} else {
 		/* subsys messages don't address an adapter */
-		_HPI_6205(NULL, phm, phr);
+		phr->error = HPI_ERROR_INVALID_OBJ_INDEX;
 		return;
 	}
 
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index ca8a37388d56..9f0e6bbc523c 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -3620,8 +3620,10 @@ static int tuning_ctl_set(struct hda_codec *codec, hda_nid_t nid,
 
 	for (i = 0; i < TUNING_CTLS_COUNT; i++)
 		if (nid == ca0132_tuning_ctls[i].nid)
-			break;
+			goto found;
 
+	return -EINVAL;
+found:
 	snd_hda_power_up(codec);
 	dspio_set_param(codec, ca0132_tuning_ctls[i].mid, 0x20,
 			ca0132_tuning_ctls[i].req,
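
The patch_ca0132 fix adds the missing "not found" handling after a search loop, so the index is never used when the loop falls through without a match. The general pattern, as a standalone sketch with invented data:

#include <stdio.h>

static const int ids[] = { 10, 20, 30 };
#define COUNT (int)(sizeof(ids) / sizeof(ids[0]))

static int lookup(int id)
{
	int i;

	for (i = 0; i < COUNT; i++)
		if (ids[i] == id)
			goto found;
	return -1;		/* no match: never index past the table */
found:
	return i;
}

int main(void)
{
	printf("%d %d\n", lookup(20), lookup(99));
	return 0;
}
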
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 69f88d3abf50..cfa958dc2dd5 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -952,7 +952,10 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
 	SND_PCI_QUIRK(0x17aa, 0x3905, "Lenovo G50-30", CXT_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
-	SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_PINCFG_LENOVO_NOTEBOOK),
+	/* NOTE: we'd need to extend the quirk for 17aa:3977 as the same
+	 * PCI SSID is used on multiple Lenovo models
+	 */
+	SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
@@ -974,6 +977,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
 	{ .id = CXT_FIXUP_HP_DOCK, .name = "hp-dock" },
 	{ .id = CXT_FIXUP_MUTE_LED_GPIO, .name = "mute-led-gpio" },
 	{ .id = CXT_FIXUP_HP_MIC_NO_PRESENCE, .name = "hp-mic-fix" },
+	{ .id = CXT_PINCFG_LENOVO_NOTEBOOK, .name = "lenovo-20149" },
 	{}
 };
 
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 01ba7a939ac4..342d6edb06ad 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -53,8 +53,12 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
 	case UAC_VERSION_1:
 	default: {
 		struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
-		if (format >= 64)
-			return 0; /* invalid format */
+		if (format >= 64) {
+			usb_audio_info(chip,
+				       "%u:%d: invalid format type 0x%llx is detected, processed as PCM\n",
+				       fp->iface, fp->altsetting, format);
+			format = UAC_FORMAT_TYPE_I_PCM;
+		}
 		sample_width = fmt->bBitResolution;
 		sample_bytes = fmt->bSubframeSize;
 		format = 1ULL << format;
