Message-Id: <1377224142-25160-6-git-send-email-jeffrey.t.kirsher@intel.com>
Date:	Thu, 22 Aug 2013 19:15:39 -0700
From:	Jeff Kirsher <jeffrey.t.kirsher@...el.com>
To:	davem@...emloft.net
Cc:	Jesse Brandeburg <jesse.brandeburg@...el.com>,
	netdev@...r.kernel.org, gospo@...hat.com, sassmann@...hat.com,
	Shannon Nelson <shannon.nelson@...el.com>,
	Mitch Williams <mitch.a.williams@...el.com>,
	PJ Waskiewicz <peter.p.waskiewicz.jr@...el.com>,
	e1000-devel@...ts.sourceforge.net,
	Jeff Kirsher <jeffrey.t.kirsher@...el.com>
Subject: [net-next v2 5/8] i40e: implement virtual device interface

From: Jesse Brandeburg <jesse.brandeburg@...el.com>

While not part of this patch series, an i40evf driver is on its
way; it uses these files to communicate with the PF driver.

This patch contains the header and implementation files for the
PF to VF interface.

Signed-off-by: Jesse Brandeburg <jesse.brandeburg@...el.com>
Signed-off-by: Shannon Nelson <shannon.nelson@...el.com>
Signed-off-by: Mitch Williams <mitch.a.williams@...el.com>
CC: PJ Waskiewicz <peter.p.waskiewicz.jr@...el.com>
CC: e1000-devel@...ts.sourceforge.net
Tested-by: Kavindya Deegala <kavindya.s.deegala@...el.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@...el.com>
---
v1: this is the initial submittal
v2: address upstream comments and internal fixes
    kernel memory allocators already print warnings
    various fixes based on internal review
    add Mitch's sign-off (i40evf driver owner)
---
 drivers/net/ethernet/intel/i40e/i40e_virtchnl.h    |  368 +++
 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 2437 ++++++++++++++++++++
 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h |  123 +
 3 files changed, 2928 insertions(+)
 create mode 100644 drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
 create mode 100644 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
 create mode 100644 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h

diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
new file mode 100644
index 0000000..fd4ff5e
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -0,0 +1,368 @@
+/*******************************************************************************
+
+  Intel Ethernet Controller XL710 Family Linux Driver
+  Copyright(c) 2013 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@...ts.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _I40E_VIRTCHNL_H_
+#define _I40E_VIRTCHNL_H_
+
+#include "i40e_type.h"
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the various i40e drivers.
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * Firmware copies the cookie fields when sending messages between the PF and
+ * VF, but uses all other fields internally. Due to this limitation, we
+ * must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value is
+ * of type i40e_status_code, defined in i40e_type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of these
+ * opcodes. The VF driver must first validate the API version of the PF driver,
+ * then request a reset, then get resources, then configure queues and
+ * interrupts. After these operations are complete, the VF driver may start
+ * its queues, optionally add MAC and VLAN filters, and process traffic.
+ */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum i40e_virtchnl_ops {
+/* VF sends requests to the PF for the following ops. */
+	I40E_VIRTCHNL_OP_UNKNOWN = 0,
+	I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+	I40E_VIRTCHNL_OP_RESET_VF,
+	I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+	I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
+	I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
+	I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+	I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+	I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+	I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+	I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+	I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+	I40E_VIRTCHNL_OP_ADD_VLAN,
+	I40E_VIRTCHNL_OP_DEL_VLAN,
+	I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+	I40E_VIRTCHNL_OP_GET_STATS,
+	I40E_VIRTCHNL_OP_FCOE,
+/* PF sends status change events to VFs using the following op. */
+	I40E_VIRTCHNL_OP_EVENT,
+};
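+
+/* Illustrative VF bring-up ordering, restating the description above
+ * (informational sketch only; "send" is a hypothetical helper standing in
+ * for an admin queue message exchange):
+ *
+ *	send(I40E_VIRTCHNL_OP_VERSION);             check major/minor
+ *	send(I40E_VIRTCHNL_OP_RESET_VF);            then poll VFGEN_RSTAT
+ *	send(I40E_VIRTCHNL_OP_GET_VF_RESOURCES);
+ *	send(I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
+ *	send(I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
+ *	send(I40E_VIRTCHNL_OP_ENABLE_QUEUES);
+ *	send(I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);   optional filters
+ */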
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct i40e_virtchnl_msg {
+	u8 pad[8];			 /* AQ flags/opcode/len/retval fields */
+	enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+	enum i40e_status_code v_retval;  /* ditto for desc->retval */
+	u32 vfid;			 /* used by PF when sending to VF */
+};
+
+/* Message descriptions and data structures. */
+
+/* I40E_VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define I40E_VIRTCHNL_VERSION_MAJOR		1
+#define I40E_VIRTCHNL_VERSION_MINOR		0
+struct i40e_virtchnl_version_info {
+	u32 major;
+	u32 minor;
+};
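+
+/* Example VF-side handling of the PF reply (a minimal sketch; pf_ver is
+ * assumed to point into the reply buffer read from the admin queue):
+ *
+ *	struct i40e_virtchnl_version_info *pf_ver = ...;
+ *	if (pf_ver->major != I40E_VIRTCHNL_VERSION_MAJOR)
+ *		abort initialization;       VF cannot operate
+ *	else if (pf_ver->minor != I40E_VIRTCHNL_VERSION_MINOR)
+ *		log a warning;              VF can still operate
+ */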
+
+/* I40E_VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
+ * VF sends this request to PF with no parameters
+ * PF responds with an indirect message containing
+ * i40e_virtchnl_vf_resource and one or more
+ * i40e_virtchnl_vsi_resource structures.
+ */
+
+struct i40e_virtchnl_vsi_resource {
+	u16 vsi_id;
+	u16 num_queue_pairs;
+	enum i40e_vsi_type vsi_type;
+	u16 qset_handle;
+	u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+};
+/* VF offload flags */
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2	0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE	0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN	0x00010000
+
+struct i40e_virtchnl_vf_resource {
+	u16 num_vsis;
+	u16 num_queue_pairs;
+	u16 max_vectors;
+	u16 max_mtu;
+
+	u32 vf_offload_flags;
+	u32 max_fcoe_contexts;
+	u32 max_fcoe_filters;
+
+	struct i40e_virtchnl_vsi_resource vsi_res[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of i40e_virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct i40e_virtchnl_txq_info {
+	u16 vsi_id;
+	u16 queue_id;
+	u16 ring_len;		/* number of descriptors, multiple of 8 */
+	u16 headwb_enabled;
+	u64 dma_ring_addr;
+	u64 dma_headwb_addr;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of i40e_virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct i40e_virtchnl_rxq_info {
+	u16 vsi_id;
+	u16 queue_id;
+	u32 ring_len;		/* number of descriptors, multiple of 32 */
+	u16 hdr_size;
+	u16 splithdr_enabled;
+	u32 databuffer_size;
+	u32 max_pkt_size;
+	u64 dma_ring_addr;
+	enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct i40e_virtchnl_queue_pair_info {
+	/* NOTE: vsi_id and queue_id should be identical for both queues. */
+	struct i40e_virtchnl_txq_info txq;
+	struct i40e_virtchnl_rxq_info rxq;
+};
+
+struct i40e_virtchnl_vsi_queue_config_info {
+	u16 vsi_id;
+	u16 num_queue_pairs;
+	struct i40e_virtchnl_queue_pair_info qpair[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct i40e_virtchnl_vector_map {
+	u16 vsi_id;
+	u16 vector_id;
+	u16 rxq_map;
+	u16 txq_map;
+	u16 rxitr_idx;
+	u16 txitr_idx;
+};
+
+struct i40e_virtchnl_irq_map_info {
+	u16 num_vectors;
+	struct i40e_virtchnl_vector_map vecmap[1];
+};
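+
+/* Example: map RX and TX queue 0 of VSI 0 to vector 1 (sketch only):
+ *
+ *	struct i40e_virtchnl_vector_map map = {
+ *		.vsi_id    = 0,
+ *		.vector_id = 1,
+ *		.rxq_map   = BIT(0),
+ *		.txq_map   = BIT(0),
+ *	};
+ */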
+
+/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ * I40E_VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct i40e_virtchnl_queue_select {
+	u16 vsi_id;
+	u16 pad;
+	u32 rx_queues;
+	u32 tx_queues;
+};
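+
+/* Example: enable queue pairs 0 and 1 of VSI 0 (sketch only):
+ *
+ *	struct i40e_virtchnl_queue_select vqs = {
+ *		.vsi_id    = 0,
+ *		.rx_queues = BIT(0) | BIT(1),
+ *		.tx_queues = BIT(0) | BIT(1),
+ *	};
+ */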
+
+/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct i40e_virtchnl_ether_addr {
+	u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+	u8 pad[2];
+};
+
+struct i40e_virtchnl_ether_addr_list {
+	u16 vsi_id;
+	u16 num_elements;
+	struct i40e_virtchnl_ether_addr list[1];
+};
+
+/* I40E_VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct i40e_virtchnl_vlan_filter_list {
+	u16 vsi_id;
+	u16 num_elements;
+	u16 vlan_id[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct i40e_virtchnl_promisc_info {
+	u16 vsi_id;
+	u16 flags;
+};
+
+#define I40E_FLAG_VF_UNICAST_PROMISC	0x00000001
+#define I40E_FLAG_VF_MULTICAST_PROMISC	0x00000002
+
+/* I40E_VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct i40e_eth_stats in an external buffer.
+ */
+
+/* I40E_VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum i40e_virtchnl_event_codes {
+	I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
+	I40E_VIRTCHNL_EVENT_LINK_CHANGE,
+	I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
+	I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+#define I40E_PF_EVENT_SEVERITY_INFO		0
+#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM	255
+
+struct i40e_virtchnl_pf_event {
+	enum i40e_virtchnl_event_codes event;
+	union {
+		struct {
+			enum i40e_aq_link_speed link_speed;
+			bool link_status;
+		} link_event;
+	} event_data;
+
+	int severity;
+};
+
+/* The following are TBD, not necessary for LAN functionality.
+ * I40E_VIRTCHNL_OP_FCOE
+ */
+
+/* VF reset states - these are written into the RSTAT register:
+ * I40E_VFGEN_RSTAT1 on the PF
+ * I40E_VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return DEADBEEF, which, when masked,
+ * will result in 3.
+ */
+enum i40e_vfr_states {
+	I40E_VFR_INPROGRESS = 0,
+	I40E_VFR_COMPLETED,
+	I40E_VFR_VFACTIVE,
+	I40E_VFR_UNKNOWN,
+};
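+
+/* Example VF-side poll loop (sketch only; the state-mask define is an
+ * assumption here, and the delay policy is illustrative):
+ *
+ *	do {
+ *		usleep_range(10, 20);
+ *		reg = rd32(hw, I40E_VFGEN_RSTAT) &
+ *		      I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ *	} while (reg == I40E_VFR_INPROGRESS);
+ */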
+
+#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
new file mode 100644
index 0000000..06f98fc
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -0,0 +1,2437 @@
+/*******************************************************************************
+
+  Intel Ethernet Controller XL710 Family Linux Driver
+  Copyright(c) 2013 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@...ts.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "i40e.h"
+
+/***********************misc routines*****************************/
+
+/**
+ * i40e_vc_isvalid_vsi_id
+ * @vf: pointer to the vf info
+ * @vsi_id: vf relative vsi id
+ *
+ * check whether the given vsi id is valid
+ **/
+static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
+{
+	struct i40e_pf *pf = vf->pf;
+
+	return (pf->vsi[vsi_id]->vf_id == vf->vf_id);
+}
+
+/**
+ * i40e_vc_isvalid_queue_id
+ * @vf: pointer to the vf info
+ * @vsi_id: vsi id
+ * @qid: vsi relative queue id
+ *
+ * check whether the given queue id is valid
+ **/
+static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
+					    u8 qid)
+{
+	struct i40e_pf *pf = vf->pf;
+
+	return (qid < pf->vsi[vsi_id]->num_queue_pairs);
+}
+
+/**
+ * i40e_vc_isvalid_vector_id
+ * @vf: pointer to the vf info
+ * @vector_id: vf relative vector id
+ *
+ * check whether the given vector id is valid
+ **/
+static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
+{
+	struct i40e_pf *pf = vf->pf;
+
+	return (vector_id < pf->hw.func_caps.num_msix_vectors_vf);
+}
+
+/***********************vf resource mgmt routines*****************/
+
+/**
+ * i40e_vc_get_pf_queue_id
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue id
+ *
+ * return pf relative queue id
+ **/
+static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
+				   u8 vsi_queue_id)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = pf->vsi[vsi_idx];
+	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
+
+	if (vsi->info.mapping_flags & I40E_AQ_VSI_QUE_MAP_NONCONTIG)
+		pf_queue_id = vsi->info.queue_mapping[vsi_queue_id];
+	else
+		pf_queue_id = vsi->info.queue_mapping[0] + vsi_queue_id;
+
+	return pf_queue_id;
+}
+
+/**
+ * i40e_ctrl_vsi_tx_queue
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue index
+ * @ctrl: control flags
+ *
+ * enable/disable/enable check/disable check
+ **/
+static enum i40e_status_code i40e_ctrl_vsi_tx_queue(struct i40e_vf *vf,
+						    u16 vsi_idx,
+						    u16 vsi_queue_id,
+						    enum i40e_queue_ctrl ctrl)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	enum i40e_status_code ret = I40E_SUCCESS;
+	u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+	u32 reg = rd32(hw, I40E_QTX_ENA(pf_queue_id));
+	bool writeback = false;
+
+	switch (ctrl) {
+	case I40E_QUEUE_CTRL_ENABLE:
+		reg |= I40E_QTX_ENA_QENA_REQ_MASK;
+		writeback = true;
+		break;
+	case I40E_QUEUE_CTRL_ENABLECHECK:
+		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ?
+		    I40E_SUCCESS : I40E_ERR_CONFIG;
+		break;
+	case I40E_QUEUE_CTRL_DISABLE:
+		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+		writeback = true;
+		break;
+	case I40E_QUEUE_CTRL_DISABLECHECK:
+		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ?
+		    I40E_ERR_CONFIG : I40E_SUCCESS;
+		break;
+	case I40E_QUEUE_CTRL_FASTDISABLE:
+		reg |= I40E_QTX_ENA_FAST_QDIS_MASK;
+		writeback = true;
+		break;
+	case I40E_QUEUE_CTRL_FASTDISABLECHECK:
+		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ?
+		    I40E_ERR_CONFIG : I40E_SUCCESS;
+		if (ret == I40E_SUCCESS) {
+			reg &= ~I40E_QTX_ENA_FAST_QDIS_MASK;
+			writeback = true;
+		}
+		break;
+	default:
+		ret = I40E_ERR_PARAM;
+		break;
+	}
+
+	if (writeback) {
+		wr32(hw, I40E_QTX_ENA(pf_queue_id), reg);
+		flush(hw);
+	}
+
+	return ret;
+}
+
+/**
+ * i40e_ctrl_vsi_rx_queue
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue index
+ * @ctrl: control flags
+ *
+ * enable/disable/enable check/disable check
+ **/
+static enum i40e_status_code i40e_ctrl_vsi_rx_queue(struct i40e_vf *vf,
+						    u16 vsi_idx,
+						    u16 vsi_queue_id,
+						    enum i40e_queue_ctrl ctrl)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	enum i40e_status_code ret = I40E_SUCCESS;
+	u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+	u32 reg = rd32(hw, I40E_QRX_ENA(pf_queue_id));
+	bool writeback = false;
+
+	switch (ctrl) {
+	case I40E_QUEUE_CTRL_ENABLE:
+		reg |= I40E_QRX_ENA_QENA_REQ_MASK;
+		writeback = true;
+		break;
+	case I40E_QUEUE_CTRL_ENABLECHECK:
+		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ?
+		    I40E_SUCCESS : I40E_ERR_CONFIG;
+		break;
+	case I40E_QUEUE_CTRL_DISABLE:
+		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
+		writeback = true;
+		break;
+	case I40E_QUEUE_CTRL_DISABLECHECK:
+		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ?
+		    I40E_ERR_CONFIG : I40E_SUCCESS;
+		break;
+	case I40E_QUEUE_CTRL_FASTDISABLE:
+		reg |= I40E_QRX_ENA_FAST_QDIS_MASK;
+		writeback = true;
+		break;
+	case I40E_QUEUE_CTRL_FASTDISABLECHECK:
+		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ?
+		    I40E_ERR_CONFIG : I40E_SUCCESS;
+		if (ret == I40E_SUCCESS) {
+			reg &= ~I40E_QRX_ENA_FAST_QDIS_MASK;
+			writeback = true;
+		}
+		break;
+	default:
+		ret = I40E_ERR_PARAM;
+		break;
+	}
+
+	if (writeback) {
+		wr32(hw, I40E_QRX_ENA(pf_queue_id), reg);
+		flush(hw);
+	}
+
+	return ret;
+}
+
+/**
+ * i40e_config_irq_link_list
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vecmap: irq map info
+ *
+ * configure irq link list from the map
+ **/
+static enum i40e_status_code i40e_config_irq_link_list(struct i40e_vf *vf,
+						       u16 vsi_idx,
+						       struct
+						       i40e_virtchnl_vector_map
+						       *vecmap)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	enum i40e_queue_type qtype;
+	u32 reg, reg_idx;
+	unsigned long linklistmap = 0, tempmap;
+	u16 vsi_queue_id, pf_queue_id;
+	u16 next_q, itr_idx, vector_id;
+
+	vector_id = vecmap->vector_id;
+	/* setup the head */
+	if (0 == vector_id)
+		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+	else
+		reg_idx = I40E_VPINT_LNKLSTN(
+			    ((pf->hw.func_caps.num_msix_vectors_vf - 1)
+					      * vf->vf_id) + (vector_id - 1));
+
+	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
+		/* Special case - No queues mapped on this vector */
+		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
+		goto irq_list_done;
+	}
+	tempmap = vecmap->rxq_map;
+	vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+	while (vsi_queue_id < I40E_MAX_VSI_QP) {
+		linklistmap |= (1 <<
+				(I40E_VIRTCHNL_SUPPORTED_QTYPES *
+				 vsi_queue_id));
+		vsi_queue_id =
+		    find_next_bit(&tempmap, I40E_MAX_VSI_QP, vsi_queue_id + 1);
+	}
+
+	tempmap = vecmap->txq_map;
+	vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+	while (vsi_queue_id < I40E_MAX_VSI_QP) {
+		linklistmap |= (1 <<
+				(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
+				 + 1));
+		vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+					     vsi_queue_id + 1);
+	}
+
+	next_q = find_first_bit(&linklistmap,
+				(I40E_MAX_VSI_QP *
+				 I40E_VIRTCHNL_SUPPORTED_QTYPES));
+	vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES;
+	qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES;
+	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
+
+	wr32(hw, reg_idx, reg);
+
+	while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+		switch (qtype) {
+		case I40E_QUEUE_TYPE_RX:
+			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
+			itr_idx = vecmap->rxitr_idx;
+			break;
+		case I40E_QUEUE_TYPE_TX:
+			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
+			itr_idx = vecmap->txitr_idx;
+			break;
+		default:
+			break;
+		}
+
+		next_q = find_next_bit(&linklistmap,
+				       (I40E_MAX_VSI_QP *
+					I40E_VIRTCHNL_SUPPORTED_QTYPES),
+				       next_q + 1);
+		if (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
+			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
+			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
+							      vsi_queue_id);
+		} else {
+			pf_queue_id = I40E_QUEUE_END_OF_LIST;
+			qtype = 0;
+		}
+
+		/* format for the RQCTL & TQCTL regs is the same */
+		reg = (vector_id) |
+		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+		    (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+		wr32(hw, reg_idx, reg);
+	}
+
+irq_list_done:
+	flush(hw);
+
+	return I40E_SUCCESS;
+}
+
+/**
+ * i40e_config_vsi_tx_queue
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue index
+ * @info: config. info
+ *
+ * configure tx queue
+ **/
+static enum i40e_status_code i40e_config_vsi_tx_queue(struct i40e_vf *vf,
+						      u16 vsi_idx,
+						      u16 vsi_queue_id,
+						      struct
+						      i40e_virtchnl_txq_info
+						      *info)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_hmc_obj_txq tx_ctx;
+	enum i40e_status_code ret;
+	u32 qtx_ctl;
+	u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+
+	/* clear the context structure first */
+	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
+
+	/* only set the required fields */
+	tx_ctx.base = info->dma_ring_addr / 128;
+	tx_ctx.qlen = info->ring_len;
+	tx_ctx.rdylist = pf->vsi[vsi_idx]->info.qs_handle[0];
+	tx_ctx.rdylist_act = 0;
+
+	/* clear the context in the HMC */
+	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
+	if (ret != I40E_SUCCESS) {
+		dev_err(&pf->pdev->dev,
+			"%s: Failed to clear LAN Tx queue context %d, error: %d\n",
+			__func__, pf_queue_id, ret);
+		goto error_context;
+	}
+
+	/* set the context in the HMC */
+	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
+	if (ret != I40E_SUCCESS) {
+		dev_err(&pf->pdev->dev,
+			"%s: Failed to set LAN Tx queue context %d error: %d\n",
+			__func__, pf_queue_id, ret);
+		goto error_context;
+	}
+
+	/* associate this queue with the PCI VF function */
+	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
+	qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
+		    & I40E_QTX_CTL_PF_INDX_MASK);
+	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
+		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
+		    & I40E_QTX_CTL_VFVM_INDX_MASK);
+	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
+	flush(hw);
+
+error_context:
+	return ret;
+}
+
+/**
+ * i40e_config_vsi_rx_queue
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue index
+ * @info: config. info
+ *
+ * configure rx queue
+ **/
+static enum i40e_status_code i40e_config_vsi_rx_queue(struct i40e_vf *vf,
+						      u16 vsi_idx,
+						      u16 vsi_queue_id,
+						      struct
+						      i40e_virtchnl_rxq_info
+						      *info)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_hmc_obj_rxq rx_ctx;
+	enum i40e_status_code ret;
+	u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+
+	/* clear the context structure first */
+	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+
+	/* only set the required fields */
+	rx_ctx.base = info->dma_ring_addr / 128;
+	rx_ctx.qlen = info->ring_len;
+
+	if (info->splithdr_enabled) {
+		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2	|
+				  I40E_RX_SPLIT_IP	|
+				  I40E_RX_SPLIT_TCP_UDP |
+				  I40E_RX_SPLIT_SCTP;
+		/* header length validation */
+		if (info->hdr_size > ((2 * 1024) - 64)) {
+			ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
+
+		/* set splitalways mode 10b */
+		rx_ctx.dtype = 0x2;
+	}
+
+	/* databuffer length validation */
+	if (info->databuffer_size > ((16 * 1024) - 128)) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
+
+	/* max pkt. length validation */
+	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+	rx_ctx.rxmax = info->max_pkt_size;
+
+	/* enable 32bytes desc always */
+	rx_ctx.dsize = 1;
+
+	/* default values */
+	rx_ctx.tphrdesc_ena = 1;
+	rx_ctx.tphwdesc_ena = 1;
+	rx_ctx.tphdata_ena = 1;
+	rx_ctx.tphhead_ena = 1;
+	rx_ctx.lrxqthresh = 2;
+	rx_ctx.crcstrip = 1;
+
+	/* clear the context in the HMC */
+	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
+	if (ret != I40E_SUCCESS) {
+		dev_err(&pf->pdev->dev,
+			"%s: Failed to clear LAN Rx queue context %d, error: %d\n",
+			__func__, pf_queue_id, ret);
+		goto error_context;
+	}
+
+	/* set the context in the HMC */
+	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
+	if (ret != I40E_SUCCESS)
+		dev_err(&pf->pdev->dev,
+			"%s: Failed to set LAN Rx queue context %d error: %d\n",
+			__func__, pf_queue_id, ret);
+
+error_param:
+error_context:
+	return ret;
+}
+
+/**
+ * i40e_alloc_vsi_res
+ * @vf: pointer to the vf info
+ * @type: type of VSI to allocate
+ *
+ * alloc vf vsi context & resources
+ **/
+static enum i40e_status_code i40e_alloc_vsi_res(struct i40e_vf *vf,
+						enum i40e_vsi_type type)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_vsi *vsi;
+	struct i40e_mac_filter *f = NULL;
+	enum i40e_status_code ret = I40E_SUCCESS;
+
+	vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);
+
+	if (!vsi) {
+		dev_err(&pf->pdev->dev,
+			"%s: add vsi failed for vf %d, aq_err %d\n",
+			__func__, vf->vf_id, pf->hw.aq.asq_last_status);
+		ret = I40E_ERR_CONFIG;
+		goto error_alloc_vsi_res;
+	}
+	if (type == I40E_VSI_SRIOV) {
+		vf->lan_vsi_index = vsi->idx;
+		vf->lan_vsi_id = vsi->id;
+		dev_info(&pf->pdev->dev,
+			 "%s: LAN VSI index %d, VSI id %d\n",
+			 __func__, vsi->idx, vsi->id);
+		f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
+				    0, true, false);
+	}
+	if (NULL == f) {
+		dev_err(&pf->pdev->dev,
+			"%s: Unable to add ucast filter\n", __func__);
+		ret = I40E_ERR_NO_MEMORY;
+		goto error_alloc_vsi_res;
+	}
+
+	/* program mac filter */
+	if (I40E_SUCCESS != i40e_sync_vsi_filters(vsi)) {
+		dev_err(&pf->pdev->dev,
+			"%s: Unable to program ucast filters\n", __func__);
+		ret = I40E_ERR_CONFIG;
+		goto error_alloc_vsi_res;
+	}
+
+	/* accept bcast pkts. by default */
+	ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
+	if (I40E_SUCCESS != ret)
+		dev_err(&pf->pdev->dev,
+			"%s: set vsi bcast failed for vf %d, vsi %d, aq_err %d\n",
+			__func__, vf->vf_id, vsi->idx,
+			pf->hw.aq.asq_last_status);
+
+error_alloc_vsi_res:
+	return ret;
+}
+
+/**
+ * i40e_reset_vf
+ * @vf: pointer to the vf structure
+ * @flr: VFLR was issued or not
+ *
+ * reset the vf
+ **/
+enum i40e_status_code i40e_reset_vf(struct i40e_vf *vf, bool flr)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	enum i40e_status_code ret = I40E_ERR_PARAM;
+	u32 reg, reg_idx, msix_vf;
+	u16 pf_queue_id;
+	int i, j;
+	bool rsd = false;
+
+	/* warn the VF */
+	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_INPROGRESS);
+
+	clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+
+	/* PF triggers VFR only when the VF requests it; in the
+	 * case of VFLR, HW triggers VFR itself
+	 */
+	if (!flr) {
+		/* reset vf using VPGEN_VFRTRIG reg */
+		reg = I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+		flush(hw);
+	}
+
+	/* poll VPGEN_VFRSTAT reg to make sure
+	 * that reset is complete
+	 */
+	for (i = 0; i < 4; i++) {
+		/* vf reset requires driver to first reset the
+		 * vf & then poll the status register to make sure
+		 * that the requested op was completed
+		 * successfully
+		 */
+		udelay(10);
+		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
+			rsd = true;
+			break;
+		}
+	}
+
+	if (!rsd)
+		dev_err(&pf->pdev->dev, "%s: VF reset check timeout %d\n",
+			__func__, vf->vf_id);
+
+	/* fast disable qps */
+	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
+		ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
+					     I40E_QUEUE_CTRL_FASTDISABLE);
+		ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
+					     I40E_QUEUE_CTRL_FASTDISABLE);
+	}
+
+	/* Queue enable/disable requires driver to
+	 * first reset the vf & then poll the status register
+	 * to make sure that the requested op was completed
+	 * successfully
+	 */
+	udelay(10);
+	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
+		ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
+					     I40E_QUEUE_CTRL_FASTDISABLECHECK);
+		if (ret != I40E_SUCCESS)
+			dev_info(&pf->pdev->dev,
+				 "%s: Queue control check failed on Tx queue %d of VSI %d VF %d\n",
+				 __func__, j, vf->lan_vsi_index, vf->vf_id);
+		ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
+					     I40E_QUEUE_CTRL_FASTDISABLECHECK);
+		if (ret != I40E_SUCCESS)
+			dev_info(&pf->pdev->dev,
+				 "%s: Queue control check failed on Rx queue %d of VSI %d VF %d\n",
+				 __func__, j, vf->lan_vsi_index, vf->vf_id);
+	}
+
+	/* clear the irq settings */
+	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+	for (i = 0; i < msix_vf; i++) {
+		/* format is the same for both registers */
+		if (0 == i)
+			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+		else
+			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
+						      (vf->vf_id))
+						     + (i - 1));
+		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
+		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
+		wr32(hw, reg_idx, reg);
+		flush(hw);
+	}
+	/* disable interrupts so the VF starts in a known state */
+	for (i = 0; i < msix_vf; i++) {
+		/* format is the same for both registers */
+		if (0 == i)
+			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
+		else
+			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
+						      (vf->vf_id))
+						     + (i - 1));
+		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+		flush(hw);
+	}
+
+	/* set the defaults for the rqctl & tqctl registers */
+	reg = (I40E_QINT_RQCTL_NEXTQ_INDX_MASK | I40E_QINT_RQCTL_ITR_INDX_MASK |
+	       I40E_QINT_RQCTL_NEXTQ_TYPE_MASK);
+	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
+		pf_queue_id = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
+		wr32(hw, I40E_QINT_RQCTL(pf_queue_id), reg);
+		wr32(hw, I40E_QINT_TQCTL(pf_queue_id), reg);
+	}
+
+	/* clear the reset bit in the VPGEN_VFRTRIG reg */
+	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+	/* tell the VF the reset is done */
+	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
+	flush(hw);
+
+	return ret;
+}
+
+/**
+ * i40e_enable_vf_mappings
+ * @vf: pointer to the vf info
+ *
+ * enable vf mappings
+ **/
+static enum i40e_status_code i40e_enable_vf_mappings(struct i40e_vf *vf)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	u32 reg, total_queue_pairs = 0;
+	int j;
+
+	/* Tell the hardware we're using noncontiguous mapping. HW requires
+	 * that VF queues be mapped using this method, even when they are
+	 * contiguous in real life
+	 */
+	wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
+	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
+
+	/* enable VF vplan_qtable mappings */
+	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
+	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
+
+	/* map PF queues to VF queues */
+	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
+		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
+		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
+		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
+		total_queue_pairs++;
+	}
+
+	/* map PF queues to VSI */
+	for (j = 0; j < 7; j++) {
+		if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
+			reg = 0x07FF07FF;	/* unused */
+		} else {
+			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
+							  j * 2);
+			reg = qid;
+			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
+						      (j * 2) + 1);
+			reg |= qid << 16;
+		}
+		wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
+	}
+
+	flush(hw);
+
+	return I40E_SUCCESS;
+}
+
+/**
+ * i40e_disable_vf_mappings
+ * @vf: pointer to the vf info
+ *
+ * disable vf mappings
+ **/
+static enum i40e_status_code i40e_disable_vf_mappings(struct i40e_vf *vf)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	int i;
+
+	/* disable qp mappings */
+	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
+	for (i = 0; i < I40E_MAX_VSI_QP; i++)
+		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
+		     I40E_QUEUE_END_OF_LIST);
+	flush(hw);
+
+	return I40E_SUCCESS;
+}
+
+/**
+ * i40e_free_vf_res
+ * @vf: pointer to the vf info
+ *
+ * free vf resources
+ **/
+static enum i40e_status_code i40e_free_vf_res(struct i40e_vf *vf)
+{
+	struct i40e_pf *pf = vf->pf;
+	enum i40e_status_code ret = I40E_ERR_PARAM;
+
+	/* free vsi & disconnect it from the parent uplink */
+	if (vf->lan_vsi_index) {
+		ret = i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
+		vf->lan_vsi_index = 0;
+		vf->lan_vsi_id = 0;
+	}
+	/* reset some of the state variables keeping
+	 * track of the resources
+	 */
+	vf->num_queue_pairs = 0;
+	vf->vf_states = 0;
+
+	return ret;
+}
+
+/**
+ * i40e_alloc_vf_res
+ * @vf: pointer to the vf info
+ *
+ * allocate vf resources
+ **/
+static enum i40e_status_code i40e_alloc_vf_res(struct i40e_vf *vf)
+{
+	struct i40e_pf *pf = vf->pf;
+	enum i40e_status_code ret;
+	int total_queue_pairs = 0;
+
+	/* allocate hw vsi context & associated resources */
+	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
+	if (I40E_SUCCESS != ret)
+		goto error_alloc;
+	total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
+	set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+
+	/* store the total qps number for the runtime
+	 * vf req validation
+	 */
+	vf->num_queue_pairs = total_queue_pairs;
+
+	/* vf is now completely initialized */
+	set_bit(I40E_VF_STAT_INIT, &vf->vf_states);
+
+error_alloc:
+	if (I40E_SUCCESS != ret)
+		i40e_free_vf_res(vf);
+
+	return ret;
+}
+
+/**
+ * i40e_vfs_are_assigned
+ * @pf: pointer to the pf structure
+ *
+ * Determine if any VFs are assigned to VMs
+ **/
+static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
+{
+	struct pci_dev *pdev = pf->pdev;
+	struct pci_dev *vfdev;
+
+	/* loop through all the VFs to see if we own any that are assigned */
+	vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_VF_DEVICE_ID, NULL);
+	while (vfdev) {
+		/* if we don't own it we don't care */
+		if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
+			/* if it is assigned we cannot release it */
+			if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+				return true;
+		}
+
+		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+				       I40E_VF_DEVICE_ID,
+				       vfdev);
+	}
+
+	return false;
+}
+
+/**
+ * i40e_free_vfs
+ * @pf: pointer to the pf structure
+ *
+ * free vf resources
+ **/
+enum i40e_status_code i40e_free_vfs(struct i40e_pf *pf)
+{
+	enum i40e_status_code ret = I40E_SUCCESS;
+	int i;
+	struct i40e_hw *hw = &pf->hw;
+
+	if (!pf->vf)
+		return I40E_SUCCESS;
+
+	/* Disable interrupt 0 so we don't try to handle the VFLR. */
+	wr32(hw, I40E_PFINT_DYN_CTL0, 0);
+	flush(hw);
+
+	/* free up vf resources */
+	for (i = 0; i < pf->num_alloc_vfs; i++) {
+		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
+			ret = i40e_free_vf_res(&pf->vf[i]);
+		/* disable qp mappings */
+		i40e_disable_vf_mappings(&pf->vf[i]);
+	}
+
+	kfree(pf->vf);
+	pf->vf = NULL;
+	pf->num_alloc_vfs = 0;
+
+	if (!i40e_vfs_are_assigned(pf))
+		pci_disable_sriov(pf->pdev);
+	else
+		dev_warn(&pf->pdev->dev,
+			 "%s: unable to disable SR-IOV because VFs are assigned.\n",
+			 __func__);
+
+	/* Re-enable interrupt 0. */
+	wr32(hw, I40E_PFINT_DYN_CTL0,
+		 I40E_PFINT_DYN_CTL0_INTENA_MASK |
+		 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+		 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
+	flush(hw);
+	return ret;
+}
+
+#ifdef CONFIG_PCI_IOV
+/**
+ * i40e_alloc_vfs
+ * @pf: pointer to the pf structure
+ * @num_alloc_vfs: number of vfs to allocate
+ *
+ * allocate vf resources
+ **/
+static enum i40e_status_code i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
+{
+	struct i40e_vf *vfs;
+	enum i40e_status_code ret = I40E_SUCCESS;
+	int i, err = 0;
+
+	err = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+	if (err) {
+		dev_err(&pf->pdev->dev, "%s: pci_enable_sriov failed with error %d!\n",
+			__func__, err);
+		pf->num_alloc_vfs = 0;
+		ret = I40E_ERR_CONFIG;
+		goto err_iov;
+	}
+
+	/* allocate memory */
+	vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL);
+	if (!vfs) {
+		ret = I40E_ERR_NO_MEMORY;
+		goto err_alloc;
+	}
+
+	/* apply default profile */
+	for (i = 0; i < num_alloc_vfs; i++) {
+		vfs[i].pf = pf;
+		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
+		vfs[i].vf_id = i;
+
+		/* assign default capabilities */
+		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
+
+		ret = i40e_alloc_vf_res(&vfs[i]);
+		i40e_reset_vf(&vfs[i], true);
+		if (I40E_SUCCESS != ret)
+			break;
+
+		/* enable vf vplan_qtable mappings */
+		ret = i40e_enable_vf_mappings(&vfs[i]);
+	}
+	pf->vf = vfs;
+	pf->num_alloc_vfs = num_alloc_vfs;
+
+err_alloc:
+	if (I40E_SUCCESS != ret)
+		i40e_free_vfs(pf);
+err_iov:
+	return ret;
+}
+
+#endif
+/**
+ * i40e_pci_sriov_enable
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of vfs to allocate
+ *
+ * Enable or change the number of VFs
+ **/
+static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+#ifdef CONFIG_PCI_IOV
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	int err = 0;
+	int pre_existing_vfs = pci_num_vf(pdev);
+
+	dev_info(&pdev->dev, "%s: Allocating %d VFs.\n", __func__, num_vfs);
+	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+		err = i40e_free_vfs(pf);
+	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+		goto out;
+
+	if (err)
+		goto err_out;
+
+	if (num_vfs > pf->num_req_vfs) {
+		err = -EPERM;
+		goto err_out;
+	}
+
+	err = i40e_alloc_vfs(pf, num_vfs);
+	if (err) {
+		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
+		goto err_out;
+	}
+
+out:
+	return num_vfs;
+
+err_out:
+	return err;
+#endif
+	return 0;
+}
+
+/**
+ * i40e_pci_sriov_configure
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of vfs to allocate
+ *
+ * Enable or change the number of VFs. Called when the user updates the number
+ * of VFs in sysfs.
+ **/
+int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+	if (num_vfs == 0)
+		return i40e_free_vfs(pf);
+	else
+		return i40e_pci_sriov_enable(pdev, num_vfs);
+}
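+
+/* The routine above is reached through the standard SR-IOV sysfs knob, e.g.
+ * from userspace:
+ *
+ *	echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs
+ *	echo 0 > /sys/bus/pci/devices/<BDF>/sriov_numvfs
+ */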
+
+/***********************virtual channel routines******************/
+
+/**
+ * i40e_vc_send_msg_to_vf
+ * @vf: pointer to the vf info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send msg to vf
+ **/
+static enum i40e_status_code i40e_vc_send_msg_to_vf(struct i40e_vf *vf,
+						    u32 v_opcode, u32 v_retval,
+						    u8 *msg, u16 msglen)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	enum i40e_status_code ret;
+
+	/* single place to detect unsuccessful return values */
+	if (v_retval != I40E_SUCCESS) {
+		vf->num_invalid_msgs++;
+		dev_err(&pf->pdev->dev, "%s: Failed opcode %d Error: %d\n",
+			__func__, v_opcode, v_retval);
+		if (vf->num_invalid_msgs >
+		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
+			dev_err(&pf->pdev->dev,
+				"%s: Number of invalid messages exceeded for VF %d\n",
+				__func__, vf->vf_id);
+			dev_err(&pf->pdev->dev,
+				"%s: Use PF Control I/F to enable the VF\n",
+				__func__);
+			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+		}
+	} else {
+		vf->num_valid_msgs++;
+	}
+
+	ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
+				     msg, msglen, NULL);
+	if (I40E_SUCCESS != ret)
+		dev_err(&pf->pdev->dev,
+			"%s: Unable to send the message to VF %d aq_err %d\n",
+			__func__, vf->vf_id, pf->hw.aq.asq_last_status);
+
+	return ret;
+}
+
+/**
+ * i40e_vc_send_resp_to_vf
+ * @vf: pointer to the vf info
+ * @opcode: operation code
+ * @retval: return value
+ *
+ * send resp msg to vf
+ **/
+static enum i40e_status_code i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
+					enum i40e_virtchnl_ops opcode,
+					enum i40e_status_code retval)
+{
+	enum i40e_status_code ret;
+
+	ret = i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
+
+	return ret;
+}
+
+/**
+ * i40e_vc_get_version_msg
+ * @vf: pointer to the vf info
+ *
+ * called from the vf to request the API version used by the PF
+ **/
+static enum i40e_status_code i40e_vc_get_version_msg(struct i40e_vf *vf)
+{
+	struct i40e_virtchnl_version_info info = {
+		I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
+	};
+
+	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+				      I40E_SUCCESS, (u8 *)&info,
+				      sizeof(struct
+					     i40e_virtchnl_version_info));
+}
+
+/**
+ * i40e_vc_get_vf_resources_msg
+ * @vf: pointer to the vf info
+ *
+ * called from the vf to request its resources
+ **/
+static enum i40e_status_code i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_virtchnl_vf_resource *vfres = NULL;
+	struct i40e_vsi *vsi;
+	enum i40e_status_code ret = I40E_SUCCESS;
+	int num_vsis = 1;
+	int i = 0, len = 0;
+
+	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+		ret = I40E_ERR_PARAM;
+		goto err;
+	}
+
+	len = (sizeof(struct i40e_virtchnl_vf_resource) +
+	       sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);
+
+	vfres = kzalloc(len, GFP_KERNEL);
+	if (!vfres) {
+		ret = I40E_ERR_NO_MEMORY;
+		len = 0;
+		goto err;
+	}
+
+	vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
+	vsi = pf->vsi[vf->lan_vsi_index];
+	if (!vsi->info.pvid)
+		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+
+	vfres->num_vsis = num_vsis;
+	vfres->num_queue_pairs = vf->num_queue_pairs;
+	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
+	if (vf->lan_vsi_index) {
+		vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
+		vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
+		vfres->vsi_res[i].num_queue_pairs =
+		    pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
+		memcpy(vfres->vsi_res[i].default_mac_addr,
+		       vf->default_lan_addr.addr, ETH_ALEN);
+		i++;
+	}
+	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+
+err:
+	/* send the response back to the vf */
+	ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+				     ret, (u8 *) vfres, len);
+
+	kfree(vfres);
+	return ret;
+}
+
+/**
+ * i40e_vc_reset_vf_msg
+ * @vf: pointer to the vf info
+ *
+ * called from the vf to reset itself; unlike other virtchnl messages,
+ * the pf driver doesn't send a response back to the vf
+ **/
+static enum i40e_status_code i40e_vc_reset_vf_msg(struct i40e_vf *vf)
+{
+	enum i40e_status_code ret;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	ret = i40e_reset_vf(vf, false);
+
+error_param:
+	return ret;
+}
+
+/**
+ * i40e_vc_config_promiscuous_mode_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to configure the promiscuous mode of
+ * vf vsis
+ **/
+static enum i40e_status_code i40e_vc_config_promiscuous_mode_msg(struct i40e_vf
+								 *vf, u8 *msg,
+								 u16 msglen)
+{
+	struct i40e_virtchnl_promisc_info *info =
+	    (struct i40e_virtchnl_promisc_info *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	enum i40e_status_code ret;
+	bool promisc = false, allmulti = false;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
+	    (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
+		promisc = true;
+	ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id,
+						  promisc, NULL);
+	if (ret)
+		goto error_param;
+
+	if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
+		allmulti = true;
+	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id,
+						    allmulti, NULL);
+
+error_param:
+	/* send the response to the vf */
+	ret = i40e_vc_send_resp_to_vf(vf,
+				      I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+				      ret);
+
+	return ret;
+}
+
+/**
+ * i40e_vc_config_queues_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to configure the rx/tx
+ * queues
+ **/
+static enum i40e_status_code i40e_vc_config_queues_msg(struct i40e_vf *vf,
+						       u8 *msg, u16 msglen)
+{
+	struct i40e_virtchnl_vsi_queue_config_info *qci =
+	    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
+	struct i40e_virtchnl_queue_pair_info *qpi;
+	enum i40e_status_code ret = I40E_ERR_PARAM;
+	int i;
+	u16 vsi_id, vsi_queue_id;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	vsi_id = qci->vsi_id;
+	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+	for (i = 0; i < qci->num_queue_pairs; i++) {
+		qpi = &qci->qpair[i];
+		vsi_queue_id = qpi->txq.queue_id;
+		if ((qpi->txq.vsi_id != vsi_id) ||
+		    (qpi->rxq.vsi_id != vsi_id) ||
+		    (qpi->rxq.queue_id != vsi_queue_id) ||
+		    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
+			ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+
+		ret = i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
+					       &qpi->rxq);
+		if (ret != I40E_SUCCESS)
+			break;
+		ret = i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
+					       &qpi->txq);
+		if (ret != I40E_SUCCESS)
+			break;
+	}
+
+error_param:
+	/* send the response to the vf */
+	ret = i40e_vc_send_resp_to_vf(vf,
+				      I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+				      ret);
+
+	return ret;
+}
+
+/**
+ * i40e_vc_config_irq_map_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to configure the irq to
+ * queue map
+ **/
+static enum i40e_status_code i40e_vc_config_irq_map_msg(struct i40e_vf *vf,
+							u8 *msg, u16 msglen)
+{
+	struct i40e_virtchnl_irq_map_info *irqmap_info =
+	    (struct i40e_virtchnl_irq_map_info *)msg;
+	struct i40e_virtchnl_vector_map *map;
+	enum i40e_status_code ret = I40E_SUCCESS;
+	u16 vsi_id, vsi_queue_id, vector_id;
+	unsigned long tempmap;
+	int i;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	for (i = 0; i < irqmap_info->num_vectors; i++) {
+		map = &irqmap_info->vecmap[i];
+
+		vector_id = map->vector_id;
+		vsi_id = map->vsi_id;
+		/* validate msg params */
+		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
+		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+			ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+
+		/* look out for invalid queue indexes */
+		tempmap = map->rxq_map;
+		vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+		while (vsi_queue_id < I40E_MAX_VSI_QP) {
+			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
+						      vsi_queue_id)) {
+				ret = I40E_ERR_PARAM;
+				goto error_param;
+			}
+			vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+						     vsi_queue_id + 1);
+		}
+
+		tempmap = map->txq_map;
+		vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+		while (vsi_queue_id < I40E_MAX_VSI_QP) {
+			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
+						      vsi_queue_id)) {
+				ret = I40E_ERR_PARAM;
+				goto error_param;
+			}
+			vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+						     vsi_queue_id + 1);
+		}
+
+		ret = i40e_config_irq_link_list(vf, vsi_id, map);
+	}
+error_param:
+	/* send the response to the vf */
+	ret = i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, ret);
+
+	return ret;
+}
+
+/**
+ * i40e_vc_enable_queues_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to enable all or specific queue(s)
+ **/
+static enum i40e_status_code i40e_vc_enable_queues_msg(struct i40e_vf *vf,
+						       u8 *msg, u16 msglen)
+{
+	struct i40e_virtchnl_queue_select *vqs =
+	    (struct i40e_virtchnl_queue_select *)msg;
+	struct i40e_pf *pf = vf->pf;
+	enum i40e_status_code ret = I40E_SUCCESS;
+	u16 vsi_id = vqs->vsi_id;
+	u16 queue_id;
+	unsigned long tempmap;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	tempmap = vqs->rx_queues;
+	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+	while (queue_id < I40E_MAX_VSI_QP) {
+		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
+			ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+		ret = i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
+					     I40E_QUEUE_CTRL_ENABLE);
+
+		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+					 queue_id + 1);
+	}
+
+	tempmap = vqs->tx_queues;
+	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+	while (queue_id < I40E_MAX_VSI_QP) {
+		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
+			ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+		ret = i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
+					     I40E_QUEUE_CTRL_ENABLE);
+
+		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+					 queue_id + 1);
+	}
+
+	/* Poll the status register to make sure that the
+	 * requested op was completed successfully
+	 */
+	udelay(10);
+
+	tempmap = vqs->rx_queues;
+	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+	while (queue_id < I40E_MAX_VSI_QP) {
+		ret = i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
+					     I40E_QUEUE_CTRL_ENABLECHECK);
+		if (ret != I40E_SUCCESS) {
+			dev_err(&pf->pdev->dev,
+				"%s: Queue control check failed on RX queue %d of VSI %d VF %d\n",
+				__func__, queue_id, vsi_id, vf->vf_id);
+		}
+		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+					 queue_id + 1);
+	}
+
+	tempmap = vqs->tx_queues;
+	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+	while (queue_id < I40E_MAX_VSI_QP) {
+		ret = i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
+					     I40E_QUEUE_CTRL_ENABLECHECK);
+		if (ret != I40E_SUCCESS) {
+			dev_err(&pf->pdev->dev,
+				"%s: Queue control check failed on TX queue %d of VSI %d VF %d\n",
+				__func__, queue_id, vsi_id, vf->vf_id);
+		}
+		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+					 queue_id + 1);
+	}
+
+error_param:
+
+	/* send the response to the vf */
+	ret = i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, ret);
+
+	return ret;
+}
+
+/**
+ * i40e_vc_disable_queues_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to disable all or specific
+ * queue(s)
+ **/
+static enum i40e_status_code i40e_vc_disable_queues_msg(struct i40e_vf *vf,
+							u8 *msg, u16 msglen)
+{
+	struct i40e_virtchnl_queue_select *vqs =
+	    (struct i40e_virtchnl_queue_select *)msg;
+	struct i40e_pf *pf = vf->pf;
+	enum i40e_status_code ret = I40E_SUCCESS;
+	u16 vsi_id = vqs->vsi_id;
+	u16 queue_id;
+	unsigned long tempmap;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	tempmap = vqs->rx_queues;
+	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+	while (queue_id < I40E_MAX_VSI_QP) {
+		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
+			ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+		ret = i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
+					     I40E_QUEUE_CTRL_DISABLE);
+
+		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+					 queue_id + 1);
+	}
+
+	tempmap = vqs->tx_queues;
+	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+	while (queue_id < I40E_MAX_VSI_QP) {
+		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
+			ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+		ret = i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
+					     I40E_QUEUE_CTRL_DISABLE);
+
+		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+					 queue_id + 1);
+	}
+
+	/* Poll the status register to make sure that the
+	 * requested op was completed successfully
+	 */
+	udelay(10);
+
+	tempmap = vqs->rx_queues;
+	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+	while (queue_id < I40E_MAX_VSI_QP) {
+		ret = i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
+					     I40E_QUEUE_CTRL_DISABLECHECK);
+		if (ret != I40E_SUCCESS) {
+			dev_err(&pf->pdev->dev,
+				"%s: Queue control check failed on RX queue %d of VSI %d VF %d\n",
+				__func__, queue_id, vsi_id, vf->vf_id);
+		}
+		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+					 queue_id + 1);
+	}
+
+	tempmap = vqs->tx_queues;
+	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+	while (queue_id < I40E_MAX_VSI_QP) {
+		ret = i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
+					     I40E_QUEUE_CTRL_DISABLECHECK);
+		if (ret != I40E_SUCCESS) {
+			dev_err(&pf->pdev->dev,
+				"%s: Queue control check failed on TX queue %d of VSI %d VF %d\n",
+				__func__, queue_id, vsi_id, vf->vf_id);
+		}
+		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+					 queue_id + 1);
+	}
+
+error_param:
+
+	/* send the response to the vf */
+	ret = i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, ret);
+
+	return ret;
+}
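
Both handlers share the same two-phase shape: issue the control op for every
set bit, wait briefly, then re-walk the same bitmap with the matching *CHECK
op. A hypothetical helper capturing that pattern for the RX direction (a
sketch only; the queue-id validation and error reporting done above are
omitted):

	static enum i40e_status_code
	i40e_vc_ctrl_then_check_rx(struct i40e_vf *vf, u16 vsi_id,
				   unsigned long map,
				   enum i40e_queue_ctrl op,
				   enum i40e_queue_ctrl check)
	{
		enum i40e_status_code ret = I40E_SUCCESS;
		u16 q;

		for_each_set_bit(q, &map, I40E_MAX_VSI_QP)
			ret = i40e_ctrl_vsi_rx_queue(vf, vsi_id, q, op);
		udelay(10);	/* let the hardware act before checking */
		for_each_set_bit(q, &map, I40E_MAX_VSI_QP)
			ret = i40e_ctrl_vsi_rx_queue(vf, vsi_id, q, check);
		return ret;
	}
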
+
+/**
+ * i40e_vc_get_stats_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to get vsi stats
+ **/
+static enum i40e_status_code i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg,
+						   u16 msglen)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_virtchnl_queue_select *vqs =
+	    (struct i40e_virtchnl_queue_select *)msg;
+	enum i40e_status_code ret = I40E_SUCCESS;
+	struct i40e_eth_stats stats;
+	struct i40e_vsi *vsi;
+
+	memset(&stats, 0, sizeof(struct i40e_eth_stats));
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	vsi = pf->vsi[vqs->vsi_id];
+	if (!vsi) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+	i40e_update_eth_stats(vsi);
+	memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats));
+
+error_param:
+
+	/* send the response back to the vf */
+	ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, ret,
+				     (u8 *)&stats, sizeof(stats));
+
+	return ret;
+}
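
On the other side of the channel, the forthcoming i40evf driver receives this
reply as a raw buffer. A hedged sketch of how a VF-side consumer might unpack
it (i40evf_handle_stats_reply is a hypothetical name; assumes the payload is
exactly the struct i40e_eth_stats copied above):

	static void i40evf_handle_stats_reply(u8 *buf, u16 len)
	{
		struct i40e_eth_stats stats;

		if (len < sizeof(stats))
			return;		/* malformed reply */
		memcpy(&stats, buf, sizeof(stats));
		/* stats.rx_bytes, stats.tx_bytes, ... now usable */
	}
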
+
+/**
+ * i40e_vc_add_mac_addr_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * add guest mac address filter
+ **/
+static enum i40e_status_code i40e_vc_add_mac_addr_msg(struct i40e_vf *vf,
+						      u8 *msg, u16 msglen)
+{
+	struct i40e_virtchnl_ether_addr_list *al =
+	    (struct i40e_virtchnl_ether_addr_list *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = NULL;
+	enum i40e_status_code ret = I40E_SUCCESS;
+	u16 vsi_id = al->vsi_id;
+	int i;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	for (i = 0; i < al->num_elements; i++) {
+		if (is_broadcast_ether_addr(al->list[i].addr) ||
+		    is_zero_ether_addr(al->list[i].addr)) {
+			dev_err(&pf->pdev->dev, "%s: invalid MAC addr %pM\n",
+				__func__, al->list[i].addr);
+			ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+	}
+	vsi = pf->vsi[vsi_id];
+
+	/* add new addresses to the list */
+	for (i = 0; i < al->num_elements; i++) {
+		if (!i40e_find_mac(vsi, al->list[i].addr, true, false)) {
+			if (i40e_is_vsi_in_vlan(vsi)) {
+				ret = i40e_put_mac_in_vlan(vsi,
+							   al->list[i].addr,
+							   true, false);
+			} else {
+				/* i40e_add_filter() returns a filter pointer,
+				 * not a status code; map NULL to an error
+				 * instead of truncating the pointer
+				 */
+				ret = i40e_add_filter(vsi, al->list[i].addr,
+						      -1, true, false) ?
+				      I40E_SUCCESS : I40E_ERR_PARAM;
+			}
+		}
+
+		if (ret != I40E_SUCCESS) {
+			dev_err(&pf->pdev->dev,
+				"%s: Unable to add MAC filter\n", __func__);
+			ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+	}
+
+	/* program the updated filter list */
+	ret = i40e_sync_vsi_filters(vsi);
+	if (ret != I40E_SUCCESS)
+		dev_err(&pf->pdev->dev,
+			"%s: Unable to program MAC filters\n", __func__);
+
+error_param:
+
+	/* send the response to the vf */
+	ret = i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+				      ret);
+
+	return ret;
+}
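
For reference, the ADD_ETHER_ADDRESS request parsed above is variable-length.
A sketch of how a sender would size a one-address message, assuming list[] is
laid out the way the length check in i40e_vc_validate_vf_msg() below implies:

	u16 len = sizeof(struct i40e_virtchnl_ether_addr_list) +
		  sizeof(struct i40e_virtchnl_ether_addr);
	struct i40e_virtchnl_ether_addr_list *al = kzalloc(len, GFP_KERNEL);

	if (al) {
		al->vsi_id = vsi_id;
		al->num_elements = 1;
		memcpy(al->list[0].addr, mac, ETH_ALEN);	/* 6-byte MAC */
	}
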
+
+/**
+ * i40e_vc_del_mac_addr_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * remove guest mac address filter
+ **/
+static enum i40e_status_code i40e_vc_del_mac_addr_msg(struct i40e_vf *vf,
+						      u8 *msg, u16 msglen)
+{
+	struct i40e_virtchnl_ether_addr_list *al =
+	    (struct i40e_virtchnl_ether_addr_list *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = NULL;
+	enum i40e_status_code ret = I40E_SUCCESS;
+	u16 vsi_id = al->vsi_id;
+	int i;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+	vsi = pf->vsi[vsi_id];
+
+	/* delete addresses from the list */
+	for (i = 0; i < al->num_elements; i++)
+		i40e_del_filter(vsi, al->list[i].addr,
+				I40E_VLAN_ANY, true, false);
+
+	/* program the updated filter list */
+	ret = i40e_sync_vsi_filters(vsi);
+	if (ret != I40E_SUCCESS)
+		dev_err(&pf->pdev->dev,
+			"%s: Unable to program MAC filters\n", __func__);
+
+error_param:
+
+	/* send the response to the vf */
+	ret = i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+				      ret);
+
+	return ret;
+}
+
+/**
+ * i40e_vc_add_vlan_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * program guest vlan id
+ **/
+static enum i40e_status_code i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg,
+						  u16 msglen)
+{
+	struct i40e_virtchnl_vlan_filter_list *vfl =
+	    (struct i40e_virtchnl_vlan_filter_list *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = NULL;
+	enum i40e_status_code ret = I40E_SUCCESS;
+	u16 vsi_id = vfl->vsi_id;
+	int i;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	for (i = 0; i < vfl->num_elements; i++) {
+		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
+			ret = I40E_ERR_PARAM;
+			dev_err(&pf->pdev->dev,
+				"%s: invalid VLAN id %d\n",
+				__func__, vfl->vlan_id[i]);
+			goto error_param;
+		}
+	}
+	vsi = pf->vsi[vsi_id];
+	if (vsi->info.pvid) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	i40e_vlan_stripping_enable(vsi);
+	for (i = 0; i < vfl->num_elements; i++) {
+		/* add new VLAN filter */
+		ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
+		if (ret != I40E_SUCCESS) {
+			dev_err(&pf->pdev->dev,
+				"%s: Unable to add vlan filter %d, error %d\n",
+				__func__, vfl->vlan_id[i], ret);
+			goto error_param;
+		}
+	}
+
+error_param:
+
+	/* send the response to the vf */
+	ret = i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, ret);
+
+	return ret;
+}
+
+/**
+ * i40e_vc_remove_vlan_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * remove programmed guest vlan id
+ **/
+static enum i40e_status_code i40e_vc_remove_vlan_msg(struct i40e_vf *vf,
+						     u8 *msg, u16 msglen)
+{
+	struct i40e_virtchnl_vlan_filter_list *vfl =
+	    (struct i40e_virtchnl_vlan_filter_list *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = NULL;
+	enum i40e_status_code ret = I40E_SUCCESS;
+	u16 vsi_id = vfl->vsi_id;
+	int i;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	for (i = 0; i < vfl->num_elements; i++) {
+		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
+			ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+	}
+
+	vsi = pf->vsi[vsi_id];
+	if (vsi->info.pvid) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	for (i = 0; i < vfl->num_elements; i++) {
+		ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+		if (ret != I40E_SUCCESS)
+			dev_err(&pf->pdev->dev,
+				"%s: Unable to delete vlan filter %d, error %d\n",
+				__func__, vfl->vlan_id[i], ret);
+	}
+
+error_param:
+
+	/* send the response to the vf */
+	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, ret);
+}
+
+/**
+ * i40e_vc_fcoe_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf for the fcoe msgs
+ **/
+static enum i40e_status_code i40e_vc_fcoe_msg(struct i40e_vf *vf, u8 *msg,
+					      u16 msglen)
+{
+	enum i40e_status_code ret = I40E_SUCCESS;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+	    !test_bit(I40E_VF_STAT_FCOEENA, &vf->vf_states)) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+	ret = I40E_ERR_NOT_IMPLEMENTED;
+
+error_param:
+
+	/* send the response to the vf */
+	ret = i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_FCOE, ret);
+
+	return ret;
+}
+
+/**
+ * i40e_vc_validate_vf_msg
+ * @vf: pointer to the vf info
+ * @v_opcode: virtchnl operation code
+ * @v_retval: return value carried in the message
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * validate the msg format and length against the opcode
+ **/
+static enum i40e_status_code i40e_vc_validate_vf_msg(struct i40e_vf *vf,
+						     u32 v_opcode,
+						     u32 v_retval, u8 *msg,
+						     u16 msglen)
+{
+	bool err_msg_format = false;
+	int valid_len;
+
+	/* Check if VF is disabled. */
+	if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
+		return I40E_ERR_PARAM;
+
+	/* Validate message length. */
+	switch (v_opcode) {
+	case I40E_VIRTCHNL_OP_VERSION:
+		valid_len = sizeof(struct i40e_virtchnl_version_info);
+		break;
+	case I40E_VIRTCHNL_OP_RESET_VF:
+	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+		valid_len = 0;
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
+		valid_len = sizeof(struct i40e_virtchnl_txq_info);
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
+		valid_len = sizeof(struct i40e_virtchnl_rxq_info);
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+		valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
+		if (msglen >= valid_len) {
+			struct i40e_virtchnl_vsi_queue_config_info *vqc =
+			    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
+			valid_len += (vqc->num_queue_pairs *
+				      sizeof(struct
+					     i40e_virtchnl_queue_pair_info));
+			if (vqc->num_queue_pairs == 0)
+				err_msg_format = true;
+		}
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+		valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
+		if (msglen >= valid_len) {
+			struct i40e_virtchnl_irq_map_info *vimi =
+			    (struct i40e_virtchnl_irq_map_info *)msg;
+			valid_len += (vimi->num_vectors *
+				      sizeof(struct i40e_virtchnl_vector_map));
+			if (vimi->num_vectors == 0)
+				err_msg_format = true;
+		}
+		break;
+	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+		valid_len = sizeof(struct i40e_virtchnl_queue_select);
+		break;
+	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+		valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
+		if (msglen >= valid_len) {
+			struct i40e_virtchnl_ether_addr_list *veal =
+			    (struct i40e_virtchnl_ether_addr_list *)msg;
+			valid_len += veal->num_elements *
+			    sizeof(struct i40e_virtchnl_ether_addr);
+			if (veal->num_elements == 0)
+				err_msg_format = true;
+		}
+		break;
+	case I40E_VIRTCHNL_OP_ADD_VLAN:
+	case I40E_VIRTCHNL_OP_DEL_VLAN:
+		valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
+		if (msglen >= valid_len) {
+			struct i40e_virtchnl_vlan_filter_list *vfl =
+			    (struct i40e_virtchnl_vlan_filter_list *)msg;
+			valid_len += vfl->num_elements * sizeof(u16);
+			if (vfl->num_elements == 0)
+				err_msg_format = true;
+		}
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+		valid_len = sizeof(struct i40e_virtchnl_promisc_info);
+		break;
+	case I40E_VIRTCHNL_OP_GET_STATS:
+		valid_len = sizeof(struct i40e_virtchnl_queue_select);
+		break;
+	/* These are always errors coming from the VF. */
+	case I40E_VIRTCHNL_OP_EVENT:
+	case I40E_VIRTCHNL_OP_UNKNOWN:
+	default:
+		return I40E_ERR_PARAM;
+	}
+
+	/* finally, reject messages whose length does not match the opcode */
+	if ((valid_len != msglen) || (err_msg_format)) {
+		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
+		return I40E_ERR_PARAM;
+	}
+
+	return I40E_SUCCESS;
+}
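
A worked example of the length rule above: an ADD_ETHER_ADDRESS message
carrying two addresses passes only if msglen matches the computed size
exactly:

	/* illustrative arithmetic for the ADD/DEL_ETHER_ADDRESS case */
	valid_len = sizeof(struct i40e_virtchnl_ether_addr_list) +
		    2 * sizeof(struct i40e_virtchnl_ether_addr);
	/* accepted iff msglen == valid_len and num_elements != 0 */
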
+
+/**
+ * i40e_vc_process_vf_msg
+ * @pf: pointer to the pf structure
+ * @vf_id: source vf id
+ * @v_opcode: virtchnl operation code
+ * @v_retval: return value carried in the message
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the common aeq/arq handler to
+ * process request from vf
+ **/
+enum i40e_status_code i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id,
+					     u32 v_opcode, u32 v_retval,
+					     u8 *msg, u16 msglen)
+{
+	struct i40e_vf *vf = &(pf->vf[vf_id]);
+	struct i40e_hw *hw = &pf->hw;
+	enum i40e_status_code ret;
+
+	pf->vf_aq_requests++;
+	/* perform basic checks on the msg */
+	ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
+
+	if (ret != I40E_SUCCESS) {
+		dev_err(&pf->pdev->dev, "%s: invalid message from vf %d\n",
+			__func__, vf_id);
+		return ret;
+	}
+	wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
+	switch (v_opcode) {
+	case I40E_VIRTCHNL_OP_VERSION:
+		ret = i40e_vc_get_version_msg(vf);
+		break;
+	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+		ret = i40e_vc_get_vf_resources_msg(vf);
+		break;
+	case I40E_VIRTCHNL_OP_RESET_VF:
+		ret = i40e_vc_reset_vf_msg(vf);
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_ADD_VLAN:
+		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_DEL_VLAN:
+		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_GET_STATS:
+		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_FCOE:
+		ret = i40e_vc_fcoe_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_UNKNOWN:
+	default:
+		dev_err(&pf->pdev->dev,
+			"%s: Unsupported opcode %d from vf %d\n",
+			__func__, v_opcode, vf_id);
+		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
+					      I40E_ERR_NOT_IMPLEMENTED);
+		break;
+	}
+
+	return ret;
+}
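
Adding a new request type would touch both switches: a length case in
i40e_vc_validate_vf_msg() and a dispatch case here. A sketch with an invented
opcode and handler (I40E_VIRTCHNL_OP_GET_LINK and i40e_vc_get_link_msg() are
hypothetical, not part of this patch):

	/* in i40e_vc_validate_vf_msg(): */
	case I40E_VIRTCHNL_OP_GET_LINK:
		valid_len = 0;	/* request carries no payload */
		break;

	/* in i40e_vc_process_vf_msg(): */
	case I40E_VIRTCHNL_OP_GET_LINK:
		ret = i40e_vc_get_link_msg(vf, msg, msglen);
		break;
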
+
+/**
+ * i40e_vc_process_vflr_event
+ * @pf: pointer to the pf structure
+ *
+ * called from the vflr irq handler to
+ * free up vf resources and state variables
+ **/
+enum i40e_status_code i40e_vc_process_vflr_event(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_vf *vf;
+	enum i40e_status_code ret;
+	u32 reg, reg_idx, bit_idx, vf_id;
+
+	if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
+		return I40E_SUCCESS;
+
+	clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
+	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
+		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+		vf = &pf->vf[vf_id];
+		/* read GLGEN_VFLRSTAT register to find out the flr vfs */
+		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
+		if (reg & (1 << bit_idx)) {
+			/* clear the bit in GLGEN_VFLRSTAT */
+			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+
+			ret = i40e_reset_vf(vf, true);
+			if (ret != I40E_SUCCESS)
+				dev_err(&pf->pdev->dev,
+					"%s: Unable to reset the VF %d\n",
+					__func__, vf_id);
+			/* free up vf resources to destroy vsi state */
+			ret = i40e_free_vf_res(vf);
+			if (ret != I40E_SUCCESS)
+				dev_err(&pf->pdev->dev,
+					"%s: Failed to free VF resources %d\n",
+					__func__, vf_id);
+
+			/* allocate new vf resources with the default state */
+			ret = i40e_alloc_vf_res(vf);
+			if (ret != I40E_SUCCESS)
+				dev_err(&pf->pdev->dev,
+					"%s: Unable to allocate VF resources %d\n",
+					__func__, vf_id);
+
+			ret = i40e_enable_vf_mappings(vf);
+			if (ret != I40E_SUCCESS)
+				dev_err(&pf->pdev->dev,
+					"%s: Unable to enable VF mappings %d\n",
+					__func__, vf_id);
+		}
+	}
+
+	/* re-enable vflr interrupt cause */
+	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
+	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+	flush(hw);
+
+	return I40E_SUCCESS;
+}
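
The reg_idx/bit_idx arithmetic simply locates a VF's bit in the 32-bit-wide
GLGEN_VFLRSTAT register array. With a hypothetical vf_base_id of 64 and
vf_id 5:

	u32 abs_id = 64 + 5;		/* vf_base_id + vf_id = 69 */
	u32 reg_idx = abs_id / 32;	/* -> 2 */
	u32 bit_idx = abs_id % 32;	/* -> 5 */
	/* FLR bit: rd32(hw, I40E_GLGEN_VFLRSTAT(2)) & (1 << 5) */
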
+
+/**
+ * i40e_vc_vf_broadcast
+ * @pf: pointer to the pf structure
+ * @v_opcode: operation code
+ * @v_retval: return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send a message to all VFs on a given PF
+ **/
+static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
+				 enum i40e_virtchnl_ops v_opcode,
+				 enum i40e_status_code v_retval, u8 *msg,
+				 u16 msglen)
+{
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_vf *vf = pf->vf;
+	int i;
+
+	for (i = 0; i < pf->num_alloc_vfs; i++) {
+		/* Ignore return value on purpose - a given VF may fail, but
+		 * we need to keep going and send to all of them
+		 */
+		i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
+				       msg, msglen, NULL);
+		vf++;
+	}
+}
+
+/**
+ * i40e_vc_notify_link_state
+ * @pf: pointer to the pf structure
+ *
+ * send a link status message to all VFs on a given PF
+ **/
+void i40e_vc_notify_link_state(struct i40e_pf *pf)
+{
+	struct i40e_virtchnl_pf_event pfe;
+
+	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
+	pfe.event_data.link_event.link_status =
+	    pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
+	pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed;
+
+	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
+			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
+}
+
+/**
+ * i40e_vc_notify_reset
+ * @pf: pointer to the pf structure
+ *
+ * indicate a pending reset to all VFs on a given PF
+ **/
+void i40e_vc_notify_reset(struct i40e_pf *pf)
+{
+	struct i40e_virtchnl_pf_event pfe;
+
+	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
+	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
+	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
+			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
+}
+
+/**
+ * i40e_vc_notify_vf_reset
+ * @vf: pointer to the vf structure
+ *
+ * indicate a pending reset to the given VF
+ **/
+void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
+{
+	struct i40e_virtchnl_pf_event pfe;
+
+	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
+	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
+	i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
+			       I40E_SUCCESS, (u8 *)&pfe,
+			       sizeof(struct i40e_virtchnl_pf_event), NULL);
+}
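
A hedged sketch of the matching VF-side dispatch for these notifications
(hypothetical i40evf code, reusing the event structure broadcast above):

	struct i40e_virtchnl_pf_event *pfe =
	    (struct i40e_virtchnl_pf_event *)msg;
	bool link_up;

	switch (pfe->event) {
	case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
		link_up = pfe->event_data.link_event.link_status;
		break;
	case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
		/* quiesce and wait for the PF to finish the reset */
		break;
	default:
		break;
	}
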
+
+/**
+ * i40e_ndo_set_vf_mac
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @mac: mac address
+ *
+ * program vf mac address
+ **/
+int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_vf *vf;
+	struct i40e_mac_filter *f;
+	int ret = 0;
+
+	/* validate the request */
+	if (vf_id >= pf->num_alloc_vfs) {
+		dev_err(&pf->pdev->dev,
+			"%s: Invalid VF Identifier %d\n", __func__, vf_id);
+		ret = -EINVAL;
+		goto error_param;
+	}
+
+	vf = &(pf->vf[vf_id]);
+	vsi = pf->vsi[vf->lan_vsi_index];
+	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+		dev_err(&pf->pdev->dev,
+			"%s: Uninitialized VF %d\n", __func__, vf_id);
+		ret = -EINVAL;
+		goto error_param;
+	}
+
+	if (!is_valid_ether_addr(mac)) {
+		dev_err(&pf->pdev->dev,
+			"%s: Invalid ethernet address\n", __func__);
+		ret = -EINVAL;
+		goto error_param;
+	}
+
+	/* delete the temporary mac address */
+	i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);
+
+	/* add the new mac address */
+	f = i40e_add_filter(vsi, mac, 0, true, false);
+	if (!f) {
+		dev_err(&pf->pdev->dev,
+			"%s: Unable to add ucast filter\n", __func__);
+		ret = -ENOMEM;
+		goto error_param;
+	}
+
+	dev_info(&pf->pdev->dev, "%s: Setting MAC %pM on VF %d\n",
+		 __func__, mac, vf_id);
+	/* program mac filter */
+	if (i40e_sync_vsi_filters(vsi) != I40E_SUCCESS) {
+		dev_err(&pf->pdev->dev,
+			"%s: Unable to program ucast filters\n", __func__);
+		ret = -EIO;
+		goto error_param;
+	}
+	memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
+	dev_info(&pf->pdev->dev,
+		 "%s: Reload the VF driver to make this change effective.\n",
+		 __func__);
+	ret = 0;
+
+error_param:
+	return ret;
+}
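
This handler backs the standard iproute2 command for assigning a VF MAC,
e.g. (interface name illustrative):

	ip link set dev eth0 vf 0 mac 00:11:22:33:44:55
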
+
+/**
+ * i40e_ndo_set_vf_port_vlan
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @vlan_id: vlan identifier
+ * @qos: priority setting
+ *
+ * program vf vlan id and/or qos
+ **/
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
+			      int vf_id, u16 vlan_id, u8 qos)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_vf *vf;
+	struct i40e_vsi *vsi;
+	int ret = I40E_SUCCESS;
+
+	/* validate the request */
+	if (vf_id >= pf->num_alloc_vfs) {
+		dev_err(&pf->pdev->dev, "%s: Invalid VF Identifier %d\n",
+			__func__, vf_id);
+		ret = -EINVAL;
+		goto error_pvid;
+	}
+
+	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
+		dev_err(&pf->pdev->dev, "%s: Invalid Parameters\n", __func__);
+		ret = -EINVAL;
+		goto error_pvid;
+	}
+
+	vf = &(pf->vf[vf_id]);
+	vsi = pf->vsi[vf->lan_vsi_index];
+	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+		dev_err(&pf->pdev->dev,
+			"%s: Uninitialized VF %d\n", __func__, vf_id);
+		ret = -EINVAL;
+		goto error_pvid;
+	}
+
+	if (vsi->info.pvid) {
+		/* kill old VLAN */
+		ret = i40e_vsi_kill_vlan(vsi, vsi->info.pvid & VLAN_VID_MASK);
+		if (ret != I40E_SUCCESS) {
+			dev_info(&vsi->back->pdev->dev,
+				"%s: remove VLAN failed, ret=%d, aq_err=%d\n",
+				__func__, ret, pf->hw.aq.asq_last_status);
+		}
+	}
+	if (vlan_id || qos)
+		ret = i40e_vsi_add_pvid(vsi,
+				vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
+	else
+		i40e_vlan_stripping_disable(vsi);
+
+	if (vlan_id) {
+		dev_info(&pf->pdev->dev,
+			"%s: Setting VLAN %d, QOS 0x%x on VF %d\n",
+			__func__, vlan_id, qos, vf_id);
+
+		/* add new VLAN filter */
+		ret = i40e_vsi_add_vlan(vsi, vlan_id);
+		if (ret != I40E_SUCCESS) {
+			dev_info(&vsi->back->pdev->dev,
+				 "%s: add VLAN failed, ret=%d aq_err=%d\n",
+				 __func__, ret,
+				 vsi->back->hw.aq.asq_last_status);
+			goto error_pvid;
+		}
+	}
+
+	if (ret != I40E_SUCCESS) {
+		dev_err(&pf->pdev->dev, "%s: Unable to update vsi context\n",
+			__func__);
+		ret = -EIO;
+		goto error_pvid;
+	}
+	ret = 0;
+
+error_pvid:
+	return ret;
+}
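
Correspondingly, the port VLAN and QoS pair is driven from userspace as,
e.g.:

	ip link set dev eth0 vf 0 vlan 100 qos 3

Passing vlan 0 with qos 0 clears the port VLAN, which is why the
(vlan_id || qos) test above falls through to disabling VLAN stripping.
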
+
+/**
+ * i40e_ndo_set_vf_bw
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @tx_rate: tx rate
+ *
+ * configure vf tx rate
+ **/
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * i40e_ndo_get_vf_config
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @ivi: vf configuration structure
+ *
+ * return vf configuration
+ **/
+int i40e_ndo_get_vf_config(struct net_device *netdev,
+			   int vf_id, struct ifla_vf_info *ivi)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_vf *vf;
+	struct i40e_mac_filter *f;
+	int ret = 0;
+
+	/* validate the request */
+	if (vf_id >= pf->num_alloc_vfs) {
+		dev_err(&pf->pdev->dev,
+			"%s: Invalid VF Identifier %d\n", __func__, vf_id);
+		ret = -EINVAL;
+		goto error_param;
+	}
+
+	vf = &(pf->vf[vf_id]);
+	/* first vsi is always the LAN vsi */
+	vsi = pf->vsi[vf->lan_vsi_index];
+	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+		dev_err(&pf->pdev->dev,
+			"%s: Uninitialized VF %d\n", __func__, vf_id);
+		ret = -EINVAL;
+		goto error_param;
+	}
+
+	ivi->vf = vf_id;
+
+	/* first entry of the list is the default ethernet address */
+	list_for_each_entry(f, &vsi->mac_filter_list, list) {
+		memcpy(&ivi->mac, f->macaddr, I40E_ETH_LENGTH_OF_ADDRESS);
+		break;
+	}
+
+	ivi->tx_rate = 0;
+	ivi->vlan = vsi->info.pvid & I40E_VLAN_MASK;
+	ivi->qos = (vsi->info.pvid & I40E_PRIORITY_MASK) >>
+		   I40E_VLAN_PRIORITY_SHIFT;
+	ret = 0;
+
+error_param:
+	return ret;
+}
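
i40e_ndo_get_vf_config() in turn feeds ip link show, which renders each
struct ifla_vf_info filled above roughly as (illustrative output):

	vf 0 MAC 00:11:22:33:44:55, vlan 100, qos 3
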
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
new file mode 100644
index 0000000..4195cd0
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -0,0 +1,123 @@
+/*******************************************************************************
+
+  Intel Ethernet Controller XL710 Family Linux Driver
+  Copyright(c) 2013 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@...ts.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _I40E_VIRTCHNL_PF_H_
+#define _I40E_VIRTCHNL_PF_H_
+
+#include "i40e.h"
+
+#define I40E_MAX_MACVLAN_FILTERS 256
+#define I40E_MAX_VLAN_FILTERS 256
+#define I40E_MAX_VLANID 4095
+
+#define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
+
+#define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED	3
+#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED	10
+
+#define I40E_VLAN_PRIORITY_SHIFT	12
+#define I40E_VLAN_MASK			0xFFF
+#define I40E_PRIORITY_MASK		0x7000
+
+/* Various queue ctrls */
+enum i40e_queue_ctrl {
+	I40E_QUEUE_CTRL_UNKNOWN = 0,
+	I40E_QUEUE_CTRL_ENABLE,
+	I40E_QUEUE_CTRL_ENABLECHECK,
+	I40E_QUEUE_CTRL_DISABLE,
+	I40E_QUEUE_CTRL_DISABLECHECK,
+	I40E_QUEUE_CTRL_FASTDISABLE,
+	I40E_QUEUE_CTRL_FASTDISABLECHECK,
+};
+
+/* VF states */
+enum i40e_vf_states {
+	I40E_VF_STAT_INIT = 0,
+	I40E_VF_STAT_ACTIVE,
+	I40E_VF_STAT_FCOEENA,
+	I40E_VF_STAT_DISABLED,
+};
+
+/* VF capabilities */
+enum i40e_vf_capabilities {
+	I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
+	I40E_VIRTCHNL_VF_CAP_L2,
+};
+
+/* VF information structure */
+struct i40e_vf {
+	struct i40e_pf *pf;
+
+	/* vf id in the pf space */
+	u16 vf_id;
+	/* all vf vsis connect to the same parent */
+	enum i40e_switch_element_types parent_type;
+
+	/* vf Port Extender (PE) stag if used */
+	u16 stag;
+
+	struct i40e_virtchnl_ether_addr default_lan_addr;
+	struct i40e_virtchnl_ether_addr default_fcoe_addr;
+
+	/* VSI indices - actual VSI pointers are maintained in the PF structure
+	 * When assigned, these will be non-zero, because VSI 0 is always
+	 * the main LAN VSI for the PF.
+	 */
+	u8 lan_vsi_index;	/* index into PF struct */
+	u8 lan_vsi_id;		/* ID as used by firmware */
+
+	u8 num_queue_pairs;	/* num of qps assigned to vf vsis */
+	u64 num_mdd_events;	/* num of mdd events detected */
+	u64 num_invalid_msgs;	/* num of malformed or invalid msgs detected */
+	u64 num_valid_msgs;	/* num of valid msgs detected */
+
+	unsigned long vf_caps;	/* vf's adv. capabilities */
+	unsigned long vf_states;	/* vf's runtime states */
+};
+
+enum i40e_status_code i40e_free_vfs(struct i40e_pf *pf);
+int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+enum i40e_status_code i40e_vc_process_vf_msg(struct i40e_pf *pf,
+					     u16 vf_id, u32 v_opcode,
+					     u32 v_retval, u8 *msg,
+					     u16 msglen);
+enum i40e_status_code i40e_vc_process_vflr_event(struct i40e_pf *pf);
+enum i40e_status_code i40e_vc_process_mdd_event(struct i40e_pf *pf);
+enum i40e_status_code i40e_reset_vf(struct i40e_vf *vf, bool flr);
+void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
+
+/* vf configuration related iplink handlers */
+int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
+			      int vf_id, u16 vlan_id, u8 qos);
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
+int i40e_ndo_get_vf_config(struct net_device *netdev,
+			   int vf_id, struct ifla_vf_info *ivi);
+void i40e_vc_notify_link_state(struct i40e_pf *pf);
+void i40e_vc_notify_reset(struct i40e_pf *pf);
+
+#endif /* _I40E_VIRTCHNL_PF_H_ */
-- 
1.8.3.1
