Message-ID: <1185957077.5552.22.camel@dell>
Date:	Wed, 01 Aug 2007 01:31:17 -0700
From:	"Michael Chan" <mchan@...adcom.com>
To:	davem@...emloft.net, jeff@...zik.org, netdev@...r.kernel.org
cc:	eliezert@...adcom.com, lusinsky@...adcom.com, eilong@...adcom.com
Subject: [RFC][BNX2X]: New driver for Broadcom 10Gb Ethernet.

From: Eliezer Tamir <eliezert@...adcom.com>

This is an initial version of bnx2x, the Linux driver for the
BCM57710 10Gb Ethernet controller family.
 
Although the chip is very different from the 5706-8 family, we based the
driver code on the BNX2 driver.
 
Since the hardware is expected to be generally available soon, I'm
posting an initial version now; after hearing all the comments, I intend
to repost with whatever modifications are needed ASAP.
 
Some planned features are still under development, but we want to get
what we have out now so people can start using the HW.
 
Some of the generated files still need coding-style changes.
These files are maintained by other teams, and we estimate they will be
re-formatted within two to three weeks.
 
Due to a last-minute technical problem I am not able to send the patches
myself; Michael has kindly agreed to send the patchset for me.
 
I would like to thank Michael for all his help.
 
The full patch is here:

ftp://Net_sys_anon@...1.broadcom.com/0001-BNX2X.patch

It includes a large firmware file that will not get through the mailing
list, so only the main bnx2x.[ch] files are included below for review.

--

[BNX2X]: New driver for Broadcom NX2 10G Ethernet chips.

Signed-off-by: Eliezer Tamir <eliezert@...adcom.com>

---
 drivers/net/Kconfig             |    9 +
 drivers/net/Makefile            |    1 +
 drivers/net/bnx2x.c             | 8503 +++++++++
 drivers/net/bnx2x.h             |  962 +
 drivers/net/bnx2x_asm.h         |37017 +++++++++++++++++++++++++++++++++++++++
 drivers/net/bnx2x_hsi.h         | 7823 +++++++++
 drivers/net/bnx2x_init.h        |  337 +
 drivers/net/bnx2x_init_values.h | 7698 ++++++++
 drivers/net/bnx2x_self_test.h   | 1199 ++
 include/linux/pci_ids.h         |    1 +
 10 files changed, 63550 insertions(+), 0 deletions(-)
 create mode 100644 drivers/net/bnx2x.c
 create mode 100644 drivers/net/bnx2x.h
 create mode 100644 drivers/net/bnx2x_asm.h
 create mode 100644 drivers/net/bnx2x_hsi.h
 create mode 100644 drivers/net/bnx2x_init.h
 create mode 100644 drivers/net/bnx2x_init_values.h
 create mode 100644 drivers/net/bnx2x_self_test.h

diff --git a/drivers/net/bnx2x.c b/drivers/net/bnx2x.c
new file mode 100644
index 0000000..8c0916a
--- /dev/null
+++ b/drivers/net/bnx2x.c
@@ -0,0 +1,8503 @@
+/* bnx2x.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Eliezer Tamir <eliezert@...adcom.com>
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+/* define this to make the driver freeze on error
+ * - for now this is the default.
+ */
+#define BNX2X_STOP_ON_ERROR
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/device.h>  /* for dev_info() */
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <linux/time.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#ifdef NETIF_F_HW_VLAN_TX
+	#include <linux/if_vlan.h>
+	#define BCM_VLAN 1
+#endif
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/checksum.h>
+#include <linux/workqueue.h>
+#include <linux/crc32.h>
+#include <linux/prefetch.h>
+#include <linux/zlib.h>
+
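+/* opaque placeholder for the license key layout; it must be defined
+ * before bnx2x_hsi.h, which presumably refers to license_key_t
+ */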
+typedef struct {
+	u8 reserved[64];
+} license_key_t;
+#include "bnx2x_hsi.h"
+#include "bnx2x.h"
+#include "bnx2x_init.h"
+#include "bnx2x_self_test.h"
+
+
+#define DRV_MODULE_VERSION	"0.40.8"
+#define DRV_MODULE_RELDATE	"$Date: 2007/07/31 20:52:29 $"
+#define BNX2X_BC_REV		0x040002
+
+#define RUN_AT(x)		(jiffies + (x))
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT		(5*HZ)
+
+static const char version[] __devinitdata =
+	"Broadcom NetXtreme II (Everest) 10 Gigabit Ethernet Driver "
+	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Eliezer Tamir <eliezert@...adcom.com>");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_INFO(cvs_version, "$Revision: 1.282 $");
+
+static int use_inta;
+static int poll;
+static int onefunc;
+static int nomcp;
+static int debug;
+static int iscsi_active;
+module_param(use_inta, int, 0);
+module_param(poll, int, 0);
+module_param(onefunc, int, 0);
+module_param(nomcp, int, 0);
+module_param(debug, int, 0);
+MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
+MODULE_PARM_DESC(poll, "use polling (for debug)");
+MODULE_PARM_DESC(onefunc, "enable only first function");
+MODULE_PARM_DESC(nomcp, "ignore management CPU (implies onefunc)");
+MODULE_PARM_DESC(debug, "default debug msglevel");
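+
+/* usage sketch (hypothetical values):
+ *   insmod bnx2x.ko use_inta=1 debug=0x1
+ * all parameters above default to 0 when not set
+ */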
+
+typedef enum {
+	BCM5710 = 0,
+} board_t;
+
+/* indexed by board_t, above */
+static const struct {
+	char *name;
+} board_info[] __devinitdata = {
+	{ "Broadcom NetXtreme II BCM5710 XGb" },
+};
+
+static struct pci_device_id bnx2x_pci_tbl[] = {
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5710,
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5710 },
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
+
+
+/* PHY/MAC */
+
+/****************************************************************************
+* Macros
+****************************************************************************/
+
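+/* XGXS/SerDes registers are banked: the bank number is written to
+ * shadow register 0x1f before accessing a register within the bank
+ */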
+#define MDIO_INDIRECT_REG_ADDR	0x1f
+#define MDIO_SET_REG_BANK(bp, reg_bank) \
+		bnx2x_mdio22_write(bp, MDIO_INDIRECT_REG_ADDR, reg_bank)
+
+#define MDIO_ACCESS_TIMEOUT	1000
+#define MAX_SPQ_PENDING         8
+
+#define NIG_WR(reg, val)	REG_WR(bp, GRCBASE_NIG, reg, val)
+#define EMAC_WR(reg, val)	REG_WR(bp, emac_base, reg, val)
+#define BMAC_WR(reg, val)	REG_WR(bp, GRCBASE_NIG, bmac_addr + reg, val)
+
+#define REG_ADDR(bp, block, offset)	((u8 *)bp->regview + block + offset)
+#define BMAC_ADDR(reg)		(GRCBASE_NIG + bmac_addr + reg)
+
+/****************************************************************************
+* General service functions
+****************************************************************************/
+
+static void bnx2x_leds_set(struct bnx2x *bp, unsigned int speed)
+{
+	u32 val;
+	int port = bp->port;
+
+	NIG_WR(NIG_REGISTERS_LED_MODE_P0 + port*4,
+	       ((bp->hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
+		SHARED_HW_CFG_LED_MODE_SHIFT));
+	NIG_WR(NIG_REGISTERS_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
+
+	switch (speed) {
+	case SPEED_1000:
+		val = 0x3ff;
+		break;
+
+	case SPEED_100:
+		val = 0x7ff;
+		break;
+
+	case SPEED_10:
+		val = 0xfff;
+		break;
+
+	default:	/* 2.5G and up */
+		val = 0xff;
+		break;
+	}
+	NIG_WR(NIG_REGISTERS_LED_CONTROL_BLINK_RATE_P0 + port*4, val);
+	NIG_WR(NIG_REGISTERS_LED_CONTROL_BLINK_RATE_ENA_P0 + port*4, 1);
+}
+
+static void bnx2x_leds_unset(struct bnx2x *bp)
+{
+	int port = bp->port;
+
+	NIG_WR(NIG_REGISTERS_LED_10G_P0 + port*4, 0);
+
+	NIG_WR(NIG_REGISTERS_LED_MODE_P0 + port*4, SHARED_HW_CFG_LED_MAC1);
+}
+
+static inline u32 bnx2x_bits_en(struct bnx2x *bp, u32 block, u32 reg,
+				u32 bits)
+{
+	u32 val = REG_RD(bp, block, reg);
+
+	val |= bits;
+	REG_WR(bp, block, reg, val);
+	return val;
+}
+
+static inline u32 bnx2x_bits_dis(struct bnx2x *bp, u32 block, u32 reg,
+				 u32 bits)
+{
+	u32 val = REG_RD(bp, block, reg);
+
+	val &= ~bits;
+	REG_WR(bp, block, reg, val);
+	return val;
+}
+
+static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val)
+{
+	int rc;
+	u32 tmp, i;
+	int port = bp->port;
+	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+
+	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
+
+		tmp = REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_MODE);
+		tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
+		EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
+		REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_MODE);
+		udelay(40);
+	}
+
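+	/* build the clause 22 COMM frame: PHY address in bits [25:21],
+	 * register number in bits [20:16], write data in bits [15:0]
+	 */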
+	tmp = ((bp->phy_addr << 21) | (reg << 16) |
+	       (val & EMAC_MDIO_COMM_DATA) |
+	       EMAC_MDIO_COMM_COMMAND_WRITE_22 |
+	       EMAC_MDIO_COMM_START_BUSY);
+	EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+	for (i = 0; i < 50; i++) {
+		udelay(10);
+
+		tmp = REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_COMM);
+		if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+			udelay(5);
+			break;
+		}
+	}
+
+	if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+		BNX2X_ERR("write phy register failed\n");
+
+		rc = -EBUSY;
+	} else {
+		rc = 0;
+	}
+
+	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
+
+		tmp = REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_MODE);
+		tmp |= EMAC_MDIO_MODE_AUTO_POLL;
+		EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
+	}
+
+	return rc;
+}
+
+static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
+{
+	int rc;
+	u32 val, i;
+	int port = bp->port;
+	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+
+	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
+
+		val = REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_MODE);
+		val &= ~EMAC_MDIO_MODE_AUTO_POLL;
+		EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
+		REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_MODE);
+		udelay(40);
+	}
+
+	val = ((bp->phy_addr << 21) | (reg << 16) |
+	       EMAC_MDIO_COMM_COMMAND_READ_22 |
+	       EMAC_MDIO_COMM_START_BUSY);
+	EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);
+
+	for (i = 0; i < 50; i++) {
+		udelay(10);
+
+		val = REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_COMM);
+		if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+			val &= EMAC_MDIO_COMM_DATA;
+			break;
+		}
+	}
+
+	if (val & EMAC_MDIO_COMM_START_BUSY) {
+		BNX2X_ERR("read phy register failed\n");
+
+		*ret_val = 0x0;
+		rc = -EBUSY;
+	} else {
+		*ret_val = val;
+		rc = 0;
+	}
+
+	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
+
+		val = REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_MODE);
+		val |= EMAC_MDIO_MODE_AUTO_POLL;
+		EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
+	}
+
+	return rc;
+}
+
+static int bnx2x_mdio45_write(struct bnx2x *bp, u32 reg, u32 addr, u32 val)
+{
+	int rc = 0;
+	u32 tmp, i;
+	int port = bp->port;
+	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+
+	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
+
+		tmp = REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_MODE);
+		tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
+		EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
+		REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_MODE);
+		udelay(40);
+	}
+
+	/* set clause 45 mode */
+	tmp = REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_MODE);
+	tmp |= EMAC_MDIO_MODE_CLAUSE_45;
+	EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
+
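+	/* clause 45 access is a two cycle operation:
+	 * an address cycle followed by a data cycle
+	 */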
+	/* address */
+	tmp = ((bp->phy_addr << 21) | (reg << 16) | addr |
+	       EMAC_MDIO_COMM_COMMAND_ADDRESS |
+	       EMAC_MDIO_COMM_START_BUSY);
+	EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+	for (i = 0; i < 50; i++) {
+		udelay(10);
+
+		tmp = REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_COMM);
+		if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+			udelay(5);
+			break;
+		}
+	}
+
+	if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+		BNX2X_ERR("write phy register failed\n");
+
+		rc = -EBUSY;
+	} else {
+		/* data */
+		tmp = ((bp->phy_addr << 21) | (reg << 16) | val |
+		       EMAC_MDIO_COMM_COMMAND_WRITE_45 |
+		       EMAC_MDIO_COMM_START_BUSY);
+		EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+		for (i = 0; i < 50; i++) {
+			udelay(10);
+
+			tmp = REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_COMM);
+			if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+				udelay(5);
+				break;
+			}
+		}
+
+		if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+			BNX2X_ERR("write phy register failed\n");
+
+			rc = -EBUSY;
+		}
+	}
+
+	/* unset clause 45 mode */
+	tmp = REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_MODE);
+	tmp &= ~EMAC_MDIO_MODE_CLAUSE_45;
+	EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
+
+	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
+
+		tmp = REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_MODE);
+		tmp |= EMAC_MDIO_MODE_AUTO_POLL;
+		EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
+	}
+
+	return rc;
+}
+
+static int bnx2x_mdio45_read(struct bnx2x *bp, u32 reg, u32 addr,
+			     u32 *ret_val)
+{
+	int rc = 0;
+	u32 val, i;
+	int port = bp->port;
+	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+
+	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
+
+		val = REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_MODE);
+		val &= ~EMAC_MDIO_MODE_AUTO_POLL;
+		EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
+		REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_MODE);
+		udelay(40);
+	}
+
+	/* set clause 45 mode */
+	val = REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_MODE);
+	val |= EMAC_MDIO_MODE_CLAUSE_45;
+	EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
+
+	/* address */
+	val = ((bp->phy_addr << 21) | (reg << 16) | addr |
+	       EMAC_MDIO_COMM_COMMAND_ADDRESS |
+	       EMAC_MDIO_COMM_START_BUSY);
+	EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);
+
+	for (i = 0; i < 50; i++) {
+		udelay(10);
+
+		val = REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_COMM);
+		if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+			udelay(5);
+			break;
+		}
+	}
+
+	if (val & EMAC_MDIO_COMM_START_BUSY) {
+		BNX2X_ERR("read phy register failed\n");
+
+		*ret_val = 0;
+		rc = -EBUSY;
+	} else {
+		/* data */
+		val = ((bp->phy_addr << 21) | (reg << 16) |
+		       EMAC_MDIO_COMM_COMMAND_READ_45 |
+		       EMAC_MDIO_COMM_START_BUSY);
+		EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);
+
+		for (i = 0; i < 50; i++) {
+			udelay(10);
+
+			val = REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_COMM);
+			if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+				val &= EMAC_MDIO_COMM_DATA;
+				break;
+			}
+		}
+
+		if (val & EMAC_MDIO_COMM_START_BUSY) {
+			BNX2X_ERR("read phy register failed\n");
+
+			val = 0;
+			rc = -EBUSY;
+		}
+
+		*ret_val = val;
+	}
+
+	/* unset clause 45 mode */
+	val = REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_MODE);
+	val &= ~EMAC_MDIO_MODE_CLAUSE_45;
+	EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
+
+	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
+
+		val = REG_RD(bp, emac_base, EMAC_REG_EMAC_MDIO_MODE);
+		val |= EMAC_MDIO_MODE_AUTO_POLL;
+		EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
+	}
+
+	return rc;
+}
+
+static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 reg, u32 addr, u32 val)
+{
+	int i;
+	u32 rd_val;
+
+	for (i = 0; i < 10; i++) {
+		bnx2x_mdio45_write(bp, reg, addr, val);
+		mdelay(5);
+		bnx2x_mdio45_read(bp, reg, addr, &rd_val);
+		/* if the value read back does not match what we wrote,
+		   write it again */
+		if (rd_val == val) {
+			return 0;
+		}
+	}
+	BNX2X_ERR("MDIO write in CL45 failed\n");
+	return -EBUSY;
+}
+
+/* copy command into DMAE command memory */
+static void bnx2x_post_dmae(struct bnx2x *bp, int idx)
+{
+	struct dmae_command *dmae = &bp->dmae;
+	int i;
+
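+	/* each command slot in the DMAE command memory is 14 dwords wide */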
+	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
+		REG_WR32(bp, GRCBASE_DMAE,
+			 (DMAE_REGISTERS_CMD_MEM + 14*4*idx) + i*4,
+			 *(((u32 *)dmae) + i));
+	}
+}
+
+/* DMAE command positions used
+ * Port0 14
+ * Port1 15
+ */
+static void bnx2x_wb_write_dmae(struct bnx2x *bp, u32 wb_addr, u32 *wb_write,
+				u32 wb_len)
+{
+	struct dmae_command *dmae = &bp->dmae;
+	int port = bp->port;
+	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
+
+	memcpy(bnx2x_sp(bp, wb_write[0]), wb_write, wb_len * 4);
+	memset(dmae, 0, sizeof(struct dmae_command));
+
+	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
+			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
+			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+			DMAE_CMD_ENDIANITY_DW_SWAP |
+			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
+	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_write));
+	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_write));
+	dmae->dst_addr_lo = wb_addr >> 2;
+	dmae->dst_addr_hi = 0;
+	dmae->len = wb_len;
+	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
+	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
+	dmae->comp_val = BNX2X_WB_COMP_VAL;
+	bnx2x_post_dmae(bp, port ? 15 : 14);
+
+	*wb_comp = 0;
+
+	REG_WR32(bp, GRCBASE_DMAE,
+		 (bp->port ? DMAE_REGISTERS_GO_C15 :
+			    DMAE_REGISTERS_GO_C14), 1);
+	udelay(5);
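+	/* poll until the DMAE engine posts the completion value back
+	 * to host memory (note: there is no timeout here)
+	 */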
+	while (*wb_comp != BNX2X_WB_COMP_VAL) {
+		udelay(5);
+	}
+}
+
+/****************************************************************************
+* Deferred Interrupt service functions
+****************************************************************************/
+
+static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
+{
+	u32 ld_pause;	/* local driver */
+	u32 lp_pause;	/* link partner */
+	u32 pause_result;
+
+	bp->flow_ctrl = 0;
+
+	/* resolve from gp_status when AN is complete and not in SGMII mode */
+	if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
+	    (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
+	    (!(bp->phy_flags & PHY_SGMII_FLAG)) &&
+	    (XGXS_EXT_PHY_TYPE(bp) == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {
+
+		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
+		bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
+				  &ld_pause);
+		bnx2x_mdio22_read(bp,
+			MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
+				  &lp_pause);
+		pause_result = (ld_pause &
+				MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) >> 5;
+		pause_result |= (lp_pause &
+				 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
+		DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
+
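+		/* pause_result: local {ASYM, PAUSE} advertisement in
+		 * bits [3:2], link partner's in bits [1:0]
+		 */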
+		switch (pause_result) {			/* ASYM P ASYM P */
+		case 0xb:				/*   1  0   1  1 */
+			bp->flow_ctrl = FLOW_CTRL_TX;
+			break;
+
+		case 0xe:				/*   1  1   1  0 */
+			bp->flow_ctrl = FLOW_CTRL_RX;
+			break;
+
+		case 0x5:				/*   0  1   0  1 */
+		case 0x7:				/*   0  1   1  1 */
+		case 0xd:				/*   1  1   0  1 */
+		case 0xf:				/*   1  1   1  1 */
+			bp->flow_ctrl = FLOW_CTRL_BOTH;
+			break;
+
+		default:
+			break;
+		}
+
+	} else { /* forced mode */
+		bp->flow_ctrl = bp->req_flow_ctrl;
+		if (bp->req_flow_ctrl == FLOW_CTRL_AUTO) {
+			bp->flow_ctrl = FLOW_CTRL_BOTH;
+		}
+	}
+	DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl);
+}
+
+static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
+{
+	bp->link_status = 0;
+
+	if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
+		DP(NETIF_MSG_LINK, "link up\n");
+
+		bp->link_up = 1;
+		bp->link_status |= LINK_STATUS_LINK_UP;
+
+		if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS) {
+			bp->duplex = DUPLEX_FULL;
+		} else {
+			bp->duplex = DUPLEX_HALF;
+		}
+
+		bnx2x_flow_ctrl_resolve(bp, gp_status);
+
+		switch (gp_status & GP_STATUS_SPEED_MASK) {
+		case GP_STATUS_10M:
+			bp->line_speed = SPEED_10;
+			if (bp->duplex == DUPLEX_FULL) {
+				bp->link_status |= LINK_10TFD;
+			} else {
+				bp->link_status |= LINK_10THD;
+			}
+			break;
+
+		case GP_STATUS_100M:
+			bp->line_speed = SPEED_100;
+			if (bp->duplex == DUPLEX_FULL) {
+				bp->link_status |= LINK_100TXFD;
+			} else {
+				bp->link_status |= LINK_100TXHD;
+			}
+			break;
+
+		case GP_STATUS_1G:
+		case GP_STATUS_1G_KX:
+			bp->line_speed = SPEED_1000;
+			if (bp->duplex == DUPLEX_FULL) {
+				bp->link_status |= LINK_1000TFD;
+			} else {
+				bp->link_status |= LINK_1000THD;
+			}
+			break;
+
+		case GP_STATUS_2_5G:
+			bp->line_speed = SPEED_2500;
+			if (bp->duplex == DUPLEX_FULL) {
+				bp->link_status |= LINK_2500TFD;
+			} else {
+				bp->link_status |= LINK_2500THD;
+			}
+			break;
+
+		case GP_STATUS_5G:
+		case GP_STATUS_6G:
+			BNX2X_ERR("link speed unsupported  gp_status 0x%x\n",
+				  gp_status);
+			break;
+
+		case GP_STATUS_10G_KX4:
+		case GP_STATUS_10G_HIG:
+		case GP_STATUS_10G_CX4:
+			bp->line_speed = SPEED_10000;
+			bp->link_status |= LINK_10GTFD;
+			break;
+
+		case GP_STATUS_12G_HIG:
+			bp->line_speed = SPEED_12000;
+			bp->link_status |= LINK_12GTFD;
+			break;
+
+		case GP_STATUS_12_5G:
+			bp->line_speed = SPEED_12500;
+			bp->link_status |= LINK_12_5GTFD;
+			break;
+
+		case GP_STATUS_13G:
+			bp->line_speed = SPEED_13000;
+			bp->link_status |= LINK_13GTFD;
+			break;
+
+		case GP_STATUS_15G:
+			bp->line_speed = SPEED_15000;
+			bp->link_status |= LINK_15GTFD;
+			break;
+
+		case GP_STATUS_16G:
+			bp->line_speed = SPEED_16000;
+			bp->link_status |= LINK_16GTFD;
+			break;
+
+		default:
+			BNX2X_ERR("link speed unsupported  gp_status 0x%x\n",
+				  gp_status);
+			break;
+		}
+
+		bp->link_status |= LINK_STATUS_SERDES_LINK;
+
+		if (bp->req_autoneg & AUTONEG_SPEED) {
+			bp->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
+
+			if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
+				bp->link_status |=
+					LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+			}
+			if (bp->autoneg & AUTONEG_PARALLEL) {
+				bp->link_status |=
+					LINK_STATUS_PARALLEL_DETECTION_USED;
+			}
+		}
+
+		if (bp->flow_ctrl & FLOW_CTRL_TX) {
+		       bp->link_status |= LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
+		}
+		if (bp->flow_ctrl & FLOW_CTRL_RX) {
+		       bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
+		}
+
+	} else { /* link_down */
+		DP(NETIF_MSG_LINK, "link down\n");
+
+		bp->link_up = 0;
+
+		bp->line_speed = 0;
+		bp->duplex = DUPLEX_FULL;
+		bp->flow_ctrl = 0;
+	}
+
+	DP(NETIF_MSG_LINK, "gp_status 0x%x  link_up %d\n"
+	   DP_LEVEL "  line_speed %d  duplex %d  flow_ctrl 0x%x"
+		    "  link_status 0x%x\n",
+	   gp_status, bp->link_up, bp->line_speed, bp->duplex, bp->flow_ctrl,
+	   bp->link_status);
+}
+
+static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
+{
+	int port = bp->port;
+
+	/* first reset all status bits;
+	 * we assume only one line will change at a time */
+	bnx2x_bits_dis(bp, GRCBASE_NIG,
+		       NIG_REGISTERS_STATUS_INTERRUPT_PORT0 + port*4,
+		       (NIG_XGXS0_LINK_STATUS |
+			NIG_SERDES0_LINK_STATUS |
+			NIG_STATUS_INTERRUPT_XGXS0_LINK10G));
+	if (bp->link_up) {
+		if (is_10g) {
+			/* Disable the 10G link interrupt
+			 * by writing 1 to the status register
+			 */
+			DP(NETIF_MSG_LINK, "10G XGXS link up\n");
+			bnx2x_bits_en(bp, GRCBASE_NIG,
+				      NIG_REGISTERS_STATUS_INTERRUPT_PORT0 +
+				      port*4,
+				      NIG_STATUS_INTERRUPT_XGXS0_LINK10G);
+
+		} else if (bp->phy_flags & PHY_XGSX_FLAG) {
+			/* Disable the link interrupt
+			 * by writing 1 to the relevant lane
+			 * in the status register
+			 */
+			DP(NETIF_MSG_LINK, "1G XGXS link up\n");
+			bnx2x_bits_en(bp, GRCBASE_NIG,
+				      NIG_REGISTERS_STATUS_INTERRUPT_PORT0 +
+				      port*4, ((1 << bp->ser_lane) <<
+					       NIG_XGXS0_LINK_STATUS_SIZE));
+
+		} else { /* SerDes */
+			DP(NETIF_MSG_LINK, "SerDes link up\n");
+			/* Disable the link interrupt
+			 * by writing 1 to the status register
+			 */
+			bnx2x_bits_en(bp, GRCBASE_NIG,
+				      NIG_REGISTERS_STATUS_INTERRUPT_PORT0 +
+				      port*4, NIG_SERDES0_LINK_STATUS);
+		}
+
+	} else { /* link_down */
+	}
+}
+
+static int bnx2x_ext_phy_is_link_up(struct bnx2x *bp)
+{
+	u32 ext_phy_type;
+	u32 ext_phy_addr;
+	u32 local_phy;
+	u32 val = 0;
+	u32 rx_sd, pcs_status;
+
+	if (bp->phy_flags & PHY_XGSX_FLAG) {
+		local_phy = bp->phy_addr;
+		ext_phy_addr = ((bp->ext_phy_config &
+				 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
+				PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
+		bp->phy_addr = (u8)ext_phy_addr;
+
+		ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
+		switch (ext_phy_type) {
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+			DP(NETIF_MSG_LINK, "XGXS Direct\n");
+			val = 1;
+			break;
+
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+			DP(NETIF_MSG_LINK, "XGXS 8705\n");
+			bnx2x_mdio45_read(bp, EXT_PHY_OPT_WIS_DEVAD,
+					  EXT_PHY_OPT_LASI_STATUS, &val);
+			DP(NETIF_MSG_LINK, "8705 LASI status is %d\n", val);
+
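+			/* the LASI status bits latch, so read twice to get
+			 * the current state on the second read
+			 */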
+			bnx2x_mdio45_read(bp, EXT_PHY_OPT_WIS_DEVAD,
+					  EXT_PHY_OPT_LASI_STATUS, &val);
+			DP(NETIF_MSG_LINK, "8705 LASI status is %d\n", val);
+
+			bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
+					  EXT_PHY_OPT_PMD_RX_SD, &val);
+			val = (val & 0x1);
+			break;
+
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+			DP(NETIF_MSG_LINK, "XGXS 8706\n");
+			bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
+					  EXT_PHY_OPT_LASI_STATUS, &val);
+			DP(NETIF_MSG_LINK, "8706 LASI status is %d\n", val);
+
+			bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
+					  EXT_PHY_OPT_LASI_STATUS, &val);
+			DP(NETIF_MSG_LINK, "8706 LASI status is %d\n", val);
+
+			bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
+					  EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
+			bnx2x_mdio45_read(bp, EXT_PHY_OPT_PCS_DEVAD,
+					 EXT_PHY_OPT_PCS_STATUS, &pcs_status);
+			DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x"
+			   "  pcs_status 0x%x\n", rx_sd, pcs_status);
+			/* link is up if both bit 0 of pmd_rx and
+			 * bit 0 of pcs_status are set
+			 */
+			val = (rx_sd & pcs_status);
+			break;
+
+		default:
+			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
+			   bp->ext_phy_config);
+			val = 0;
+			break;
+		}
+		bp->phy_addr = local_phy;
+
+	} else { /* SerDes */
+		ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
+		switch (ext_phy_type) {
+		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
+			DP(NETIF_MSG_LINK, "SerDes Direct\n");
+			val = 1;
+			break;
+
+		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
+			DP(NETIF_MSG_LINK, "SerDes 5482\n");
+			val = 1;
+			break;
+
+		default:
+			DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
+			   bp->ext_phy_config);
+			val = 0;
+			break;
+		}
+	}
+
+	return val;
+}
+
+static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
+{
+	int port = bp->port;
+	u32 bmac_addr = port ? NIG_REGISTERS_INGRESS_BMAC1_MEM :
+			       NIG_REGISTERS_INGRESS_BMAC0_MEM;
+	u32 wb_write[2];
+	u32 val;
+
+	DP(NETIF_MSG_LINK, "enabling BigMAC\n");
+	/* reset and unreset the BigMac */
+	REG_WR(bp, GRCBASE_MISC, MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+	REG_WR(bp, GRCBASE_MISC, MISC_REGISTERS_RESET_REG_2_SET,
+	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
+	/* enable access for bmac registers */
+	NIG_WR(NIG_REGISTERS_BMAC0_REGS_OUT_EN + port*4, 0x1);
+
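+	/* the BigMAC registers below are 64 bits wide, hence the
+	 * two-dword wb_write buffers and the DMAE based writes
+	 */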
+	/* XGXS control */
+	wb_write[0] = 0x3c;
+	wb_write[1] = 0;
+	bnx2x_wb_write_dmae(bp,
+			    BMAC_ADDR(BIGMAC_REGISTER_BMAC_XGXS_CONTROL),
+			    wb_write, 2);
+
+	/* tx MAC SA */
+	wb_write[0] = ((bp->dev->dev_addr[2] << 24) |
+		       (bp->dev->dev_addr[3] << 16) |
+		       (bp->dev->dev_addr[4] << 8) |
+		       bp->dev->dev_addr[5]);
+	wb_write[1] = ((bp->dev->dev_addr[0] << 8) |
+		       bp->dev->dev_addr[1]);
+	bnx2x_wb_write_dmae(bp, BMAC_ADDR(BIGMAC_REGISTER_TX_SOURCE_ADDR),
+			    wb_write, 2);
+
+	/* tx control */
+	val = 0x0000c0;
+	if (bp->flow_ctrl & FLOW_CTRL_TX) {
+		val |= 0x800000;
+	}
+	wb_write[0] = val;
+	wb_write[1] = 0;
+	bnx2x_wb_write_dmae(bp, BMAC_ADDR(BIGMAC_REGISTER_TX_CONTROL),
+			    wb_write, 2);
+
+	/* set tx mtu */
+	wb_write[0] = bp->rx_buf_use_size;
+	wb_write[1] = 0;
+	bnx2x_wb_write_dmae(bp, BMAC_ADDR(BIGMAC_REGISTER_TX_MAX_SIZE),
+			    wb_write, 2);
+
+	/* mac control */
+	val = 0x3;
+	if (is_lb) {
+		val |= 0x4;
+		DP(NETIF_MSG_LINK, "enable bmac loopback\n");
+	}
+	wb_write[0] = val;
+	wb_write[1] = 0;
+	bnx2x_wb_write_dmae(bp, BMAC_ADDR(BIGMAC_REGISTER_BMAC_CONTROL),
+			    wb_write, 2);
+
+	/* rx control set to don't strip crc */
+	val = 0x14;
+	if (bp->flow_ctrl & FLOW_CTRL_RX) {
+		val |= 0x20;
+	}
+	wb_write[0] = val;
+	wb_write[1] = 0;
+	bnx2x_wb_write_dmae(bp, BMAC_ADDR(BIGMAC_REGISTER_RX_CONTROL),
+			    wb_write, 2);
+
+	/* set rx mtu */
+	wb_write[0] = bp->rx_buf_use_size;
+	wb_write[1] = 0;
+	bnx2x_wb_write_dmae(bp, BMAC_ADDR(BIGMAC_REGISTER_RX_MAX_SIZE),
+			    wb_write, 2);
+
+	/* set cnt max size */
+	wb_write[0] = bp->rx_buf_use_size;
+	wb_write[1] = 0;
+	bnx2x_wb_write_dmae(bp, BMAC_ADDR(BIGMAC_REGISTER_CNT_MAX_SIZE),
+			    wb_write, 2);
+
+	/* configure safc */
+	wb_write[0] = 0x1000200;
+	wb_write[1] = 0;
+	bnx2x_wb_write_dmae(bp,
+			    BMAC_ADDR(BIGMAC_REGISTER_RX_LLFC_MSG_FLDS),
+			    wb_write, 2);
+
+	/* fix for emulation */
+	if (CHIP_REV(bp) == CHIP_REV_EMUL) {
+		BMAC_WR(BIGMAC_REGISTER_TX_PAUSE_THRESHOLD, 0xF000);
+		BMAC_WR(BIGMAC_REGISTER_TX_PAUSE_THRESHOLD + 4, 0);
+	}
+
+	/* reset old bmac stats */
+	memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
+
+	NIG_WR(NIG_REGISTERS_XCM0_OUT_EN + port*4, 0x0);
+
+	/* select XGXS */
+	NIG_WR(NIG_REGISTERS_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
+	NIG_WR(NIG_REGISTERS_XGXS_LANE_SEL_P0 + port*4, 0x0);
+
+	/* disable the NIG in/out to the emac */
+	NIG_WR(NIG_REGISTERS_EMAC0_IN_EN + port*4, 0x0);
+	NIG_WR(NIG_REGISTERS_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
+	NIG_WR(NIG_REGISTERS_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
+
+	/* enable the NIG in/out to the bmac */
+	NIG_WR(NIG_REGISTERS_EGRESS_EMAC0_PORT + port*4, 0x0);
+
+	NIG_WR(NIG_REGISTERS_BMAC0_IN_EN + port*4, 0x1);
+	val = 0;
+	if (bp->flow_ctrl & FLOW_CTRL_TX) {
+		val = 1;
+	}
+	NIG_WR(NIG_REGISTERS_BMAC0_PAUSE_OUT_EN + port*4, val);
+	NIG_WR(NIG_REGISTERS_BMAC0_OUT_EN + port*4, 0x1);
+
+	bp->phy_flags |= PHY_BMAC_FLAG;
+
+	atomic_set(&bp->stats_state, STATS_STATE_ENABLE);
+}
+
+static void bnx2x_emac_enable(struct bnx2x *bp)
+{
+	u32 val;
+	int port = bp->port;
+	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+
+	DP(NETIF_MSG_LINK, "enabling EMAC\n");
+	/* reset and unreset the emac core */
+	REG_WR(bp, GRCBASE_MISC, MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+	REG_WR(bp, GRCBASE_MISC, MISC_REGISTERS_RESET_REG_2_SET,
+	       (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+
+	/* enable emac and not bmac */
+	NIG_WR(NIG_REGISTERS_EGRESS_EMAC0_PORT + port*4, 1);
+
+	/* for Palladium (emulation) */
+	if (CHIP_REV(bp) == CHIP_REV_EMUL) {
+		/* Use lane 1 (of lanes 0-3) */
+		NIG_WR(NIG_REGISTERS_XGXS_LANE_SEL_P0 + port*4, 1);
+		NIG_WR(NIG_REGISTERS_XGXS_SERDES0_MODE_SEL + port*4, 1);
+	}
+	/* for FPGA */
+	else if (CHIP_REV(bp) == CHIP_REV_FPGA) {
+		/* Use lane 1 (of lanes 0-3) */
+		NIG_WR(NIG_REGISTERS_XGXS_LANE_SEL_P0 + port*4, 1);
+		NIG_WR(NIG_REGISTERS_XGXS_SERDES0_MODE_SEL + port*4, 0);
+	}
+	/* ASIC */
+	else {
+		if (bp->phy_flags & PHY_XGSX_FLAG) {
+			DP(NETIF_MSG_LINK, "XGXS\n");
+			/* select the master lane (out of lanes 0-3) */
+			NIG_WR(NIG_REGISTERS_XGXS_LANE_SEL_P0 + port*4,
+			       bp->ser_lane);
+			/* select XGXS */
+			NIG_WR(NIG_REGISTERS_XGXS_SERDES0_MODE_SEL + port*4,
+			       1);
+
+		} else { /* SerDes */
+			DP(NETIF_MSG_LINK, "SerDes\n");
+			/* select SerDes */
+			NIG_WR(NIG_REGISTERS_XGXS_SERDES0_MODE_SEL + port*4,
+			       0);
+		}
+	}
+
+	/* enable emac */
+	NIG_WR(NIG_REGISTERS_NIG_EMAC0_EN + port*4, 1);
+
+	/* init emac - use read-modify-write */
+	/* the reset bit self clears; poll until it drops */
+	val = REG_RD(bp, emac_base, EMAC_REG_EMAC_MODE);
+	EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
+
+	do {
+		val = REG_RD(bp, emac_base, EMAC_REG_EMAC_MODE);
+		DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
+	} while (val & EMAC_MODE_RESET);
+
+	/* reset tx part */
+	EMAC_WR(EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_RESET);
+
+	do {
+		val = REG_RD(bp, emac_base, EMAC_REG_EMAC_TX_MODE);
+		DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
+	} while (val & EMAC_TX_MODE_RESET);
+
+	if (CHIP_REV_IS_SLOW(bp)) {
+		/* config GMII mode */
+		val = REG_RD(bp, emac_base, EMAC_REG_EMAC_MODE);
+		EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
+
+	} else { /* ASIC */
+		/* pause enable/disable */
+		bnx2x_bits_dis(bp, emac_base, EMAC_REG_EMAC_RX_MODE,
+			       EMAC_RX_MODE_FLOW_EN);
+		if (bp->flow_ctrl & FLOW_CTRL_RX) {
+			bnx2x_bits_en(bp, emac_base, EMAC_REG_EMAC_RX_MODE,
+				      EMAC_RX_MODE_FLOW_EN);
+		}
+
+		bnx2x_bits_dis(bp, emac_base, EMAC_REG_EMAC_TX_MODE,
+			       EMAC_TX_MODE_EXT_PAUSE_EN);
+		if (bp->flow_ctrl & FLOW_CTRL_TX) {
+			bnx2x_bits_en(bp, emac_base, EMAC_REG_EMAC_TX_MODE,
+				      EMAC_TX_MODE_EXT_PAUSE_EN);
+		}
+	}
+
+	/* KEEP_VLAN_TAG, promiscuous */
+	val = REG_RD(bp, emac_base, EMAC_REG_EMAC_RX_MODE);
+	val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
+	EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
+
+	/* identify magic packets */
+	val = REG_RD(bp, emac_base, EMAC_REG_EMAC_MODE);
+	EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_MPKT));
+
+	/* enable emac for jumbo packets */
+	EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE,
+		(EMAC_RX_MTU_SIZE_JUMBO_ENA | (bp->rx_buf_use_size)));
+
+	/* strip CRC */
+	NIG_WR(NIG_REGISTERS_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
+
+	val = ((bp->dev->dev_addr[0] << 8) |
+	       bp->dev->dev_addr[1]);
+	EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
+
+	val = ((bp->dev->dev_addr[2] << 24) |
+	       (bp->dev->dev_addr[3] << 16) |
+	       (bp->dev->dev_addr[4] << 8) |
+	       bp->dev->dev_addr[5]);
+	EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
+
+	/* disable the NIG in/out to the bmac */
+	NIG_WR(NIG_REGISTERS_BMAC0_IN_EN + port*4, 0x0);
+	NIG_WR(NIG_REGISTERS_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
+	NIG_WR(NIG_REGISTERS_BMAC0_OUT_EN + port*4, 0x0);
+
+	/* enable the NIG in/out to the emac */
+	NIG_WR(NIG_REGISTERS_EMAC0_IN_EN + port*4, 0x1);
+	val = 0;
+	if (bp->flow_ctrl & FLOW_CTRL_TX) {
+		val = 1;
+	}
+	NIG_WR(NIG_REGISTERS_EMAC0_PAUSE_OUT_EN + port*4, val);
+	NIG_WR(NIG_REGISTERS_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
+
+	if (CHIP_REV(bp) == CHIP_REV_FPGA) {
+		/* take the BigMac out of reset */
+		REG_WR(bp, GRCBASE_MISC, MISC_REGISTERS_RESET_REG_2_SET,
+		       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
+		/* enable access for bmac registers */
+		NIG_WR(NIG_REGISTERS_BMAC0_REGS_OUT_EN + port*4, 0x1);
+	}
+
+	bp->phy_flags |= PHY_EMAC_FLAG;
+
+	atomic_set(&bp->stats_state, STATS_STATE_ENABLE);
+}
+
+static void bnx2x_emac_program(struct bnx2x *bp)
+{
+	u16 mode = 0;
+	int port = bp->port;
+
+	DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
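+	/* clear any previously forced speed/duplex bits before
+	 * programming the new mode
+	 */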
+	bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400, EMAC_REG_EMAC_MODE,
+		       (EMAC_MODE_25G_MODE |
+			EMAC_MODE_PORT_MII_10M |
+			EMAC_MODE_HALF_DUPLEX));
+	switch (bp->line_speed) {
+	case SPEED_10:
+		mode |= EMAC_MODE_PORT_MII_10M;
+		break;
+
+	case SPEED_100:
+		mode |= EMAC_MODE_PORT_MII;
+		break;
+
+	case SPEED_1000:
+		mode |= EMAC_MODE_PORT_GMII;
+		break;
+
+	case SPEED_2500:
+		mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
+		break;
+
+	default:
+		/* 10G not valid for EMAC */
+		BNX2X_ERR("Invalid line_speed 0x%x\n", bp->line_speed);
+		break;
+	}
+
+	if (bp->duplex == DUPLEX_HALF) {
+		mode |= EMAC_MODE_HALF_DUPLEX;
+	}
+	bnx2x_bits_en(bp, GRCBASE_EMAC0 + port*0x400,
+		      EMAC_REG_EMAC_MODE, mode);
+
+	bnx2x_leds_set(bp, bp->line_speed);
+}
+
+static void bnx2x_set_sgmii_tx_driver(struct bnx2x *bp)
+{
+	u32 lp_up2;
+	u32 tx_driver;
+
+	/* read precomp */
+	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
+	bnx2x_mdio22_read(bp, MDIO_OVER_1G_LP_UP2, &lp_up2);
+
+	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_TX0);
+	bnx2x_mdio22_read(bp, MDIO_TX0_TX_DRIVER, &tx_driver);
+
+	/* bits [10:7] at lp_up2, positioned at [15:12] */
+	lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
+		   MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
+		  MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
+
+	if ((lp_up2 != 0) &&
+	    (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK))) {
+		/* replace tx_driver bits [15:12] */
+		tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
+		tx_driver |= lp_up2;
+		bnx2x_mdio22_write(bp, MDIO_TX0_TX_DRIVER, tx_driver);
+	}
+}
+
+static void bnx2x_pbf_update(struct bnx2x *bp)
+{
+	int port = bp->port;
+	u32 init_crd, crd;
+	u32 count = 1000;
+	u32 pause = 0;
+
+	/* disable port */
+	REG_WR(bp, GRCBASE_PBF,
+	       PBF_REGISTERS_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
+
+	/* wait for init credit */
+	init_crd = REG_RD(bp, GRCBASE_PBF,
+			  PBF_REGISTERS_P0_INIT_CRD + port*4);
+	crd = REG_RD(bp, GRCBASE_PBF, PBF_REGISTERS_P0_CREDIT + port*8);
+	DP(NETIF_MSG_LINK, "init_crd 0x%x  crd 0x%x\n", init_crd, crd);
+
+	while ((init_crd != crd) && count) {
+		mdelay(5);
+
+		crd = REG_RD(bp, GRCBASE_PBF,
+			     PBF_REGISTERS_P0_CREDIT + port*8);
+		count--;
+	}
+	crd = REG_RD(bp, GRCBASE_PBF, PBF_REGISTERS_P0_CREDIT + port*8);
+	if (init_crd != crd) {
+		/* TBD? Assert if still init_crd != crd */
+		BNX2X_ERR("BUG! init_crd 0x%x != crd 0x%x\n", init_crd, crd);
+	}
+
+	if (bp->flow_ctrl & FLOW_CTRL_RX) {
+		pause = 1;
+	}
+	REG_WR(bp, GRCBASE_PBF,
+	       PBF_REGISTERS_P0_PAUSE_ENABLE + port*4, pause);
+	if (pause) {
+		/* update threshold */
+		REG_WR(bp, GRCBASE_PBF,
+		       PBF_REGISTERS_P0_ARB_THRSH + port*4, 0);
+		/* update init credit */
+		init_crd = 778;		/* (800-18-4) */
+
+	} else {
+		/* update threshold */
+		REG_WR(bp, GRCBASE_PBF,
+		       PBF_REGISTERS_P0_ARB_THRSH + port*4,
+		       bp->rx_buf_use_size/16);
+		/* update init credit */
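+		/* the credit unit appears to be 16 bytes (as in ARB_THRSH
+		 * above); the per speed constant grows with the line rate
+		 */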
+		switch (bp->line_speed) {
+		case SPEED_10:
+		case SPEED_100:
+		case SPEED_1000:
+			init_crd = bp->rx_buf_use_size/16 + 55 - 22;
+			break;
+
+		case SPEED_2500:
+			init_crd = bp->rx_buf_use_size/16 + 138 - 22;
+			break;
+
+		case SPEED_10000:
+			init_crd = bp->rx_buf_use_size/16 + 553 - 22;
+			break;
+
+		default:
+			BNX2X_ERR("Invalid line_speed 0x%x\n",
+				  bp->line_speed);
+			break;
+		}
+	}
+	REG_WR(bp, GRCBASE_PBF, PBF_REGISTERS_P0_INIT_CRD + port*4, init_crd);
+	DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
+	   bp->line_speed, init_crd);
+
+	/* probe the credit changes */
+	REG_WR(bp, GRCBASE_PBF, PBF_REGISTERS_INIT_P0 + port*4, 0x1);
+	mdelay(5);
+	REG_WR(bp, GRCBASE_PBF, PBF_REGISTERS_INIT_P0 + port*4, 0x0);
+
+	/* enable port */
+	REG_WR(bp, GRCBASE_PBF,
+	       PBF_REGISTERS_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
+}
+
+static void bnx2x_update_mng(struct bnx2x *bp)
+{
+	if (!nomcp) {
+		SHMEM_WR(bp, drv_fw_mb[bp->port].link_status,
+			 bp->link_status);
+	}
+}
+
+static void bnx2x_link_report(struct bnx2x *bp)
+{
+	if (bp->link_up) {
+		netif_carrier_on(bp->dev);
+		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
+
+		printk("%d Mbps ", bp->line_speed);
+
+		if (bp->duplex == DUPLEX_FULL)
+			printk("full duplex");
+		else
+			printk("half duplex");
+
+		if (bp->flow_ctrl) {
+			if (bp->flow_ctrl & FLOW_CTRL_RX) {
+				printk(", receive ");
+				if (bp->flow_ctrl & FLOW_CTRL_TX)
+					printk("& transmit ");
+			} else {
+				printk(", transmit ");
+			}
+			printk("flow control ON");
+		}
+		printk("\n");
+
+	} else { /* link_down */
+		netif_carrier_off(bp->dev);
+		printk(KERN_INFO PFX "%s NIC Link is Down\n", bp->dev->name);
+	}
+}
+
+static void bnx2x_link_up(struct bnx2x *bp)
+{
+	int port = bp->port;
+
+	/* PBF - link up */
+	bnx2x_pbf_update(bp);
+
+	/* disable drain */
+	NIG_WR(NIG_REGISTERS_EGRESS_DRAIN0_MODE + port*4, 0);
+
+	/* update shared memory */
+	bnx2x_update_mng(bp);
+
+	/* indicate link up */
+	bnx2x_link_report(bp);
+}
+
+static void bnx2x_link_down(struct bnx2x *bp)
+{
+	int port = bp->port;
+
+	/* notify stats */
+	atomic_set(&bp->stats_state, STATS_STATE_STOP);
+	DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
+
+	/* indicate link down */
+	bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG);
+
+	/* reset BigMac */
+	REG_WR(bp, GRCBASE_MISC, MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
+	/* ignore drain flag interrupt */
+	/* activate nig drain */
+	NIG_WR(NIG_REGISTERS_EGRESS_DRAIN0_MODE + port*4, 1);
+
+	/* update shared memory */
+	bnx2x_update_mng(bp);
+
+	/* indicate link down */
+	bnx2x_link_report(bp);
+}
+
+/* This function is called upon link interrupt */
+static void bnx2x_link_update(struct bnx2x *bp)
+{
+	u32 gp_status;
+	int port = bp->port;
+	int i;
+	int link_10g;
+
+	DP(NETIF_MSG_LINK, "port %x, is xgxs %x, stat_mask 0x%x, "
+	   "int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
+	   " 10G %x, XGXS_LINK %x\n",
+	   port, (bp->phy_flags & PHY_XGSX_FLAG),
+	   REG_RD(bp, GRCBASE_NIG,
+		  NIG_REGISTERS_STATUS_INTERRUPT_PORT0 + port*4),
+	   REG_RD(bp, GRCBASE_NIG,
+		  NIG_REGISTERS_MASK_INTERRUPT_PORT0 + port*4),
+	   bp->nig_mask,
+	   REG_RD(bp, GRCBASE_NIG,
+		  NIG_REGISTERS_EMAC0_STATUS_MISC_MI_INT + port*0x18),
+	   REG_RD(bp, GRCBASE_NIG,
+		  NIG_REGISTERS_SERDES0_STATUS_LINK_STATUS + port*0x3c),
+	   REG_RD(bp, GRCBASE_NIG,
+		  NIG_REGISTERS_XGXS0_STATUS_LINK10G + port*0x68),
+	   REG_RD(bp, GRCBASE_NIG,
+		  NIG_REGISTERS_XGXS0_STATUS_LINK_STATUS + port*0x68)
+	);
+
+	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS);
+	/* avoid fast toggling */
+	for (i = 0; i < 10; i++) {
+		mdelay(10);
+		bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1,
+				  &gp_status);
+	}
+
+	bnx2x_link_settings_status(bp, gp_status);
+
+	/* anything 10 and over uses the bmac */
+	link_10g = ((bp->line_speed >= SPEED_10000) &&
+		    (bp->line_speed <= SPEED_16000));
+
+	bnx2x_link_int_ack(bp, link_10g);
+
+	/* link is up only if both local phy and external phy are up */
+	if (bp->link_up && bnx2x_ext_phy_is_link_up(bp)) {
+		if (link_10g) {
+			bnx2x_bmac_enable(bp, 0);
+			bnx2x_leds_set(bp, SPEED_10000);
+
+		} else {
+			bnx2x_emac_enable(bp);
+			bnx2x_emac_program(bp);
+
+			/* AN complete? */
+			if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
+				if (!(bp->phy_flags & PHY_SGMII_FLAG)) {
+					bnx2x_set_sgmii_tx_driver(bp);
+				}/* Not SGMII */
+			}
+		}
+		bnx2x_link_up(bp);
+
+	} else { /* link down */
+		bnx2x_leds_unset(bp);
+		bnx2x_link_down(bp);
+	}
+}
+
+/****************************************************************************
+* Init service functions
+****************************************************************************/
+
+static void bnx2x_set_aer_mmd(struct bnx2x *bp)
+{
+	u16 offset = (bp->phy_flags & PHY_XGSX_FLAG) ?
+					(bp->phy_addr + bp->ser_lane) : 0;
+
+	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
+	bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
+}
+
+static void bnx2x_set_master_ln(struct bnx2x *bp)
+{
+	u32 new_master_ln;
+
+	/* set the master_ln for AN */
+	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
+	bnx2x_mdio22_read(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+			  &new_master_ln);
+	bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+			   (new_master_ln | bp->ser_lane));
+}
+
+static void bnx2x_reset_unicore(struct bnx2x *bp)
+{
+	u32 mii_control;
+	int i;
+
+	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
+	bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
+	/* reset the unicore */
+	bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
+			   (mii_control | MDIO_COMBO_IEEO_MII_CONTROL_RESET));
+
+	/* wait for the reset to self clear */
+	for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
+		udelay(5);
+
+		/* the reset erased the previous bank value */
+		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
+		bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
+				  &mii_control);
+
+		if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
+			udelay(5);
+			return;
+		}
+	}
+
+	/* TBD? Assert if the control is still in reset */
+	BNX2X_ERR("BUG! unicore is still in reset!\n");
+}
+
+static void bnx2x_set_swap_lanes(struct bnx2x *bp)
+{
+	/* Each two bits represents a lane number:
+	   no swap is 0123 => 0x1b, in which case the swap need not be
+	   enabled */
+
+	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
+	if (bp->rx_lane_swap != 0x1b) {
+		bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP,
+				   (bp->rx_lane_swap |
+				    MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
+				   MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
+	} else {
+		bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
+	}
+
+	if (bp->tx_lane_swap != 0x1b) {
+		bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP,
+				   (bp->tx_lane_swap |
+				    MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
+	} else {
+		bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
+	}
+}
+
+static void bnx2x_set_parallel_detection(struct bnx2x *bp)
+{
+	u32 control2;
+
+	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
+	bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+			  &control2);
+
+	if (bp->autoneg & AUTONEG_PARALLEL) {
+		control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
+	} else {
+		control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
+	}
+	bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+			   control2);
+
+	if (bp->phy_flags & PHY_XGSX_FLAG) {
+		DP(NETIF_MSG_LINK, "XGXS\n");
+		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT);
+
+		bnx2x_mdio22_write(bp,
+				   MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
+			       MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
+
+		bnx2x_mdio22_read(bp,
+				 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+				  &control2);
+
+		if (bp->autoneg & AUTONEG_PARALLEL) {
+			control2 |=
+		    MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
+		} else {
+			control2 &=
+		   ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
+		}
+		bnx2x_mdio22_write(bp,
+				 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+				   control2);
+	}
+}
+
+static void bnx2x_set_autoneg(struct bnx2x *bp)
+{
+	u32 reg_val;
+
+	/* CL37 Autoneg */
+	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
+	bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+	if ((bp->req_autoneg & AUTONEG_SPEED) &&
+	    (bp->autoneg & AUTONEG_CL37)) {
+		/* CL37 Autoneg Enabled */
+		reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
+	} else {
+		/* CL37 Autoneg Disabled */
+		reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+			     MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
+	}
+	bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+
+	/* Enable/Disable Autodetection */
+	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
+	bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
+	reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;
+
+	if ((bp->req_autoneg & AUTONEG_SPEED) &&
+	    (bp->autoneg & AUTONEG_SGMII_FIBER_AUTODET)) {
+		reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
+	} else {
+		reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
+	}
+	bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
+
+	/* Enable TetonII and BAM autoneg */
+	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_BAM_NEXT_PAGE);
+	bnx2x_mdio22_read(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+			  &reg_val);
+	if ((bp->req_autoneg & AUTONEG_SPEED) &&
+	    (bp->autoneg & AUTONEG_CL37) && (bp->autoneg & AUTONEG_BAM)) {
+		/* Enable BAM aneg Mode and TetonII aneg Mode */
+		reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
+			    MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
+	} else {
+		/* TetonII and BAM Autoneg Disabled */
+		reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
+			     MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
+	}
+	bnx2x_mdio22_write(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+			   reg_val);
+
+	/* Enable Clause 73 Aneg */
+	if ((bp->req_autoneg & AUTONEG_SPEED) &&
+	    (bp->autoneg & AUTONEG_CL73)) {
+		/* Enable BAM Station Manager */
+		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_USERB0);
+		bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL1,
+				   (MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
+			MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
+			MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN));
+
+		/* Merge CL73 and CL37 aneg resolution */
+		bnx2x_mdio22_read(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
+				  &reg_val);
+		bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
+				   (reg_val |
+			MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR));
+
+		/* Set the CL73 AN speed */
+		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB1);
+		bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB1_AN_ADV2, &reg_val);
+		/* In SerDes mode we support only 1G.
+		   In XGXS mode we support 10G KX4,
+		   but we currently do not support KR */
+		if (bp->phy_flags & PHY_XGSX_FLAG) {
+			DP(NETIF_MSG_LINK, "XGXS\n");
+			/* 10G KX4 */
+			reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
+		} else {
+			DP(NETIF_MSG_LINK, "SerDes\n");
+			/* 1000M KX */
+			reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
+		}
+		bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB1_AN_ADV2, reg_val);
+
+		/* CL73 Autoneg Enabled */
+		reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
+	} else {
+		/* CL73 Autoneg Disabled */
+		reg_val = 0;
+	}
+	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
+	bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
+}
+
+/* program SerDes, forced modes */
+static void bnx2x_program_serdes(struct bnx2x *bp)
+{
+	u32 reg_val;
+
+	/* program duplex, disable autoneg */
+	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
+	bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+	reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
+		     MDIO_COMBO_IEEO_MII_CONTROL_AN_EN);
+	if (bp->req_duplex == DUPLEX_FULL) {
+		reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
+	}
+	bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+
+	/* program speed
+	   - needed only if the speed is greater than 1G (2.5G or 10G) */
+	if (bp->req_line_speed > SPEED_1000) {
+		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
+		bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_MISC1, &reg_val);
+		/* clearing the speed value before setting the right speed */
+		reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK;
+		reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
+			    MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
+		if (bp->req_line_speed == SPEED_10000) {
+			reg_val |=
+				MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
+		}
+		bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_MISC1, reg_val);
+	}
+}
+
+static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x *bp)
+{
+	u32 val = 0;
+
+	/* configure the 48 bits for BAM AN */
+	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
+
+	/* set extended capabilities */
+	if (bp->advertising & ADVERTISED_2500baseT_Full) {
+		val |= MDIO_OVER_1G_UP1_2_5G;
+	}
+	if (bp->advertising & ADVERTISED_10000baseT_Full) {
+		val |= MDIO_OVER_1G_UP1_10G;
+	}
+	bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP1, val);
+
+	bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP3, 0);
+}
+
+static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x *bp)
+{
+	u32 an_adv;
+
+	/* for AN, we are always publishing full duplex */
+	an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
+
+	/* set pause */
+	switch (bp->pause_mode) {
+	case PAUSE_SYMMETRIC:
+		an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
+		break;
+	case PAUSE_ASYMMETRIC:
+		an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+		break;
+	case PAUSE_BOTH:
+		an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+		break;
+	case PAUSE_NONE:
+		an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
+		break;
+	}
+
+	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
+	bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv);
+}
+
+static void bnx2x_restart_autoneg(struct bnx2x *bp)
+{
+	if (bp->autoneg & AUTONEG_CL73) {
+		/* enable and restart clause 73 aneg */
+		u32 an_ctrl;
+
+		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
+		bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+				  &an_ctrl);
+		bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+				   (an_ctrl |
+				    MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
+				MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
+
+	} else {
+		/* Enable and restart BAM/CL37 aneg */
+		u32 mii_control;
+
+		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
+		bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
+				  &mii_control);
+		bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
+				   (mii_control |
+				    MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+				    MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
+	}
+}
+
+static void bnx2x_initialize_sgmii_process(struct bnx2x *bp)
+{
+	u32 control1;
+
+	/* in SGMII mode, the unicore is always slave */
+	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
+	bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+			  &control1);
+	control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
+	/* set SGMII mode (and not fiber) */
+	control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
+		      MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
+		      MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
+	bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+			   control1);
+
+	/* if forced mode */
+	if (!(bp->req_autoneg & AUTONEG_SPEED)) {
+		/* set speed, disable autoneg */
+		u32 mii_control;
+
+		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
+		bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
+				  &mii_control);
+		mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+			       MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK |
+				 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
+
+		switch (bp->req_line_speed) {
+		case SPEED_100:
+			mii_control |=
+				MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
+			break;
+		case SPEED_1000:
+			mii_control |=
+				MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
+			break;
+		case SPEED_10:
+			/* there is nothing to set for 10M */
+			break;
+		default:
+			/* invalid speed for SGMII */
+			DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n",
+			   bp->req_line_speed);
+			break;
+		}
+
+		/* setting the full duplex */
+		if (bp->req_duplex == DUPLEX_FULL) {
+			mii_control |=
+				MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
+		}
+		bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
+				   mii_control);
+
+	} else { /* AN mode */
+		/* enable and restart AN */
+		bnx2x_restart_autoneg(bp);
+	}
+}
+
+static void bnx2x_link_int_enable(struct bnx2x *bp)
+{
+	int port = bp->port;
+
+	/* setting the status to report on link up
+	   for either XGXS or SerDes */
+	bnx2x_bits_dis(bp, GRCBASE_NIG,
+		       NIG_REGISTERS_STATUS_INTERRUPT_PORT0 + port*4,
+		       (NIG_XGXS0_LINK_STATUS |
+			NIG_STATUS_INTERRUPT_XGXS0_LINK10G |
+			NIG_SERDES0_LINK_STATUS));
+
+	if (bp->phy_flags & PHY_XGSX_FLAG) {
+		/* TBD -
+		 * in force mode (not AN) we can enable just the relevant
+		 * interrupt
+		 * Even in AN we might enable only one according to the AN
+		 * speed mask
+		 */
+		bnx2x_bits_en(bp, GRCBASE_NIG,
+			      NIG_REGISTERS_MASK_INTERRUPT_PORT0 + port*4,
+			      (NIG_MASK_XGXS0_LINK_STATUS |
+			       NIG_MASK_XGXS0_LINK10G));
+		DP(NETIF_MSG_LINK, "enable XGXS interrupt\n");
+
+	} else { /* SerDes */
+		bnx2x_bits_en(bp, GRCBASE_NIG,
+			      NIG_REGISTERS_MASK_INTERRUPT_PORT0 + port*4,
+			      NIG_MASK_SERDES0_LINK_STATUS);
+		DP(NETIF_MSG_LINK, "enable SerDes interrupt\n");
+	}
+}
+
+static void bnx2x_ext_phy_init(struct bnx2x *bp)
+{
+	int port = bp->port;
+	u32 ext_phy_type;
+	u32 ext_phy_addr;
+	u32 local_phy;
+
+	if (bp->phy_flags & PHY_XGSX_FLAG) {
+		local_phy = bp->phy_addr;
+		ext_phy_addr = ((bp->ext_phy_config &
+				 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
+				PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
+
+		ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
+		switch (ext_phy_type) {
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+			DP(NETIF_MSG_LINK, "XGXS Direct\n");
+			break;
+
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+			DP(NETIF_MSG_LINK, "XGXS 8705\n");
+			bnx2x_bits_en(bp, GRCBASE_NIG,
+				      NIG_REGISTERS_MASK_INTERRUPT_PORT0 +
+				      port*4, NIG_MASK_MI_INT);
+			DP(NETIF_MSG_LINK, "enabled external phy int\n");
+
+			bp->phy_addr = ext_phy_type;
+			bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
+					    EXT_PHY_OPT_PMD_MISC_CNTL,
+					    0x8288);
+			bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
+					    EXT_PHY_OPT_PHY_IDENTIFIER,
+					    0x7fbf);
+			bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
+					    EXT_PHY_OPT_CMU_PLL_BYPASS,
+					    0x0100);
+			bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_WIS_DEVAD,
+					    EXT_PHY_OPT_LASI_CNTL, 0x1);
+			break;
+
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+			DP(NETIF_MSG_LINK, "XGXS 8706\n");
+			bnx2x_bits_en(bp, GRCBASE_NIG,
+				      NIG_REGISTERS_MASK_INTERRUPT_PORT0 +
+				      port*4, NIG_MASK_MI_INT);
+			DP(NETIF_MSG_LINK, "enabled external phy int\n");
+
+			bp->phy_addr = ext_phy_type;
+			bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
+					    EXT_PHY_OPT_PMD_DIGITAL_CNT,
+					    0x400);
+			bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
+					    EXT_PHY_OPT_LASI_CNTL, 0x1);
+			break;
+
+		default:
+			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
+			   bp->ext_phy_config);
+			break;
+		}
+		bp->phy_addr = local_phy;
+
+	} else { /* SerDes */
+/*		ext_phy_addr = ((bp->ext_phy_config &
+				 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
+				PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
+*/
+		ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
+		switch (ext_phy_type) {
+		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
+			DP(NETIF_MSG_LINK, "SerDes Direct\n");
+			break;
+
+		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
+			DP(NETIF_MSG_LINK, "SerDes 5482\n");
+			bnx2x_bits_en(bp, GRCBASE_NIG,
+				      NIG_REGISTERS_MASK_INTERRUPT_PORT0 +
+				      port*4, NIG_MASK_MI_INT);
+			DP(NETIF_MSG_LINK, "enabled extenal phy int\n");
+			break;
+
+		default:
+			DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
+			   bp->ext_phy_config);
+			break;
+		}
+	}
+}
+
+static void bnx2x_ext_phy_reset(struct bnx2x *bp)
+{
+	u32 ext_phy_type;
+	u32 ext_phy_addr;
+	u32 local_phy;
+
+	if (bp->phy_flags & PHY_XGSX_FLAG) {
+		ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
+		switch (ext_phy_type) {
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+			DP(NETIF_MSG_LINK, "XGXS Direct\n");
+			break;
+
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+			DP(NETIF_MSG_LINK, "XGXS 8705/6\n");
+			local_phy = bp->phy_addr;
+			ext_phy_addr = ((bp->ext_phy_config &
+					PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
+					PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
+			bp->phy_addr = (u8)ext_phy_addr;
+			bnx2x_mdio45_write(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
+					   EXT_PHY_OPT_CNTL, 0xa040);
+			bp->phy_addr = local_phy;
+			break;
+
+		default:
+			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
+			   bp->ext_phy_config);
+			break;
+		}
+
+	} else { /* SerDes */
+		ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
+		switch (ext_phy_type) {
+		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
+			DP(NETIF_MSG_LINK, "SerDes Direct\n");
+			break;
+
+		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
+			DP(NETIF_MSG_LINK, "SerDes 5482\n");
+			break;
+
+		default:
+			DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
+			   bp->ext_phy_config);
+			break;
+		}
+	}
+}
+
+static void bnx2x_link_initialize(struct bnx2x *bp)
+{
+	int port = bp->port;
+
+	/* disable attentions */
+	bnx2x_bits_dis(bp, GRCBASE_NIG,
+		       NIG_REGISTERS_MASK_INTERRUPT_PORT0 + port*4,
+		       (NIG_MASK_XGXS0_LINK_STATUS |
+			NIG_MASK_XGXS0_LINK10G |
+			NIG_MASK_SERDES0_LINK_STATUS |
+			NIG_MASK_MI_INT));
+
+	bnx2x_ext_phy_reset(bp);
+
+	bnx2x_set_aer_mmd(bp);
+
+	if (bp->phy_flags & PHY_XGSX_FLAG) {
+		bnx2x_set_master_ln(bp);
+	}
+
+	/* reset the SerDes and wait for reset bit return low */
+	bnx2x_reset_unicore(bp);
+
+	bnx2x_set_aer_mmd(bp);
+
+	/* setting the masterLn_def again after the reset */
+	if (bp->phy_flags & PHY_XGSX_FLAG) {
+		bnx2x_set_master_ln(bp);
+		bnx2x_set_swap_lanes(bp);
+	}
+
+	/* Set Parallel Detect */
+	if (bp->req_autoneg & AUTONEG_SPEED) {
+		bnx2x_set_parallel_detection(bp);
+	}
+
+	if (bp->phy_flags & PHY_XGSX_FLAG) {
+		if (bp->req_line_speed < SPEED_1000) {
+			bp->phy_flags |= PHY_SGMII_FLAG;
+		} else {
+			bp->phy_flags &= ~PHY_SGMII_FLAG;
+		}
+	}
+
+	if (!(bp->phy_flags & PHY_SGMII_FLAG)) {
+		u16 bank, rx_eq;
+
+		rx_eq = ((bp->serdes_config &
+			  PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
+			 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
+
+		DP(NETIF_MSG_LINK, "setting rx eq to %d\n", rx_eq);
+		for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
+			    bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0)) {
+			MDIO_SET_REG_BANK(bp, bank);
+			bnx2x_mdio22_write(bp, MDIO_RX0_RX_EQ_BOOST,
+					   ((rx_eq &
+				MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
+				MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
+		}
+
+		/* forced speed requested? */
+		if (!(bp->req_autoneg & AUTONEG_SPEED)) {
+			DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
+
+			/* disable autoneg */
+			bnx2x_set_autoneg(bp);
+
+			/* program speed and duplex */
+			bnx2x_program_serdes(bp);
+
+		} else { /* AN_mode */
+			DP(NETIF_MSG_LINK, "not SGNII, AN\n");
+
+			/* AN enabled */
+			bnx2x_set_brcm_cl37_advertisment(bp);
+
+			/* program duplex & pause advertisment (for aneg) */
+			bnx2x_set_ieee_aneg_advertisment(bp);
+
+			/* enable autoneg */
+			bnx2x_set_autoneg(bp);
+
+			/* enable and restart AN */
+			bnx2x_restart_autoneg(bp);
+		}
+
+	} else { /* SGMII mode */
+		DP(NETIF_MSG_LINK, "SGMII\n");
+
+		bnx2x_initialize_sgmii_process(bp);
+	}
+
+	/* enable the interrupt */
+	bnx2x_link_int_enable(bp);
+
+	/* init ext phy and enable link state int */
+	bnx2x_ext_phy_init(bp);
+}
+
+static void bnx2x_phy_deassert(struct bnx2x *bp)
+{
+	int port = bp->port;
+	u32 val;
+
+	if (bp->phy_flags & PHY_XGSX_FLAG) {
+		DP(NETIF_MSG_LINK, "XGXS\n");
+		val = XGSX_RESET_BITS;
+
+	} else { /* SerDes */
+		DP(NETIF_MSG_LINK, "SerDes\n");
+		val = SERDES_RESET_BITS;
+	}
+
+	val = val << (port*16);
+
+	/* reset and unreset the SerDes/XGXS */
+	REG_WR(bp, GRCBASE_MISC, MISC_REGISTERS_RESET_REG_3_CLEAR, val);
+	REG_WR(bp, GRCBASE_MISC, MISC_REGISTERS_RESET_REG_3_SET, val);
+}
+
+static int bnx2x_phy_init(struct bnx2x *bp)
+{
+	DP(NETIF_MSG_LINK, "started\n");
+	if (CHIP_REV(bp) == CHIP_REV_FPGA) {
+		bp->phy_flags |= PHY_EMAC_FLAG;
+		bp->link_up = 1;
+		bp->line_speed = SPEED_10000;
+		bp->duplex = DUPLEX_FULL;
+		NIG_WR(NIG_REGISTERS_EGRESS_DRAIN0_MODE + bp->port*4, 0);
+		bnx2x_emac_enable(bp);
+		bnx2x_link_report(bp);
+		return 0;
+
+	} else if (CHIP_REV(bp) == CHIP_REV_EMUL) {
+		bp->phy_flags |= PHY_BMAC_FLAG;
+		bp->link_up = 1;
+		bp->line_speed = SPEED_10000;
+		bp->duplex = DUPLEX_FULL;
+		NIG_WR(NIG_REGISTERS_EGRESS_DRAIN0_MODE + bp->port*4, 0);
+		bnx2x_bmac_enable(bp, 0);
+		bnx2x_link_report(bp);
+		return 0;
+
+	} else {
+		bnx2x_phy_deassert(bp);
+		bnx2x_link_initialize(bp);
+	}
+
+	return 0;
+}
+
+static void bnx2x_link_reset(struct bnx2x *bp)
+{
+	int port = bp->port;
+
+	/* disable attentions */
+	bnx2x_bits_dis(bp, GRCBASE_NIG,
+		       NIG_REGISTERS_MASK_INTERRUPT_PORT0 + port*4,
+		       (NIG_MASK_XGXS0_LINK_STATUS |
+			NIG_MASK_XGXS0_LINK10G |
+			NIG_MASK_SERDES0_LINK_STATUS |
+			NIG_MASK_MI_INT));
+
+	bnx2x_ext_phy_reset(bp);
+
+	/* reset the SerDes/XGXS */
+	REG_WR(bp, GRCBASE_MISC, MISC_REGISTERS_RESET_REG_3_CLEAR,
+	       (0x1ff << (port*16)));
+
+	/* reset EMAC / BMAC and disable NIG interfaces */
+	NIG_WR(NIG_REGISTERS_BMAC0_IN_EN + port*4, 0);
+	NIG_WR(NIG_REGISTERS_BMAC0_OUT_EN + port*4, 0);
+
+	NIG_WR(NIG_REGISTERS_NIG_EMAC0_EN + port*4, 0);
+	NIG_WR(NIG_REGISTERS_EMAC0_IN_EN + port*4, 0);
+	NIG_WR(NIG_REGISTERS_EGRESS_EMAC0_OUT_EN + port*4, 0);
+
+	NIG_WR(NIG_REGISTERS_EGRESS_DRAIN0_MODE + port*4, 1);
+}
+
+#ifdef BNX2X_XGSX_LB
+static void bnx2x_set_xgxs_loopback(struct bnx2x *bp, int is_10g)
+{
+	int port = bp->port;
+
+	if (is_10g) {
+		u32 md_devad;
+
+		DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
+
+		/* change the uni_phy_addr in the nig */
+		REG_RD(bp, GRCBASE_NIG, (NIG_REGISTERS_XGXS0_CTRL_MD_DEVAD +
+					 (port*0x18)), &md_devad);
+		NIG_WR(NIG_REGISTERS_XGXS0_CTRL_MD_DEVAD + (port*0x18), 0x5);
+
+		/* change the aer mmd */
+		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
+		bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x2800);
+
+		/* config combo IEEE0 control reg for loopback */
+		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
+		bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+				   0x6041);
+
+		/* set aer mmd back */
+		bnx2x_set_aer_mmd(bp);
+
+		/* and md_devad */
+		NIG_WR(NIG_REGISTERS_XGXS0_CTRL_MD_DEVAD + (port*0x18),
+		       md_devad);
+
+	} else {
+		u32 mii_control;
+
+		DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
+
+		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
+		bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
+				  &mii_control);
+		bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
+				   (mii_control |
+				    MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
+	}
+}
+#endif
+
+/* end of PHY/MAC */
+
+
+static void bnx2x_panic_dump(struct bnx2x *bp)
+{
+	int i;
+	u16 j, start, end;
+
+	BNX2X_ERR("begin crash dump -----------------");
+
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
+
+		BNX2X_ERR("queue number %d, tx_pkt_prod(%x),tx_pkt_cons(%x),"
+			  " tx_bd_prod(%x), tx_bd_cons(%x), *tx_cons_sb(%x),"
+			  " *rx_cons_sb(%x), rx_comp_prod(%x), rx_comp_cons"
+			  "(%x), fp_c_idx(%x), fp_u_idx(%x) bd data(%x,%x)\n",
+			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
+			  fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb,
+			  fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx,
+			  fp->fp_u_idx, hw_prods->packets_prod,
+			  hw_prods->bds_prod);
+
+		start = TX_BD(*fp->tx_cons_sb - 10);
+		end = TX_BD(*fp->tx_cons_sb + 245);
+		for (j = start; j < end; j++) {
+			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
+
+			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
+				  sw_bd->skb, sw_bd->first_bd);
+		}
+
+		start = TX_BD(fp->tx_bd_cons - 10);
+		end = TX_BD(fp->tx_bd_cons + 254);
+
+		for (j = start; j < end; j++) {
+			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
+
+			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
+				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
+		}
+
+		start = RX_BD(*fp->rx_cons_sb - 10);
+		end = RX_BD(*fp->rx_cons_sb + 503);
+		for (j = start; j < end; j++) {
+			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
+			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
+			BNX2X_ERR("rx_bd[%x]=[%x:%x:%x:%x] sw_bd=[%p]\n",
+				  j, rx_bd[0], rx_bd[1], rx_bd[2],
+				  rx_bd[3], sw_bd->skb);
+		}
+
+		start = RX_BD(fp->rx_comp_cons - 10);
+		end = RX_BD(fp->rx_comp_cons + 503);
+		for (j = start; j < end; j++) {
+			u32 *cqe   = (u32 *)&fp->rx_comp_ring[j];
+			BNX2X_ERR( "cqe[%x]=[%x:%x:%x:%x]\n",
+			       j, cqe[0], cqe[1], cqe[2], cqe[3]);
+		}
+	}
+
+	BNX2X_ERR("def_c_idx(%u), def_u_idx(%u), def_t_idx(%u),"
+		  " def_x_idx(%u), def_att_idx(%u), attn_state(%u),"
+		  " spq_prod_idx(%u), spq_con_idx(%u)\n",
+		  bp->def_c_idx, bp->def_u_idx, bp->def_t_idx, bp->def_x_idx,
+		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx,
+		  bp->spq_con_idx);
+
+	bnx2x_idle_chk(bp);
+	bnx2x_mc_assert(bp);
+	BNX2X_ERR("end crash dump -----------------");
+}
+
+#ifdef BNX2X_IND_REG
+/* no point in inlining since this is slow;
+ * need to add a lock if this is ever used for anything other than debug
+ */
+static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
+{
+	u32 val;
+
+	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
+	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
+	return val;
+}
+
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
+{
+	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
+	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
+}
+#endif
+
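+/* Returns a bitmask of the default status block indices that changed:
+ * bit 0 - attention bits, bit 1 - CStorm, bit 2 - UStorm,
+ * bit 3 - XStorm, bit 4 - TStorm.
+ */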
+static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
+{
+	struct host_def_status_block *dsb = bp->def_status_blk;
+	u16 rc = 0;
+
+	if (bp->def_att_idx != dsb->atten_status_block.attn_bits_index) {
+		bp->def_att_idx = dsb->atten_status_block.attn_bits_index;
+		rc |= 1;
+	}
+	if (bp->def_c_idx != dsb->c_def_status_block.status_block_index) {
+		bp->def_c_idx = dsb->c_def_status_block.status_block_index;
+		rc |= 2;
+	}
+	if (bp->def_u_idx != dsb->u_def_status_block.status_block_index) {
+		bp->def_u_idx = dsb->u_def_status_block.status_block_index;
+		rc |= 4;
+	}
+	if (bp->def_x_idx != dsb->x_def_status_block.status_block_index) {
+		bp->def_x_idx = dsb->x_def_status_block.status_block_index;
+		rc |= 8;
+	}
+	if (bp->def_t_idx != dsb->t_def_status_block.status_block_index) {
+		bp->def_t_idx = dsb->t_def_status_block.status_block_index;
+		rc |= 16;
+	}
+	rmb(); /* TBD check this */
+	return rc;
+}
+
+static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
+{
+	struct host_status_block *fpsb = fp->status_blk;
+	u16 rc = 0;
+
+	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
+		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
+		rc |= 1;
+	}
+	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
+		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
+		rc |= 2;
+	}
+	rmb(); /* TBD check this */
+	return rc;
+}
+
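+/* Ring accounting sketch: each of the NUM_TX_RINGS pages presumably ends
+ * with a "next page" BD that never carries data, so only
+ * (NUM_TX_BD - NUM_TX_RINGS) BDs are usable.  The prod/TX_DESC_CNT and
+ * cons/TX_DESC_CNT terms below subtract the next-page BDs that lie
+ * between the two indices.
+ */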
+static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
+{
+	u16 used;
+	u32 prod = fp->tx_bd_prod;
+	u32 cons = fp->tx_bd_cons;
+
+	smp_mb();
+
+	used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons +
+		(cons / TX_DESC_CNT) - (prod / TX_DESC_CNT));
+
+	if (prod >= cons) {
+		/* used = prod - cons - prod/size + cons/size */
+		used -= NUM_TX_BD - NUM_TX_RINGS;
+	}
+
+	BUG_TRAP(used <= fp->bp->tx_ring_size);
+	BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);
+	return(fp->bp->tx_ring_size - used);
+}
+
+static u16 bnx2x_ack_int(struct bnx2x *bp)
+{
+	u16 igu_addr = IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port;
+	u32 result = REG_RD(bp, igu_addr * 8, BAR_IGU_INTMEM);
+
+#ifdef IGU_DEBUG
+#warning IGU_DEBUG active
+	if (result == 0) {
+		BNX2X_ERR("read %x from IGU\n", result);
+		REG_WR(bp, GRCBASE_TIMERS, TM_REGISTERS_TIMER_SOFT_RST, 0);
+	}
+#endif
+	return result;
+}
+
+static void bnx2x_enable_int(struct bnx2x *bp)
+{
+	int port = bp->port;
+	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+	u32 val = REG_RD(bp, GRCBASE_HC, (port ? HC_REGISTERS_CONFIG_1 :
+						 HC_REGISTERS_CONFIG_0));
+
+	if (msix) {
+		val |= HC_CONFIG_0_REGISTERS_MSI_MSIX_INT_EN_0;
+		val &= ~HC_CONFIG_0_REGISTERS_SINGLE_ISR_EN_0;
+		val |= HC_CONFIG_0_REGISTERS_ATTN_BIT_EN_0;
+	} else {
+		val |= (HC_CONFIG_0_REGISTERS_INT_LINE_EN_0 |
+			HC_CONFIG_0_REGISTERS_SINGLE_ISR_EN_0 |
+			HC_CONFIG_0_REGISTERS_ATTN_BIT_EN_0);
+		val &= ~HC_CONFIG_0_REGISTERS_MSI_MSIX_INT_EN_0;
+	}
+
+	DP(NETIF_MSG_INTR, "wrote %x to HC %d msi=%d\n",
+	   val, port, msix);
+	REG_WR(bp, GRCBASE_HC, (port ? HC_REGISTERS_CONFIG_1 :
+				       HC_REGISTERS_CONFIG_0), val);
+}
+
+static void bnx2x_disable_int(struct bnx2x *bp)
+{
+	int port = bp->port;
+	u32 addr = port ? HC_REGISTERS_CONFIG_1 : HC_REGISTERS_CONFIG_0;
+	u32 val = REG_RD(bp, GRCBASE_HC, addr);
+
+	val &= ~(HC_CONFIG_0_REGISTERS_MSI_MSIX_INT_EN_0 |
+		 HC_CONFIG_0_REGISTERS_INT_LINE_EN_0 |
+		 HC_CONFIG_0_REGISTERS_SINGLE_ISR_EN_0 |
+		 HC_CONFIG_0_REGISTERS_ATTN_BIT_EN_0);
+	DP(NETIF_MSG_INTR, "called, write %x to HC %d\n", val, port);
+	REG_WR(bp, GRCBASE_HC, addr, val);
+	if (REG_RD(bp, GRCBASE_HC, addr) != val) {
+		BNX2X_ERR("BUG! proper val not read from IGU!\n");
+	}
+}
+
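+/* Build an IGU ack command: the flags (interrupt mode, status block id,
+ * storm id and the update-index bit) go in the upper 16 bits and the new
+ * index in the lower 16, then post it to the INT_ACK address.
+ */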
+static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id,
+				u8 storm, u16 index, u8 op, u8 update)
+{
+	u16 flags;
+	u16 igu_addr = IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port;
+	u32 command;
+
+	flags = (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT) |
+		(id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT)|
+		(storm << IGU_ACK_REGISTER_STORM_ID_SHIFT)    |
+		(update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT);
+
+	command = (flags) << 16 | index;
+
+	REG_WR(bp, igu_addr * 8, BAR_IGU_INTMEM, command);
+}
+
+/*========================================================================*/
+
+/* acquire split MCP access lock register */
+static int bnx2x_lock_alr(struct bnx2x *bp)
+{
+	int rc = 0;
+	u32 i, j, val;
+
+	i = 100;
+	val = 1UL << 31;
+
+	REG_WR(bp, GRCBASE_MCP, 0x9c, val);
+	for (j = 0; j < i*10; j++) {
+		val = REG_RD(bp, GRCBASE_MCP, 0x9c);
+		if (val & (1L << 31)) {
+			break;
+		}
+
+		mdelay(5);
+	}
+
+	if (!(val & (1L << 31))) {
+		BNX2X_ERR("Cannot acquire nvram interface.\n");
+
+		rc = -EBUSY;
+	}
+
+	return rc;
+}
+
+/* Release split MCP access lock register */
+static void bnx2x_unlock_alr(struct bnx2x *bp)
+{
+	u32 val = 0;
+
+	REG_WR(bp, GRCBASE_MCP, 0x9c, val);
+}
+
+static void bnx2x_update_coalesce(struct bnx2x *bp);
+
+static void bnx2x_disable_int_sync(struct bnx2x *bp)
+{
+	int msix = (bp->flags & USING_MSIX_FLAG)? 1 : 0;
+	int i;
+
+	atomic_inc(&bp->intr_sem);
+	bnx2x_disable_int(bp);
+
+	/* Disable SP tasklet */
+	tasklet_disable(&bp->sp_task);
+
+	if (msix) {
+		for_each_queue(bp, i) {
+			synchronize_irq(bp->msix_table[i].vector);
+		}
+		/* one more for the Slow Path IRQ */
+		synchronize_irq(bp->msix_table[i].vector);
+	} else {
+		synchronize_irq(bp->pdev->irq);
+	}
+}
+
+static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+{
+	int mode = bp->rx_mode;
+	int port = bp->port;
+	struct tstorm_eth_leading_conn_config leading_config = {0};
+
+	DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
+
+	switch (mode) {
+	case BNX2X_RX_MODE_NONE: /* no Rx */
+		leading_config.drop_flags.bits = BNX2X_NO_RX_FLAGS;
+		break;
+	case BNX2X_RX_MODE_NORMAL:
+		leading_config.drop_flags.bits = BNX2X_NORMAL_RX_FLAGS;
+		break;
+	case BNX2X_RX_MODE_ALLMULTI:
+		leading_config.drop_flags.bits = BNX2X_ALLMULTI_RX_FLAGS;
+		break;
+	case BNX2X_RX_MODE_PROMISC:
+		leading_config.drop_flags.bits = BNX2X_PROMISC_RX_FLAGS;
+		break;
+	default:
+		BNX2X_ERR("bad Rx mode (%d)\n", mode);
+	}
+	leading_config.max_buffer_size = bp->rx_buf_use_size;
+	leading_config.rss_result_mask = 0x7f;
+	leading_config.config_flags = 0xf;
+
+#ifdef BCM_VLAN
+	if (mode && bp->vlgrp) {
+		leading_config.config_flags |=
+			TSTORM_ETH_LEADING_CONN_CONFIG_ENABLE_VLAN_REMOVAL;
+		DP(NETIF_MSG_RX_STATUS, "vlan removal enabled\n");
+	}
+#else
+/* TBD leading_config.config_flags |= */
+/*     TSTORM_ETH_LEADING_CONN_CONFIG_ENABLE_VLAN_REMOVAL; */
+#endif
+
+	REG_WR32(bp, TSTORM_LEADING_CONN_CONFIG(port), BAR_TSTRORM_INTMEM,
+		 U64_LO(*(u64 *)&leading_config));
+	REG_WR32(bp, TSTORM_LEADING_CONN_CONFIG(port)+4, BAR_TSTRORM_INTMEM,
+		 U64_HI(*(u64 *)&leading_config));
+}
+
+/* the slow path queue is odd since completions arrive on the fastpath ring */
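+/* A note on the full check below: prod and cons are treated as
+ * free-running indices masked to 12 bits, and the ring is presumably
+ * considered full one entry early (at MAX_SP_DESC_CNT) since the last
+ * BD is not usable.
+ */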
+static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+			 u32 data_hi, u32 data_lo, int common)
+{
+	int port = bp->port;
+
+	DP(NETIF_MSG_TIMER,
+	   "spe=%x,%x command=%x  hw_cid=%x  data=%x:%x  left=%x\n",
+	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
+	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
+	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic)) {
+		return -EIO;
+	}
+#endif
+
+	spin_lock(&bp->spq_lock);
+
+	if (!bp->spq_left ||
+	    (((bp->spq_prod_idx - bp->spq_con_idx)
+	      & 0xfff) == MAX_SP_DESC_CNT)) {
+
+		BNX2X_ERR("BUG! SPQ ring full!\n");
+		spin_unlock(&bp->spq_lock);
+		bnx2x_panic();
+		return -EBUSY;
+	}
+	/* CID needs port number to be encoded in it */
+	bp->spq_prod_bd->hdr.conn_and_cmd_data =
+		((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(bp, cid));
+	bp->spq_prod_bd->hdr.type = ETH_CONNECTION_TYPE;
+	if (common)
+		bp->spq_prod_bd->hdr.type |=
+					(1 << SPE_HDR_COMMON_RAMROD_SHIFT);
+
+	bp->spq_prod_bd->data.mac_config_addr.hi = data_hi;
+	bp->spq_prod_bd->data.mac_config_addr.lo = data_lo;
+
+	bp->spq_left--;
+
+	if (bp->spq_prod_bd == bp->spq_last_bd) {
+		bp->spq_prod_bd = bp->spq;
+		bp->spq_prod_idx = 0;
+		DP(NETIF_MSG_TIMER, "end of spq\n");
+	} else {
+		bp->spq_prod_bd++;
+		bp->spq_prod_idx++;
+	}
+
+	REG_WR32(bp, BAR_XSTRORM_INTMEM, XSTORM_SPQ_PROD_OFFSET(port),
+		 bp->spq_prod_idx);
+
+	spin_unlock(&bp->spq_lock);
+	return 0;
+}
+
+/* Called with rtnl_lock from vlan functions and also netif_tx_lock
+ * from set_multicast.
+ */
+static void bnx2x_set_rx_mode(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
+
+	DP(NETIF_MSG_RX_STATUS, "called dev->flags = %x\n", dev->flags);
+
+	if (dev->flags & IFF_PROMISC) {
+		rx_mode = BNX2X_RX_MODE_PROMISC;
+	} else if (dev->flags & IFF_ALLMULTI ||
+		   dev->mc_count > BNX2X_MAX_MULTICAST) {
+		rx_mode = BNX2X_RX_MODE_ALLMULTI;
+	} else { /* some multicasts */
+
+		int i, old, offset;
+		struct dev_mc_list *mclist;
+		struct mac_configuration_cmd *config =
+			bnx2x_sp(bp, mcast_config);
+
+		if (CHIP_REV_IS_SLOW(bp))
+			offset = BNX2X_MAX_EMUL_MULTI*(1 + bp->port);
+		else
+			offset = BNX2X_MAX_MULTICAST*(1 + bp->port);
+
+		/* copy the multicast list into the CAM table */
+		for (i = 0, mclist = dev->mc_list;
+		     mclist && i < dev->mc_count;
+		     i++, mclist = mclist->next) {
+
+			config->config_table[i].cam_entry.msb_mac_addr =
+				swab32(*(u32 *)&mclist->dmi_addr[0]);
+			config->config_table[i].cam_entry.lsb_mac_addr =
+				swab16(*(u16 *)&mclist->dmi_addr[4]);
+			config->config_table[i].cam_entry.flags = bp->port;
+			config->config_table[i].target_table_entry.vlan_tag =
+				0;
+			config->config_table[i].target_table_entry.flags =
+				TSTORM_CAM_TAGET_TABLE_ENTRY_ADD_TO_NIC_STAT;
+
+			DP(NETIF_MSG_RX_STATUS, "setting MCAST[%d] (%x:%x)\n",
+			   i, config->config_table[i].cam_entry.msb_mac_addr,
+			   config->config_table[i].cam_entry.lsb_mac_addr);
+		}
+		old = config->hdr.length;
+		if (old > i) {
+			for (; i < old; i++) {
+				if (CAM_IS_INVALID(config->config_table[i])) {
+					i--; /* already invalidated */
+					break;
+				}
+				/* invalidate */
+				config->config_table[i]
+					.target_table_entry.vlan_tag = 0;
+				config->config_table[i]
+					.target_table_entry.flags = 1;
+			}
+		}
+		config->hdr.length = i;
+		config->hdr.offset = offset;
+		config->hdr.reserved0 = 0;
+		config->hdr.reserved1 = 0;
+
+		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
+			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
+			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
+	}
+
+	bp->rx_mode = rx_mode;
+
+	bnx2x_set_storm_rx_mode(bp);
+}
+
+static void bnx2x_netif_stop(struct bnx2x *bp)
+{
+	bp->rx_mode = BNX2X_RX_MODE_NONE;
+	bnx2x_set_storm_rx_mode(bp);
+	bnx2x_disable_int_sync(bp);
+	bnx2x_link_reset(bp);
+	if (netif_running(bp->dev)) {
+		netif_poll_disable(bp->dev);
+		netif_tx_disable(bp->dev);
+		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
+	}
+}
+
+static void bnx2x_free_mem(struct bnx2x *bp)
+{
+
+#define BNX2X_PCI_FREE(x, y, size) do { \
+	if (x) { \
+		pci_free_consistent(bp->pdev, size, x, y); \
+		x = NULL; \
+	} \
+} while (0)
+
+#define BNX2X_KFREE(x) do { \
+	if (x) { \
+		vfree(x); \
+		x = NULL; \
+	} \
+} while (0)
+
+	int i;
+
+	/* fastpath */
+	for_each_queue(bp, i) {
+
+		/* Status blocks */
+		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
+			       bnx2x_fp(bp, i, status_blk_mapping),
+			       sizeof(struct host_status_block)
+			       + sizeof(struct eth_tx_db_data));
+
+		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
+		BNX2X_KFREE(bnx2x_fp(bp, i, tx_buf_ring));
+		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
+			       bnx2x_fp(bp, i, tx_desc_mapping),
+			       sizeof(struct eth_tx_bd) * NUM_TX_BD);
+
+		BNX2X_KFREE(bnx2x_fp(bp, i, rx_buf_ring));
+		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
+			       bnx2x_fp(bp, i, rx_desc_mapping),
+			       sizeof(struct eth_rx_bd) * NUM_RX_BD);
+
+		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
+			       bnx2x_fp(bp, i, rx_comp_mapping),
+			       sizeof(struct eth_rx_bd) * NUM_RX_BD);
+	}
+
+	BNX2X_KFREE( bp->fp);
+	/* end of fast path*/
+
+	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
+		       (sizeof(struct host_def_status_block)));
+
+	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
+		       (sizeof(struct bnx2x_slowpath)));
+
+	if (iscsi_active) {
+		BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
+		BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
+		BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
+		BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
+	}
+
+	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, PAGE_SIZE);
+
+#undef BNX2X_PCI_FREE
+#undef BNX2X_KFREE
+}
+
+static int bnx2x_alloc_mem(struct bnx2x *bp)
+{
+	int i;
+
+#define BNX2X_PCI_ALLOC(x, y, size) do { \
+	x = pci_alloc_consistent(bp->pdev, size, y); \
+	if (x == NULL) \
+		goto alloc_mem_err; \
+	memset(x, 0, size); \
+} while (0)
+
+#define BNX2X_ALLOC(x, size) do { \
+	x = vmalloc(size); \
+	if (x == NULL) \
+		goto alloc_mem_err; \
+	memset(x, 0, size); \
+} while (0)
+
+	/* fastpath */
+	BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath)*bp->num_queues);
+
+	for_each_queue(bp, i) {
+
+		bnx2x_fp(bp, i, bp) = bp;
+
+		/* Status blocks */
+		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
+				&bnx2x_fp(bp, i, status_blk_mapping),
+				sizeof(struct host_status_block) +
+				sizeof(struct eth_tx_db_data));
+
+		bnx2x_fp(bp, i, tx_prods_mapping) =
+				bnx2x_fp(bp, i, status_blk_mapping) +
+				sizeof(struct host_status_block);
+
+		bnx2x_fp(bp, i, hw_tx_prods) =
+				(void *)(bnx2x_fp(bp, i, status_blk) + 1);
+
+		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
+		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
+				sizeof(struct sw_tx_bd) * NUM_TX_BD);
+		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
+				&bnx2x_fp(bp, i, tx_desc_mapping),
+				sizeof(struct eth_tx_bd) * NUM_TX_BD);
+
+		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
+				sizeof(struct sw_rx_bd) * NUM_RX_BD);
+		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
+				&bnx2x_fp(bp, i, rx_desc_mapping),
+				sizeof(struct eth_rx_bd) * NUM_RX_BD);
+
+		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
+				&bnx2x_fp(bp, i, rx_comp_mapping),
+				sizeof(struct eth_rx_bd) * NUM_RX_BD);
+	}
+	/* end of fast path*/
+
+	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
+			sizeof(struct host_def_status_block));
+
+	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
+			sizeof(struct bnx2x_slowpath));
+
+	if (iscsi_active) {
+		BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
+
+		/* Initialize T1 */
+		for (i = 0; i < 64*1024; i += 64) {
+			*(u64 *)((char *)bp->t1 + i+56) = 0x0UL;
+			*(u64 *)((char *)bp->t1 + i+3) = 0x0UL;
+		}
+
+		/* allocate searcher T2 table;
+		   we allocate 1/4 of the allocation for T2
+		   (which is not entered into the ILT) */
+		BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
+
+		/* Initialize T2 */
+		for (i = 0; i < 16*1024; i += 64) {
+			*(u64 *)((char *)bp->t2 + i+56) =
+				bp->t2_mapping + i + 64;
+		}
+
+		/* now fix up the last line in the block
+		 * to point to the next block
+		 */
+		*(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
+
+		/* Timer block array (MAX_CONN*8)
+		 * phys uncached for now 1024 conns
+		 */
+		BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
+
+		/* QM queues (128*MAX_CONN) */
+		BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
+	}
+
+	/* Slow path ring */
+	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, PAGE_SIZE);
+
+	return 0;
+
+alloc_mem_err:
+	bnx2x_free_mem(bp);
+	return -ENOMEM;
+
+#undef BNX2X_PCI_ALLOC
+#undef BNX2X_ALLOC
+}
+
+static void bnx2x_set_mac_addr(struct bnx2x *bp)
+{
+	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
+
+	/* CAM allocation
+	 * unicasts 0-31:port0 32-63:port1
+	 * multicast 64-127:port0 128-191:port1
+	 */
+	config->hdr.length = 2;
+	config->hdr.offset = bp->port ? 31 : 0;
+	config->hdr.reserved0 = 0;
+	config->hdr.reserved1 = 0;
+
+	/* primary MAC */
+	config->config_table[0].cam_entry.msb_mac_addr =
+		ntohl(*(u32 *)&bp->dev->dev_addr[0]);
+	config->config_table[0].cam_entry.lsb_mac_addr =
+		ntohs(*(u16 *)&bp->dev->dev_addr[4]);
+	config->config_table[0].cam_entry.flags = bp->port;
+	config->config_table[0].target_table_entry.vlan_tag = 0;
+	config->config_table[0].target_table_entry.flags =
+		TSTORM_CAM_TAGET_TABLE_ENTRY_ADD_TO_NIC_STAT;
+	DP(NETIF_MSG_RX_STATUS, "setting MAC (%x:%x)\n",
+	   config->config_table[0].cam_entry.msb_mac_addr,
+	   config->config_table[0].cam_entry.lsb_mac_addr);
+
+	/* broadcast */
+	config->config_table[1].cam_entry.msb_mac_addr = 0xffffffff;
+	config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
+	config->config_table[1].cam_entry.flags = bp->port;
+	config->config_table[1].target_table_entry.vlan_tag = 0;
+	config->config_table[1].target_table_entry.flags =
+				(TSTORM_CAM_TAGET_TABLE_ENTRY_BROADCAST |
+				 TSTORM_CAM_TAGET_TABLE_ENTRY_ADD_TO_NIC_STAT);
+
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
+		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
+		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
+}
+
+static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
+				     struct bnx2x_fastpath *fp, u16 index)
+{
+	struct sk_buff *skb;
+	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
+	struct eth_rx_bd *rxbd = &fp->rx_desc_ring[index];
+	dma_addr_t mapping;
+
+	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+
+	if (skb == NULL) {
+		return -ENOMEM;
+	}
+
+	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
+				 PCI_DMA_FROMDEVICE);
+
+	rx_buf->skb = skb;
+	pci_unmap_addr_set(rx_buf, mapping, mapping);
+#ifdef BNX2X_STOP_ON_ERROR
+	rxbd->reserved = fp->last_alloc++;
+#else
+	rxbd->reserved = 0;
+#endif
+	rxbd->len = bp->rx_buf_use_size;
+	rxbd->addr_hi = U64_HI(mapping);
+	rxbd->addr_lo = U64_LO(mapping);
+	return 0;
+}
+
+/* free skb in the packet ring at pos idx
+   return idx of last bd freed */
+static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			     u16 idx)
+{
+	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
+	struct eth_tx_bd *tx_bd;
+	struct sk_buff *skb = tx_buf->skb;
+	u16 bd_idx = tx_buf->first_bd;
+	int nbd;
+
+	DP(BNX2X_MSG_OFF, "pkt_idx=%d buff@(%p)->skb=%p\n", idx, tx_buf, skb);
+
+	/* unmap first bd */
+	DP(BNX2X_MSG_OFF, "free bd_idx=%d\n", bd_idx);
+	tx_bd = &fp->tx_desc_ring[bd_idx];
+	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
+			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
+
+	nbd = tx_bd->nbd -1;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (nbd > (MAX_SKB_FRAGS + 2)) {
+		BNX2X_ERR("bad nbd!\n");
+		bnx2x_panic();
+	}
+#endif
+
+	/* Skip a parse bd and the TSO split header bd
+	   since they have no mapping */
+	if (nbd)
+		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+
+	if (tx_bd->bd_flags.as_bitfield
+	    &(ETH_TX_BD_FLAGS_IP_CSUM | ETH_TX_BD_FLAGS_TCP_CSUM
+	      | ETH_TX_BD_FLAGS_SW_LSO)) {
+
+		if (--nbd)
+			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+		tx_bd = &fp->tx_desc_ring[bd_idx];
+		/* is this a TSO header split bd? */
+		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
+			if (--nbd)
+				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+		}
+	}
+
+	/* now free frags */
+	while (nbd > 0) {
+
+		DP(BNX2X_MSG_OFF, "free frag bd_idx=%d\n", bd_idx);
+		tx_bd = &fp->tx_desc_ring[bd_idx];
+		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
+			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
+		if (--nbd)
+			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+	}
+
+	/* release skb */
+	BUG_TRAP(skb);
+	dev_kfree_skb(skb);
+	tx_buf->first_bd = 0;
+	tx_buf->skb = NULL;
+
+	return bd_idx;
+}
+
+static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
+{
+	struct bnx2x *bp = fp->bp;
+	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
+	int done = 0;
+
+	hw_cons = *fp->tx_cons_sb;
+	sw_cons = fp->tx_pkt_cons;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic)) {
+		return;
+	}
+#endif
+
+	while (sw_cons != hw_cons) {
+		u16 pkt_cons;
+
+		pkt_cons = TX_BD(sw_cons);
+
+		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
+
+		DP(NETIF_MSG_TX_DONE, "sw_cons=%u  hw_cons=%u pkt_cons=%d\n",
+		   sw_cons, hw_cons, pkt_cons);
+
+/*        if (NEXT_TX_IDX(sw_cons)!=hw_cons){
+	    rmb();
+	    prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
+	}
+*/
+
+		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
+		sw_cons++;
+		done++;
+
+		if (done == work) {
+			break;
+		}
+	}
+
+	fp->tx_pkt_cons = sw_cons;
+	fp->tx_bd_cons = bd_cons;
+
+	smp_mb();
+
+	/* TBD need a thresh?  */
+	if (unlikely(netif_queue_stopped(bp->dev))) {
+		netif_tx_lock(bp->dev);
+		if ((netif_queue_stopped(bp->dev)) &&
+		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)) {
+			netif_wake_queue(bp->dev);
+		}
+		netif_tx_unlock(bp->dev);
+	}
+}
+
+static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
+			   union eth_rx_cqe *rr_cqe)
+{
+	struct bnx2x *bp = fp->bp;
+	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+	int command = (rr_cqe->ramrod_cqe.conn_and_cmd_data) >>
+		      COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
+
+	DP(NETIF_MSG_RX_STATUS,
+	   "fp %d, cid %d, got ramrod #%d state is %X type is %d\n",
+	   fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type);
+
+	bp->spq_left++;
+
+	switch (command | bp->state) {
+	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
+		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
+		bp->state = BNX2X_STATE_OPEN;
+		break;
+	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
+		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
+		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
+		fp->state = BNX2X_FP_STATE_HALTED;
+		break;
+	case (RAMROD_CMD_ID_ETH_LEADING_CFC_DEL |
+	      BNX2X_STATE_CLOSING_WAIT4_DELETE):
+		DP(NETIF_MSG_IFDOWN, "got delete ramrod\n");
+		bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
+		break;
+	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
+		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
+		break;
+	default:
+		BNX2X_ERR("unexpected ramrod (%d) state is %x\n",
+			  command, bp->state);
+	}
+}
+
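+/* Move the skb and its DMA mapping from the consumer slot to the
+ * producer slot so the BD can be republished without a new allocation.
+ */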
+static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
+				      struct sk_buff *skb, u16 cons, u16 prod)
+{
+	struct bnx2x *bp = fp->bp;
+	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
+	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
+	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
+	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
+
+	pci_dma_sync_single_for_device(bp->pdev,
+				       pci_unmap_addr(cons_rx_buf, mapping),
+				       bp->rx_offset + RX_COPY_THRESH,
+				       PCI_DMA_FROMDEVICE);
+
+	prod_rx_buf->skb = cons_rx_buf->skb;
+	pci_unmap_addr_set(prod_rx_buf, mapping,
+			   pci_unmap_addr(cons_rx_buf, mapping));
+	*prod_bd = *cons_bd;
+#ifdef BNX2X_STOP_ON_ERROR
+	prod_bd->reserved = fp->last_alloc++;
+#endif
+}
+
+static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
+{
+	struct bnx2x *bp = fp->bp;
+	u16 bd_cons, bd_prod, comp_ring_prod, comp_ring_cons;
+	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
+	int rx_pkt = 0;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic)) {
+		return 0;
+	}
+#endif
+
+	hw_comp_cons = *fp->rx_cons_sb;
+	if ((hw_comp_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
+		hw_comp_cons++;
+	}
+
+	bd_cons = fp->rx_bd_cons;
+	bd_prod = fp->rx_bd_prod;
+	sw_comp_cons = fp->rx_comp_cons;
+	sw_comp_prod = fp->rx_comp_prod;
+
+	/* Memory barrier necessary as speculative reads of the rx
+	 * buffer can be ahead of the index in the status block
+	 */
+	rmb();
+
+	DP(NETIF_MSG_RX_STATUS, "queue[%d] sw_comp_cons=%u  hw_comp_cons=%u\n",
+	   fp->index, sw_comp_cons, hw_comp_cons);
+
+	while (sw_comp_cons != hw_comp_cons) {
+		unsigned int len, pad;
+		struct sw_rx_bd *rx_buf;
+		struct sk_buff *skb;
+		union eth_rx_cqe *cqe;
+
+		comp_ring_prod = RX_BD(sw_comp_prod);
+		comp_ring_cons = RX_BD(sw_comp_cons);
+		bd_prod = RX_BD(bd_prod);
+		bd_cons = RX_BD(bd_cons);
+
+		/* prefetch(&fp->rx_buf_ring[bd_prod]); */
+		cqe =  &fp->rx_comp_ring[comp_ring_cons];
+
+	DP(NETIF_MSG_RX_STATUS, "hw_comp_cons=%u, sw_comp_cons=%u "
+	   "comp_ring(%u,%u) bd_ring(%u,%u)\n",
+	   hw_comp_cons, sw_comp_cons, comp_ring_prod,
+	   comp_ring_cons, bd_prod, bd_cons);
+
+	DP(NETIF_MSG_RX_STATUS, "CQE type %x, err %x, status %x, queue %x,"
+	   " vlan %x, len %x, csum %x\n", cqe->fast_path_cqe.type,
+	   cqe->fast_path_cqe.error_flags, cqe->fast_path_cqe.status_flags,
+	   cqe->fast_path_cqe.rss_hash_result, cqe->fast_path_cqe.vlan_tag,
+	   cqe->fast_path_cqe.pkt_len, cqe->fast_path_cqe.tcp_csum);
+
+
+		/* is this a slowpath msg? */
+		if (unlikely(cqe->fast_path_cqe.type)) {
+
+			bnx2x_sp_event(fp, cqe);
+
+			goto next_cqe;
+
+		/* this is an rx packet */
+		} else {
+			rx_buf = &fp->rx_buf_ring[bd_cons];
+			skb = rx_buf->skb;
+
+			len = cqe->fast_path_cqe.pkt_len;
+			pad = cqe->fast_path_cqe.placement_offset;
+
+			pci_dma_sync_single_for_device(bp->pdev,
+					pci_unmap_addr(rx_buf, mapping),
+						       pad + RX_COPY_THRESH,
+						       PCI_DMA_FROMDEVICE);
+
+			prefetch(skb);
+			prefetch(((char *)(skb))+128);
+/*
+	    if (NEXT_RX_IDX(sw_comp_cons) !=  hw_comp_cons){
+		prefetch(bp->rx_buf_ring[NEXT_RX_IDX(bd_cons)].skb);
+		prefetch((char*)(bp->rx_buf_ring[NEXT_RX_IDX(bd_cons)].skb)
+			+128);
+	    }
+*/
+
+#ifdef BNX2X_STOP_ON_ERROR
+			BUG_TRAP(fp->rx_desc_ring[bd_cons].reserved ==
+				 fp->next_free);
+			fp->next_free++;
+#endif
+			/* is this an error packet? */
+			if (unlikely(cqe->fast_path_cqe.error_flags)) {
+			/* do we sometimes forward error packets anyway? */
+				DP(NETIF_MSG_RX_ERR,
+				   "ERROR flags(%u) Rx packet(%u)\n",
+				   cqe->fast_path_cqe.error_flags,
+				   sw_comp_cons);
+				/* TBD make sure MC counts this as a drop */
+				goto reuse_rx;
+			}
+
+			/* Since we don't have a jumbo ring, copy small
+			 * packets if mtu > 1500
+			 */
+			if ((bp->dev->mtu > 1500) &&
+			    (len <= RX_COPY_THRESH)) {
+				struct sk_buff *new_skb;
+
+				new_skb = netdev_alloc_skb(bp->dev,
+							   len + pad);
+				if (new_skb == NULL) {
+					DP(NETIF_MSG_RX_ERR,
+					   "ERROR packet dropped "
+					   "because of alloc failure\n");
+					/* TBD count this as a drop? */
+					goto reuse_rx;
+				}
+
+				skb_copy_from_linear_data_offset(skb, pad,
+								 new_skb->data
+								 + pad, len);
+
+				skb_reserve(new_skb, pad);
+				skb_put(new_skb, len);
+
+				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
+
+				skb = new_skb;
+			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
+				pci_unmap_single(bp->pdev,
+						 pci_unmap_addr(rx_buf,
+								mapping),
+						 bp->rx_buf_use_size,
+						 PCI_DMA_FROMDEVICE);
+
+				skb_reserve(skb, pad);
+				skb_put(skb, len);
+			} else {
+				DP(NETIF_MSG_RX_ERR,
+				   "ERROR packet dropped because "
+				   "of alloc failure\n");
+reuse_rx:
+				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
+				goto next_rx;
+			}
+
+			skb->protocol = eth_type_trans(skb, bp->dev);
+
+			skb->ip_summed = CHECKSUM_NONE;
+			if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+			/* TBD do we pass bad csum packets on promisc?
+			 * if (unlikely(cqe->fast_path_cqe.error_flags))
+			 *                 skb->ip_summed = CHECKSUM_NONE;
+			 */
+
+		}
+
+#ifdef BCM_VLAN
+		if ((cqe->fast_path_cqe.pars_flags.flags
+		     & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS)
+		    && (bp->vlgrp != 0)) {
+			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
+						 cqe->fast_path_cqe.vlan_tag);
+		} else
+#endif
+			netif_receive_skb(skb);
+
+		bp->dev->last_rx = jiffies;
+
+next_rx:
+		rx_buf->skb = NULL;
+
+		bd_cons = NEXT_RX_IDX(bd_cons);
+		bd_prod = NEXT_RX_IDX(bd_prod);
+next_cqe:
+		sw_comp_prod = NEXT_RX_IDX(sw_comp_prod);
+		sw_comp_cons = NEXT_RX_IDX(sw_comp_cons);
+		rx_pkt++;
+
+		if (rx_pkt == budget)
+			break;
+
+		/* Refresh hw_cons to see if there is new work */
+		if (sw_comp_cons == hw_comp_cons) {
+			hw_comp_cons = *fp->rx_cons_sb;
+
+			if ((hw_comp_cons & MAX_RX_DESC_CNT) ==
+			    MAX_RX_DESC_CNT) {
+				hw_comp_cons++;
+			}
+
+			/* TBD do we update rcq prod inside the while? */
+			REG_WR(bp, BAR_TSTRORM_INTMEM,
+			       TSTORM_RCQ_PROD_OFFSET(bp->port, 0),
+			       sw_comp_prod);
+			rmb();
+
+		} else if (!(rx_pkt % 64)) {  /* TBD */
+			REG_WR(bp, BAR_TSTRORM_INTMEM,
+			       TSTORM_RCQ_PROD_OFFSET(bp->port, 0),
+			       sw_comp_prod);
+		}
+
+	} /* while */
+
+	fp->rx_bd_cons = bd_cons;
+	fp->rx_bd_prod = bd_prod;
+	fp->rx_comp_cons = sw_comp_cons;
+	fp->rx_comp_prod = sw_comp_prod;
+
+	REG_WR(bp, BAR_TSTRORM_INTMEM,
+	       TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod);
+
+	mmiowb();
+
+	fp->rx_pkt += rx_pkt;
+	fp->rx_calls++;
+	return rx_pkt;
+}
+
+/* state_p is a pointer to state field, which is checked for a change */
+static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+			     int *state_p, int poll)
+{
+	int j = 0;
+
+	/* DP("waiting for state to become %d on IDX [%d]\n",
+	state, sb_idx); */
+
+	while (*state_p != state) {
+		msleep(1);
+		if (poll) {
+		/* Get ramrod "send" completion (will only update producer) */
+			bnx2x_rx_int(bp->fp, 10);
+			/* If index is different from 0 -
+			 * call for interrupt handling routine for the
+			 * appropriate queue to get the ramrod completion */
+		}
+		if (j++ == 5000) {
+			return -EBUSY;
+		}
+	}
+
+	return 0;
+}
+
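+/* Attention handling: compare the hw attn bits and their ack against our
+ * cached attn_state to find newly asserted and newly deasserted bits,
+ * then service and acknowledge each set through the IGU.
+ */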
+static void bnx2x_attn_int(struct bnx2x *bp)
+{
+	int port = bp->port;
+
+	/* read local copy of bits */
+	u16 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
+	u16 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
+	u16 attn_state = bp->attn_state;
+
+	/* look for changed bits */
+	u16 asserted   =  attn_bits & ~attn_ack & ~attn_state;
+	u16 deasserted = ~attn_bits &  attn_ack &  attn_state;
+
+	DP(NETIF_MSG_HW,
+	   "attn_bits %x, attn_ack %x, asserted %x, deasserted %x\n",
+	   attn_bits, attn_ack, asserted, deasserted);
+
+	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
+		BNX2X_ERR("bad attention state\n");
+	}
+
+	/* handle bits that were raised */
+	if (asserted) {
+		u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET +
+				IGU_PORT_BASE * port) * 8;
+		u32 aeu_addr = port ? MISC_REGISTERS_AEU_MASK_ATTN_FUNC_1 :
+				      MISC_REGISTERS_AEU_MASK_ATTN_FUNC_0;
+		u32 nig_mask_addr = port ? NIG_REGISTERS_MASK_INTERRUPT_PORT1
+					 : NIG_REGISTERS_MASK_INTERRUPT_PORT0;
+
+		if (~bp->aeu_mask & (asserted & 0xff))
+			BNX2X_ERR("IGU ERROR\n");
+		if (bp->attn_state & asserted)
+			BNX2X_ERR("IGU ERROR\n");
+
+		DP(NETIF_MSG_HW, "aeu_mask %x, newly asserted %x\n",
+		   bp->aeu_mask, asserted);
+		bp->aeu_mask &= ~(asserted & 0xff);
+		DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
+
+		REG_WR(bp, GRCBASE_MISC_AEU, aeu_addr, bp->aeu_mask);
+
+		bp->attn_state |= asserted;
+
+		if (asserted & ATTN_HARD_WIRED_MASK) {
+			if (asserted & ATTN_NIG_FOR_FUNC0) {
+				/* this is for checking the RBC interrupt:
+				   REG_RD(pdev, GRCBASE_NIG,
+					  NIG_REGISTERS_NIG_INT_STS_CLR_0,
+					  &val); */
+				u32 nig_status_port;
+				u32 nig_int_addr = port ?
+					NIG_REGISTERS_STATUS_INTERRUPT_PORT1 :
+					NIG_REGISTERS_STATUS_INTERRUPT_PORT0;
+
+				bp->nig_mask = REG_RD(bp, GRCBASE_NIG,
+						      nig_mask_addr);
+				REG_WR(bp, GRCBASE_NIG, nig_mask_addr, 0);
+
+				nig_status_port = REG_RD(bp, GRCBASE_NIG,
+							 nig_int_addr);
+				bnx2x_link_update(bp);
+				/* handle unicore attn? */
+				/* NIG_WR(nig_mask_addr, 0x0ul);
+				nig_status_port); */
+			} /* NIG ATTN */
+			if (asserted & ATTN_SW_TIMER_4_FUNC0) {
+				DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
+			}
+			if (asserted & GPIO_2_FUNC0) {
+				DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
+			}
+			if (asserted & GPIO_3_FUNC0) {
+				DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
+			}
+			if (asserted & GPIO_4_FUNC0) {
+				DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
+			}
+			if (asserted & ATTN_GENERAL_ATTN_1) {
+				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
+				REG_WR(bp, GRCBASE_MISC,
+				       MISC_REGISTERS_AEU_GENERAL_ATTN_1,
+				       0x0);
+				/* TBD assert and dump */
+			}
+			if (asserted & ATTN_GENERAL_ATTN_2) {
+				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
+			}
+			if (asserted & ATTN_GENERAL_ATTN_3) {
+				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
+			}
+		} /* if hardwired */
+
+		DP(NETIF_MSG_HW, "about to mask %x at %x\n", asserted,
+		   igu_addr + BAR_IGU_INTMEM);
+		REG_WR(bp, BAR_IGU_INTMEM, igu_addr, asserted);
+		/* now set back the mask */
+		if (asserted & (ATTN_NIG_FOR_FUNC0 | ATTN_NIG_FOR_FUNC1)) {
+			REG_WR(bp, GRCBASE_NIG, nig_mask_addr, bp->nig_mask);
+		}
+
+	} /* if asserted */
+
+	if (deasserted) {
+		u32 val;
+		u16 reg_addr;
+		int index;
+		struct attn_route attn;
+		struct attn_route group_mask;
+
+		/* need to take HW lock because MCP or other port might also
+		   try to handle this event */
+		bnx2x_lock_alr(bp);
+
+		attn.sig[0] = REG_RD(bp, GRCBASE_MISC_AEU,
+				    MISC_REGISTERS_AEU_AFTER_INVERT_1_FUNC_0 +
+				     port*4);
+
+		attn.sig[1] = REG_RD(bp, GRCBASE_MISC_AEU,
+				    MISC_REGISTERS_AEU_AFTER_INVERT_2_FUNC_0 +
+				     port*4);
+
+		attn.sig[2] = REG_RD(bp, GRCBASE_MISC_AEU,
+				    MISC_REGISTERS_AEU_AFTER_INVERT_3_FUNC_0 +
+				     port*4);
+
+		attn.sig[3] = REG_RD(bp, GRCBASE_MISC_AEU,
+				    MISC_REGISTERS_AEU_AFTER_INVERT_4_FUNC_0 +
+				     port*4);
+
+		DP(NETIF_MSG_HW, "attn = %llx\n",
+		   (unsigned long long)attn.sig[0]);
+
+		for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
+			if (deasserted & (1 << index)) {
+				group_mask = bp->attn_group[index];
+
+				DP(NETIF_MSG_HW, "group %d =(%llx)\n", index,
+				   (unsigned long long)group_mask.sig[0]);
+
+				if (attn.sig[3] & group_mask.sig[3] &
+				    EVEREST_GEN_ATTN_IN_USE_MASK) {
+
+					if (BNX2X_MC_ASSERT_BITS &
+					    attn.sig[3]) {
+
+						BNX2X_ERR("MC assert!\n");
+						bnx2x_panic();
+
+					} else if (BNX2X_MCP_ASSERT &
+						   attn.sig[3]) {
+
+						BNX2X_ERR("MCP assert!\n");
+						REG_WR(bp, GRCBASE_MISC_AEU,
+					MISC_REGISTERS_AEU_GENERAL_ATTN_11, 0);
+
+						bnx2x_mc_assert(bp);
+
+					} else {
+						BNX2X_ERR("UNKOWEN HW "
+							  "ASSERT!\n");
+					}
+				}
+
+				if (attn.sig[1] & group_mask.sig[1] &
+				    BNX2X_DOORQ_ASSERT) {
+
+					val = REG_RD(bp, GRCBASE_DQ,
+					     DORQ_REGISTERS_DORQ_INT_STS_CLR);
+					BNX2X_ERR("DB hw attention 0x%x\n",
+						  val);
+					/* DORQ discard attention */
+					if (val & 0x2) {
+						BNX2X_ERR("FATAL error "
+							  "from DORQ\n");
+					}
+				}
+
+				if (attn.sig[2] & group_mask.sig[2] &
+				    AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
+
+					val = REG_RD(bp, GRCBASE_PXP,
+					     PXP_REGISTERS_PXP_INT_STS_CLR_0);
+					BNX2X_ERR("PXP hw attention 0x%x\n",
+						  val);
+					/* RQ_USDMDP_FIFO_OVERFLOW */
+					if (val & 0x18000) {
+						BNX2X_ERR("FATAL error from"
+							  " PXP\n");
+					}
+				}
+
+				if (attn.sig[3] & group_mask.sig[3] &
+				    EVEREST_LATCHED_ATTN_IN_USE_MASK) {
+
+					REG_WR(bp, GRCBASE_MISC_AEU,
+					  MISC_REGISTERS_AEU_CLR_LATCH_SIGNAL,
+					       0x7ff);
+					DP(NETIF_MSG_HW,
+					   "got latched bits %X\n",
+					   attn.sig[3]);
+				}
+			}
+		}
+
+		bnx2x_unlock_alr(bp);
+
+		reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port)*8;
+
+		val = ~deasserted;
+		REG_WR32(bp, reg_addr, BAR_IGU_INTMEM, val);
+
+		if (bp->aeu_mask & (deasserted & 0xff))
+			BNX2X_ERR("IGU BUG\n");
+		if (~bp->attn_state & deasserted)
+			BNX2X_ERR("IGU BUG\n");
+
+		reg_addr = port ? MISC_REGISTERS_AEU_MASK_ATTN_FUNC_1 :
+				  MISC_REGISTERS_AEU_MASK_ATTN_FUNC_0;
+
+		DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
+
+		bp->aeu_mask |= (deasserted & 0xff);
+
+		DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
+
+		REG_WR(bp, GRCBASE_MISC_AEU, reg_addr, bp->aeu_mask);
+
+		DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
+
+		bp->attn_state &= ~deasserted;
+
+		DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
+
+	} /* deasserted */
+}
+
+static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
+{
+	u16 rx_cons_sb = *fp->rx_cons_sb;
+
+	if ((rx_cons_sb & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
+		rx_cons_sb++;
+
+	if ((rx_cons_sb != fp->rx_comp_cons) ||
+	    (*fp->tx_cons_sb != fp->tx_pkt_cons))
+		return 1;
+
+	return 0;
+}
+
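+/* slow path tasklet: the dsb update mask tells us what to service -
+ * bit 0 - attentions, bit 1 - CStorm events (stats, cfc delete ramrods),
+ * bit 3 - SPQ consumer update
+ */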
+static void bnx2x_sp_task(unsigned long arg)
+{
+	struct bnx2x *bp = (struct bnx2x *)arg;
+	u16 status;
+
+	/* Return here if interrupt is disabled. */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+		DP(NETIF_MSG_INTR, "called but int_sem not 0, returning. \n");
+		return;
+	}
+
+	status = bnx2x_update_dsb_idx(bp);
+	if (status == 0) {
+		BNX2X_ERR("spurious slowpath interrupt!\n");
+	}
+
+	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated=%x)\n", status);
+
+	if (status & 0x1) {
+		/* HW attentions */
+		bnx2x_attn_int(bp);
+	}
+
+	/* CStorm events: query_stats, cfc delete ramrods */
+	if (status & 0x2) {
+		bp->stat_pending = 0;
+	}
+
+	if (status & 0x8) {  /* spq consumer update */
+		bp->spq_con_idx = *bp->spq_hw_con;
+		DP(NETIF_MSG_INTR, "spq (%d,%d)\n",
+		   bp->spq_prod_idx, bp->spq_con_idx);
+	}
+
+	bnx2x_ack_sb(bp, 16, ATTENTION_ID, bp->def_att_idx, IGU_INT_NOP, 1);
+	bnx2x_ack_sb(bp, 16, USTORM_ID, bp->def_u_idx, IGU_INT_NOP, 1);
+	bnx2x_ack_sb(bp, 16, CSTORM_ID, bp->def_c_idx, IGU_INT_NOP, 1);
+	bnx2x_ack_sb(bp, 16, XSTORM_ID, bp->def_x_idx, IGU_INT_NOP, 1);
+	bnx2x_ack_sb(bp, 16, TSTORM_ID, bp->def_t_idx, IGU_INT_ENABLE, 1);
+}
+
+static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
+{
+	struct net_device *dev = dev_instance;
+	struct bnx2x *bp = netdev_priv(dev);
+
+	/* Return here if interrupt is disabled. */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+		DP(NETIF_MSG_INTR, "called but int_sem not 0, returning. \n");
+		return IRQ_HANDLED;
+	}
+
+	bnx2x_ack_sb(bp, 16, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic)) {
+		return IRQ_HANDLED;
+	}
+#endif
+
+	tasklet_schedule(&bp->sp_task);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
+{
+	struct bnx2x_fastpath *fp = fp_cookie;
+	struct bnx2x *bp = fp->bp;
+	struct net_device *dev = bp->dev;
+
+	DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", fp->index);
+	bnx2x_ack_sb(bp, fp->index, USTORM_ID, 0 , IGU_INT_DISABLE, 0);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic)) {
+		return IRQ_HANDLED;
+	}
+#endif
+
+	prefetch(fp->rx_cons_sb);
+	prefetch(fp->tx_cons_sb);
+	prefetch(&fp->status_blk->c_status_block.status_block_index);
+	prefetch(&fp->status_blk->u_status_block.status_block_index);
+	netif_rx_schedule(dev);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
+{
+	struct net_device *dev = dev_instance;
+	struct bnx2x *bp = netdev_priv(dev);
+	u16 status = bnx2x_ack_int(bp);
+
+	if (unlikely(status == 0)) {
+		DP(NETIF_MSG_INTR, "not our interrupt!\n");
+		return IRQ_NONE;
+	}
+
+	DP(NETIF_MSG_INTR, "got an interrupt status is %u\n", status);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic)) {
+		return IRQ_HANDLED;
+	}
+#endif
+
+	/* Return here if interrupt is shared and is disabled. */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+		DP(NETIF_MSG_INTR, "called but int_sem not 0, returning. \n");
+		return IRQ_HANDLED;
+	}
+
+	if (status & 0x2) {
+
+		struct bnx2x_fastpath *fp = &bp->fp[0];
+
+		prefetch(fp->rx_cons_sb);
+		prefetch(fp->tx_cons_sb);
+		prefetch(&fp->status_blk->c_status_block.status_block_index);
+		prefetch(&fp->status_blk->u_status_block.status_block_index);
+
+		netif_rx_schedule(dev);
+
+		status &= ~0x2;
+
+		if (!status) {
+			return IRQ_HANDLED;
+		}
+	}
+
+	if (unlikely(status & 0x1)) {
+
+		tasklet_schedule(&bp->sp_task);
+		status &= ~0x1;
+		if (!status) {
+			return IRQ_HANDLED;
+		}
+	}
+
+	DP(NETIF_MSG_INTR, "got an unknown interrupt! (status is %u)\n",
+	   status);
+
+	return IRQ_HANDLED;
+}
+
+static int bnx2x_poll(struct net_device *dev, int *budget)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct bnx2x_fastpath *fp = &bp->fp[0];
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic)) {
+
+		netif_rx_complete(dev);
+		return 0;
+	}
+#endif
+	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
+	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
+	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb)+256);
+
+	bnx2x_update_fpsb_idx(fp);
+
+	if (*fp->tx_cons_sb != fp->tx_pkt_cons) {
+		bnx2x_tx_int(fp, *budget);
+	}
+
+	if (*fp->rx_cons_sb != fp->rx_comp_cons) {
+		int orig_budget = *budget;
+		int work_done;
+
+		if (orig_budget > dev->quota)
+			orig_budget = dev->quota;
+
+		work_done = bnx2x_rx_int(fp, orig_budget);
+		*budget -= work_done;
+		dev->quota -= work_done;
+	}
+
+	rmb(); /* TBD check */
+
+	if (!bnx2x_has_work(fp)) {
+		netif_rx_complete(dev);
+
+		bnx2x_ack_sb(bp, fp->index, USTORM_ID, fp->fp_u_idx,
+			     IGU_INT_NOP, 1);
+		bnx2x_ack_sb(bp, fp->index, CSTORM_ID, fp->fp_c_idx,
+			     IGU_INT_ENABLE, 1);
+		return 0;
+	}
+
+	return 1;
+}
+
+static void bnx2x_fw_dump(struct bnx2x *bp)
+{
+	u32 mark, offset;
+	u32 data[9];
+	int word;
+
+	mark = REG_RD(bp, GRCBASE_MCP, MCP_REG_MCPR_SCRATCH + 0xf104);
+	printk(KERN_ERR PFX "begin fw dump (mark %x)\n", mark);
+
+	for (offset = 0xF108; offset <= 0xF900; offset += 0x8*4) {
+		for (word = 0; word < 8; word++) {
+			data[word] = htonl(REG_RD(bp, GRCBASE_MCP,
+						  MCP_REG_MCPR_SCRATCH +
+						  offset + 4*word));
+		}
+		data[8] = 0x0;
+		if (offset + 0x08000000 > mark) {
+			printk("\n****\n");
+			mark = UINT_MAX;
+		}
+		printk("%s", (char *)data);
+	}
+	printk("\n" KERN_ERR PFX "end of fw dump\n");
+}
+
+/* send the MCP a request, block until there is a reply */
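+/* The sequence number rides in the low bits of the mailbox word and the
+ * FW echoes it back (FW_MSG_SEQ_NUMBER_MASK), which is how a reply is
+ * matched to its request.
+ */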
+static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
+{
+	u32 rc = 0;
+	u32 seq = ++bp->fw_seq;
+	int port = bp->port;
+
+	SHMEM_WR(bp, drv_fw_mb[port].drv_mb_header, command|seq);
+	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", command|seq);
+
+	/* let the FW do its magic ... */
+	msleep(100); /* TBD */
+
+	if (CHIP_REV_IS_SLOW(bp)) {
+		msleep(900);
+	}
+
+	rc = SHMEM_RD(bp, drv_fw_mb[port].fw_mb_header);
+
+	DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
+
+	/* is this a reply to our command? */
+	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK )) {
+		rc &= FW_MSG_CODE_MASK;
+	} else {
+		/* FW BUG! */
+		BNX2X_ERR("FW failed to respond!\n");
+		bnx2x_fw_dump(bp);
+		rc = 0;
+	}
+	return rc;
+}
+
+/* cause the NIG to send a loopback debug packet */
+static void bnx2x_lb_pckt(struct bnx2x *bp)
+{
+	/* Ethernet source and destination addresses */
+	NIG_WR(NIG_REGISTERS_DEBUG_PACKET_LB, 0x55555555);
+	NIG_WR(NIG_REGISTERS_DEBUG_PACKET_LB + 4, 0x55555555);
+	/* SOP */
+	NIG_WR(NIG_REGISTERS_DEBUG_PACKET_LB + 8, 0x20);
+	/* NON-IP protocol */
+	NIG_WR(NIG_REGISTERS_DEBUG_PACKET_LB, 0x09000000);
+	NIG_WR(NIG_REGISTERS_DEBUG_PACKET_LB + 4, 0x55555555);
+	/* EOP, eop_bvalid = 0 */
+	NIG_WR(NIG_REGISTERS_DEBUG_PACKET_LB + 8, 0x10);
+}
+
+/* some of the internal memories are not directly readable from the driver
+   to test them we send debug packets
+ */
+static int bnx2x_int_mem_test(struct bnx2x *bp)
+{
+	u32 val;
+	u32 count;
+	u8 hw;
+	u8 port;
+	u8 i;
+	u32 factor;
+
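+	/* emulation and FPGA run much slower than real silicon, so the
+	 * polling delays below are scaled up by 'factor' */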
+	switch (CHIP_REV(bp)) {
+	case CHIP_REV_EMUL:
+		hw = INIT_EMULATION;
+		factor = 2000;
+		break;
+	case CHIP_REV_FPGA:
+		hw = INIT_FPGA;
+		factor = 120;
+		break;
+	default:
+		hw = INIT_ASIC;
+		factor = 1;
+		break;
+	}
+
+	port = PORT0 << bp->port;
+
+	DP(NETIF_MSG_HW, "mem_wrk start part1\n");
+
+	/* Disable inputs of parser neighbor blocks */
+	REG_WR(bp, GRCBASE_TSDM, TSDM_REGISTERS_ENABLE_IN1, 0x0);
+	REG_WR(bp, GRCBASE_TCM, TCM_REGISTERS_PRS_IFEN, 0x0);
+	REG_WR(bp, GRCBASE_CFC, CFC_REGISTERS_DEBUG0, 0x1);
+	NIG_WR(NIG_REGISTERS_PRS_REQ_IN_EN, 0x0);
+
+	/* Write 0 to parser credits for CFC search request */
+	REG_WR(bp, GRCBASE_PRS, PRS_REGISTERS_CFC_SEARCH_INITIAL_CREDIT, 0x0);
+
+	/* send Ethernet packet */
+	bnx2x_lb_pckt(bp);
+
+	/* TODO: do we reset NIG statistics? */
+	/* Wait until NIG register shows 1 packet of size 0x10 */
+	count = 1000;
+	while (count) {
+		val = REG_RD(bp, GRCBASE_NIG, NIG_REGISTERS_STAT2_BRB_OCTET);
+		REG_RD(bp, GRCBASE_NIG, NIG_REGISTERS_STAT2_BRB_OCTET + 4);
+
+		if (val == 0x10) {
+			break;
+		}
+		msleep(10 * factor);
+		count--;
+	}
+	if (val != 0x10) {
+		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
+		return -1;
+	}
+
+	/* Wait until PRS register shows 1 packet */
+	count = 1000;
+	while (count) {
+		val = REG_RD(bp, GRCBASE_PRS, PRS_REGISTERS_NUM_OF_PACKETS);
+
+		if (val == 0x1) {
+			break;
+		}
+		msleep(10 * factor);
+		count--;
+	}
+	if (val != 0x1) {
+		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+		return -2;
+	}
+
+	/* Reset and init BRB, PRS */
+	REG_WR(bp, GRCBASE_MISC, MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3);
+	msleep(50);
+	REG_WR(bp, GRCBASE_MISC, MISC_REGISTERS_RESET_REG_1_SET, 0x3);
+	msleep(50);
+	bnx2x_init_stage(bp, BLCNUM_BRB1, COMMON);
+	bnx2x_init_stage(bp, BLCNUM_PRS, COMMON);
+
+	DP(NETIF_MSG_HW, "part2\n");
+
+	/* Disable inputs of parser neighbor blocks */
+	REG_WR(bp, GRCBASE_TSDM, TSDM_REGISTERS_ENABLE_IN1, 0x0);
+	REG_WR(bp, GRCBASE_TCM, TCM_REGISTERS_PRS_IFEN, 0x0);
+	REG_WR(bp, GRCBASE_CFC, CFC_REGISTERS_DEBUG0, 0x1);
+	NIG_WR(NIG_REGISTERS_PRS_REQ_IN_EN, 0x0);
+
+	/* Write 0 to parser credits for CFC search request */
+	REG_WR(bp, GRCBASE_PRS, PRS_REGISTERS_CFC_SEARCH_INITIAL_CREDIT, 0x0);
+
+	/* send 10 Ethernet packets */
+	for (i = 0; i < 10; i++) {
+		bnx2x_lb_pckt(bp);
+	}
+
+	/* Wait until NIG register shows 10 + 1
+	   packets of size 11*0x10 = 0xb0 */
+	count = 1000;
+	while (count) {
+
+		val = REG_RD(bp, GRCBASE_NIG, NIG_REGISTERS_STAT2_BRB_OCTET);
+		REG_RD(bp, GRCBASE_NIG, NIG_REGISTERS_STAT2_BRB_OCTET + 4);
+
+		if (val == 0xb0) {
+			break;
+		}
+		msleep(10 * factor);
+		count--;
+	}
+	if (val != 0xb0) {
+		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
+		return -3;
+	}
+
+	/* Wait until PRS register shows 2 packets */
+	val = REG_RD(bp, GRCBASE_PRS, PRS_REGISTERS_NUM_OF_PACKETS);
+
+	if (val != 0x2) {
+		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+	}
+
+	/* Write 1 to parser credits for CFC search request */
+	REG_WR(bp, GRCBASE_PRS, PRS_REGISTERS_CFC_SEARCH_INITIAL_CREDIT, 0x1);
+
+	/* Wait until PRS register shows 3 packets */
+	msleep(10 * factor);
+	val = REG_RD(bp, GRCBASE_PRS, PRS_REGISTERS_NUM_OF_PACKETS);
+
+	if (val != 0x3) {
+		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+	}
+	/* clear NIG EOP FIFO */
+	for (i = 0; i < 11; i++) {
+		REG_RD(bp, GRCBASE_NIG, NIG_REGISTERS_INGRESS_EOP_LB_FIFO);
+	}
+	val = REG_RD(bp, GRCBASE_NIG, NIG_REGISTERS_INGRESS_EOP_LB_EMPTY);
+	if (val != 1) {
+		BNX2X_ERR("clear of NIG failed\n");
+		return -4;
+	}
+
+	/* Reset and init BRB, PRS, NIG */
+	REG_WR(bp, GRCBASE_MISC, MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
+	msleep(50);
+	REG_WR(bp, GRCBASE_MISC, MISC_REGISTERS_RESET_REG_1_SET, 0x03);
+	msleep(50);
+	bnx2x_init_stage(bp, BLCNUM_BRB1, COMMON);
+	bnx2x_init_stage(bp, BLCNUM_PRS, COMMON);
+
+	if (!iscsi_active) {
+		/* set NIC mode */
+		REG_WR(bp, GRCBASE_PRS, PRS_REGISTERS_NIC_MODE, 1);
+	}
+
+	/* Enable inputs of parser neighbor blocks */
+	REG_WR(bp, GRCBASE_TSDM, TSDM_REGISTERS_ENABLE_IN1, 0x7fffffff);
+	REG_WR(bp, GRCBASE_TCM, TCM_REGISTERS_PRS_IFEN, 0x1);
+	REG_WR(bp, GRCBASE_CFC, CFC_REGISTERS_DEBUG0, 0x0);
+	NIG_WR(NIG_REGISTERS_PRS_REQ_IN_EN, 0x1);
+
+	DP(NETIF_MSG_HW, "done\n");
+	return 0; /* OK */
+}
+
+static int bnx2x_function_init(struct bnx2x *bp, int mode)
+{
+	int val, i;
+	u16 j, r_order, w_order;
+	int func = bp->port;
+	int port = func ? PORT1 : PORT0;
+	u32 wb_write[2];
+
+	DP(BNX2X_MSG_MCP, "function is %d mode is %X\n", func, mode);
+
+	if ((func != 0) && (func != 1)) {
+		BNX2X_ERR("BAD function number (%d)\n", func);
+		return -ENODEV;
+	}
+
+	if (mode & 0x1) {	/* init common */
+		DP(BNX2X_MSG_MCP, "starting common init, func %d, mode %x\n",
+		   func, mode);
+		REG_WR(bp, GRCBASE_MISC, MISC_REGISTERS_RESET_REG_1,
+		       0xffffffff);
+		REG_WR(bp, GRCBASE_MISC, MISC_REGISTERS_RESET_REG_2,
+		       0xfffc);
+		bnx2x_init_stage(bp, BLCNUM_MISC, COMMON);
+
+		REG_WR(bp, GRCBASE_MISC, MISC_REGISTERS_LCPLL_CTRL_REG_2,
+		       0x100);
+		msleep(30);
+		REG_WR(bp, GRCBASE_MISC, MISC_REGISTERS_LCPLL_CTRL_REG_2,
+		       0x0);
+
+		bnx2x_init_stage(bp, BLCNUM_PXP, COMMON);
+		bnx2x_init_stage(bp, BLCNUM_PXP2, COMMON);
+
+		pci_read_config_word(bp->pdev,
+				     bp->pcie_cap + PCI_EXP_DEVCTL, &j);
+		DP(NETIF_MSG_HW, "read %x from devctl\n", j);
+		w_order = ((j & PCI_EXP_DEVCTL_PAYLOAD) >> 5);	/* mask 0x00e0 */
+		r_order = ((j & PCI_EXP_DEVCTL_READRQ) >> 12);	/* mask 0x7000 */
+
+		bnx2x_init_pxp(bp, r_order, w_order);
+
+		if (CHIP_REV(bp) == CHIP_REV_Ax) {
+			/* enable hw interrupt from PXP on usdm
+			   overflow bit 16 on INT_MASK_0 */
+			REG_WR(bp, GRCBASE_PXP,
+			       PXP_REGISTERS_PXP_INT_MASK_0, 0);
+		}
+
+#ifdef __BIG_ENDIAN
+#error big endian platforms not supported yet
+		REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_RQ_QM_ENDIAN_M, 1);
+/* REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_RQ_TM_ENDIAN_M,  1); */
+/* REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_RQ_SRC_ENDIAN_M, 1); */
+		REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_RQ_CDU_ENDIAN_M, 1);
+		REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_RQ_DBG_ENDIAN_M, 1);
+		REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_RQ_HC_ENDIAN_M, 1);
+
+/* REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_RD_PBF_SWAP_MODE, 1); */
+		REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_RD_QM_SWAP_MODE, 1);
+/* REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_RD_TM_SWAP_MODE, 1); */
+/* REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_RD_SRC_SWAP_MODE, 1); */
+		REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_RD_CDURD_SWAP_MODE, 1);
+
+#endif /* __BIG_ENDIAN */
+
+		if (!iscsi_active) {
+			/* set NIC mode */
+			REG_WR(bp, GRCBASE_PRS, PRS_REGISTERS_NIC_MODE, 1);
+		}
+
+		REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_RQ_CDU_P_SIZE, 5);
+
+		if (iscsi_active) {
+			REG_WR(bp, GRCBASE_PXP2,
+			       PXP2_REGISTERS_RQ_TM_P_SIZE, 5);
+			REG_WR(bp, GRCBASE_PXP2,
+			       PXP2_REGISTERS_RQ_QM_P_SIZE, 5);
+			REG_WR(bp, GRCBASE_PXP2,
+			       PXP2_REGISTERS_RQ_SRC_P_SIZE, 5);
+		}
+
+		bnx2x_init_stage(bp, BLCNUM_TCM, COMMON);
+		bnx2x_init_stage(bp, BLCNUM_UCM, COMMON);
+		bnx2x_init_stage(bp, BLCNUM_CCM, COMMON);
+		bnx2x_init_stage(bp, BLCNUM_XCM, COMMON);
+		/* TBD merge with init tool? */
+		REG_RD(bp, GRCBASE_XSEM, XSEM_REGISTERS_PASSIVE_BUFFER);
+		REG_RD(bp, GRCBASE_XSEM, XSEM_REGISTERS_PASSIVE_BUFFER + 4);
+		REG_RD(bp, GRCBASE_XSEM, XSEM_REGISTERS_PASSIVE_BUFFER + 8);
+		REG_RD(bp, GRCBASE_CSEM, CSEM_REGISTERS_PASSIVE_BUFFER);
+		REG_RD(bp, GRCBASE_CSEM, CSEM_REGISTERS_PASSIVE_BUFFER + 4);
+		REG_RD(bp, GRCBASE_CSEM, CSEM_REGISTERS_PASSIVE_BUFFER + 8);
+		REG_RD(bp, GRCBASE_TSEM, TSEM_REGISTERS_PASSIVE_BUFFER);
+		REG_RD(bp, GRCBASE_TSEM, TSEM_REGISTERS_PASSIVE_BUFFER + 4);
+		REG_RD(bp, GRCBASE_TSEM, TSEM_REGISTERS_PASSIVE_BUFFER + 8);
+		REG_RD(bp, GRCBASE_USEM, USEM_REGISTERS_PASSIVE_BUFFER);
+		REG_RD(bp, GRCBASE_USEM, USEM_REGISTERS_PASSIVE_BUFFER + 4);
+		REG_RD(bp, GRCBASE_USEM, USEM_REGISTERS_PASSIVE_BUFFER + 8);
+
+		bnx2x_init_stage(bp, BLCNUM_QM, COMMON);
+		/* soft reset pulse */
+		REG_WR(bp, GRCBASE_QM, QM_REGISTERS_SOFT_RESET, 1);
+		REG_WR(bp, GRCBASE_QM, QM_REGISTERS_SOFT_RESET, 0);
+
+		if (iscsi_active)
+			bnx2x_init_stage(bp, BLCNUM_TIMERS, COMMON);
+
+		bnx2x_init_stage(bp, BLCNUM_DQ, COMMON);
+		REG_WR(bp, GRCBASE_DQ, DORQ_REGISTERS_DPM_CID_OFST,
+		       BCM_PAGE_BITS);
+		if (CHIP_REV(bp) == CHIP_REV_Ax) {
+			/* enable hw interrupt from doorbell Q */
+			REG_WR(bp, GRCBASE_DQ, DORQ_REGISTERS_DORQ_INT_MASK,
+			       0);
+		}
+
+		bnx2x_init_stage(bp, BLCNUM_BRB1, COMMON);
+
+		if (CHIP_REV_IS_SLOW(bp)) {
+			/* fix for emulation and FPGA for no pause */
+			REG_WR(bp, GRCBASE_BRB1,
+			       BRB1_REGISTERS_PAUSE_HIGH_THRESHOLD_0, 513);
+			REG_WR(bp, GRCBASE_BRB1,
+			       BRB1_REGISTERS_PAUSE_HIGH_THRESHOLD_1, 513);
+			REG_WR(bp, GRCBASE_BRB1,
+			       BRB1_REGISTERS_PAUSE_LOW_THRESHOLD_0, 0);
+			REG_WR(bp, GRCBASE_BRB1,
+			       BRB1_REGISTERS_PAUSE_LOW_THRESHOLD_1, 0);
+		}
+
+		bnx2x_init_stage(bp, BLCNUM_PRS, COMMON);
+
+		bnx2x_init_stage(bp, BLCNUM_TSDM, COMMON);
+		bnx2x_init_stage(bp, BLCNUM_CSDM, COMMON);
+		bnx2x_init_stage(bp, BLCNUM_USDM, COMMON);
+		bnx2x_init_stage(bp, BLCNUM_XSDM, COMMON);
+
+		bnx2x_zero_intmem(bp);
+
+		bnx2x_init_stage(bp, BLCNUM_TSEM, COMMON);
+		bnx2x_init_stage(bp, BLCNUM_USEM, COMMON);
+		bnx2x_init_stage(bp, BLCNUM_CSEM, COMMON);
+		bnx2x_init_stage(bp, BLCNUM_XSEM, COMMON);
+
+		/* sync semi rtc */
+		REG_WR(bp, GRCBASE_MISC,
+		       MISC_REGISTERS_RESET_REG_1_CLEAR, 0x80000000);
+		REG_WR(bp, GRCBASE_MISC,
+		       MISC_REGISTERS_RESET_REG_1_SET, 0x80000000);
+
+		bnx2x_init_stage(bp, BLCNUM_UPB, COMMON);
+		bnx2x_init_stage(bp, BLCNUM_XPB, COMMON);
+		bnx2x_init_stage(bp, BLCNUM_PBF, COMMON);
+
+		if (iscsi_active) { /* also used by multi */
+
+			REG_WR(bp, GRCBASE_SRCH, SRC_REGISTERS_SOFT_RST, 1);
+			for (i = SRC_REGISTERS_KEYRSS0_0;
+			      i <= SRC_REGISTERS_KEYRSS1_9; i += 4) {
+				REG_WR(bp, GRCBASE_SRCH, i, 0xc0cac01a);
+				/* TODO: replace with something meaningful */
+			}
+			bnx2x_init_stage(bp, BLCNUM_SRCH, COMMON);
+			REG_WR(bp, GRCBASE_SRCH, SRC_REGISTERS_SOFT_RST, 0);
+		}
+		if (sizeof(union cdu_context) != 1024) {
+			printk(KERN_ALERT PFX "please adjust the size of"
+			       " cdu_context(%ld)\n",
+			       (long)sizeof(union cdu_context));
+		}
+		val = (4 << 24) + (0 << 12) + 1024;
+		REG_WR(bp, GRCBASE_CDU, CDU_REGISTERS_CDU_GLOBAL_PARAMS, val);
+		bnx2x_init_stage(bp, BLCNUM_CDU, COMMON);
+
+		bnx2x_init_stage(bp, BLCNUM_CFC, COMMON);
+		REG_WR(bp, GRCBASE_CFC, CFC_REGISTERS_INIT_REG, 0x7FF);
+
+		bnx2x_init_stage(bp, BLCNUM_HC, COMMON);
+		bnx2x_init_stage(bp, BLCNUM_MISC_AEU, COMMON);
+		bnx2x_init_stage(bp, BLCNUM_PXPCS, COMMON);
+		bnx2x_init_stage(bp, BLCNUM_EMAC0, COMMON);
+		bnx2x_init_stage(bp, BLCNUM_EMAC1, COMMON);
+		bnx2x_init_stage(bp, BLCNUM_DBU, COMMON);
+		bnx2x_init_stage(bp, BLCNUM_DBG, COMMON);
+		bnx2x_init_stage(bp, BLCNUM_NIG, COMMON);
+		bnx2x_init_stage(bp, BLCNUM_DMAE, COMMON);
+
+		/* finish PXP init
+		   (can be moved up if we want to use the DMAE) */
+		val = REG_RD(bp, GRCBASE_PXP2, PXP2_REGISTERS_RQ_CFG_DONE);
+		if (val != 1) {
+			BNX2X_ERR("CFC CFG failed\n");
+			return -EBUSY;
+		}
+
+		val = REG_RD(bp, GRCBASE_PXP2, PXP2_REGISTERS_RD_INIT_DONE);
+		if (val != 1) {
+			BNX2X_ERR("CFC init failed\n");
+			return -EBUSY;
+		}
+
+		REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_RQ_DISABLE_INPUTS, 0);
+		REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_RD_DISABLE_INPUTS, 0);
+
+		/* finish CFC init */
+		val = REG_RD(bp, GRCBASE_CFC, CFC_REGISTERS_LL_INIT_DONE);
+		if (val != 1) {
+			BNX2X_ERR("CFC LL INIT failed\n");
+			return -EBUSY;
+		}
+
+		val = REG_RD(bp, GRCBASE_CFC, CFC_REGISTERS_AC_INIT_DONE);
+		if (val != 1) {
+			BNX2X_ERR("CFC AC INIT failed\n");
+			return -EBUSY;
+		}
+
+		val = REG_RD(bp, GRCBASE_CFC, CFC_REGISTERS_CAM_INIT_DONE);
+		if (val != 1) {
+			BNX2X_ERR("CFC CAM INIT failed\n");
+			return -EBUSY;
+		}
+
+		REG_WR(bp, GRCBASE_CFC, CFC_REGISTERS_DEBUG0, 0);
+
+		/* read NIG statistic
+		   to see if this is our first up since power-up */
+		val = REG_RD(bp, GRCBASE_NIG, NIG_REGISTERS_STAT2_BRB_OCTET);
+		REG_RD(bp, GRCBASE_NIG, NIG_REGISTERS_STAT2_BRB_OCTET + 4);
+
+		/* do internal memory self test */
+		if (val == 0 && bnx2x_int_mem_test(bp)) {
+			BNX2X_ERR("internal mem selftest failed\n");
+			return -EBUSY;
+		}
+	} /* end of common init */
+
+	/* per port init */
+
+	/* the phys address is shifted right 12 bits and has a 1=valid
+	   bit added as the 53rd bit
+	   then since this is a wide register(TM)
+	   we split it into two 32 bit writes
+	 */
+#define ONCHIP_ADDR1(x)   ((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
+#define ONCHIP_ADDR2(x)   ((u32)((1 << 20) | ((u64)(x) >> 44)))
+#define PXP_ONE_ILT(x)    (((x) << 10) | (x))
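+/* illustrative example (hypothetical address): for x = 0x0000123456789000,
+ * ONCHIP_ADDR1(x) = 0x23456789 (bits 43-12) and
+ * ONCHIP_ADDR2(x) = 0x00100001 (valid bit | bits 63-44)
+ */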
+
+	DP(BNX2X_MSG_MCP, "starting per-function init port is %x\n", func);
+
+	REG_WR(bp, GRCBASE_NIG, NIG_REGISTERS_MASK_INTERRUPT_PORT0 + func*4,
+	       0);
+	bnx2x_init_stage(bp, BLCNUM_PXP, port);
+	bnx2x_init_stage(bp, BLCNUM_PXP2, port);
+
+	if (func == 0) {
+
+		/* offset is 0 */
+		REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_RQ_ONCHIP_AT,
+		       ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context)));
+		REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_RQ_ONCHIP_AT + 4,
+		       ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context)));
+		REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_PSWRQ_CDU0_L2P,
+		       PXP_ONE_ILT(0));
+
+		if (iscsi_active) {
+			/* offset is 1 */
+			REG_WR(bp, GRCBASE_PXP2,
+			       PXP2_REGISTERS_RQ_ONCHIP_AT + 8,
+			       ONCHIP_ADDR1(bp->timers_mapping));
+			REG_WR(bp, GRCBASE_PXP2,
+			       PXP2_REGISTERS_RQ_ONCHIP_AT + 12,
+			       ONCHIP_ADDR2(bp->timers_mapping));
+			REG_WR(bp, GRCBASE_PXP2,
+			       PXP2_REGISTERS_PSWRQ_TM0_L2P,
+			       PXP_ONE_ILT(1));
+
+			/* offset is 2 */
+			REG_WR(bp, GRCBASE_PXP2,
+			       PXP2_REGISTERS_RQ_ONCHIP_AT + 2*8,
+			       ONCHIP_ADDR1(bp->qm_mapping));
+			REG_WR(bp, GRCBASE_PXP2,
+			       PXP2_REGISTERS_RQ_ONCHIP_AT + 2*8 + 4,
+			       ONCHIP_ADDR2(bp->qm_mapping));
+			REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_PSWRQ_QM0_L2P,
+			       PXP_ONE_ILT(2));
+
+			/* offset is 3 */
+			REG_WR(bp, GRCBASE_PXP2,
+			       PXP2_REGISTERS_RQ_ONCHIP_AT + 3*8,
+			       ONCHIP_ADDR1(bp->t1_mapping));
+			REG_WR(bp, GRCBASE_PXP2,
+			       PXP2_REGISTERS_RQ_ONCHIP_AT + 3*8 + 4,
+			       ONCHIP_ADDR2(bp->t1_mapping));
+			REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_PSWRQ_SRC0_L2P,
+			       PXP_ONE_ILT(3));
+		}
+	} else {
+		/* second function addresses start from offset 384 */
+		REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_RQ_ONCHIP_AT + 384*8,
+		       ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context)));
+		REG_WR(bp, GRCBASE_PXP2,
+		       PXP2_REGISTERS_RQ_ONCHIP_AT + 384*8 + 4,
+		       ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context)));
+		REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_PSWRQ_CDU1_L2P,
+		       PXP_ONE_ILT(384));
+
+		if (iscsi_active) {
+			/* 385 */
+			REG_WR(bp, GRCBASE_PXP2,
+			       PXP2_REGISTERS_RQ_ONCHIP_AT + 385*8,
+			       ONCHIP_ADDR1(bp->timers_mapping));
+			REG_WR(bp, GRCBASE_PXP2,
+			       PXP2_REGISTERS_RQ_ONCHIP_AT + 385*8 + 4,
+			       ONCHIP_ADDR2(bp->timers_mapping));
+			REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_PSWRQ_TM1_L2P,
+			       PXP_ONE_ILT(385));
+
+			REG_WR(bp, GRCBASE_PXP2,
+			       PXP2_REGISTERS_RQ_ONCHIP_AT + 386*8,
+			       ONCHIP_ADDR1(bp->qm_mapping));
+			REG_WR(bp, GRCBASE_PXP2,
+			       PXP2_REGISTERS_RQ_ONCHIP_AT + 386*8 + 4,
+			       ONCHIP_ADDR2(bp->qm_mapping));
+			REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_PSWRQ_QM1_L2P,
+			       PXP_ONE_ILT(386));
+
+			REG_WR(bp, GRCBASE_PXP2,
+			       PXP2_REGISTERS_RQ_ONCHIP_AT + 387*8,
+			       ONCHIP_ADDR1(bp->t1_mapping));
+			REG_WR(bp, GRCBASE_PXP2,
+			       PXP2_REGISTERS_RQ_ONCHIP_AT + 387*8 + 4,
+			       ONCHIP_ADDR2(bp->t1_mapping));
+			REG_WR(bp, GRCBASE_PXP2, PXP2_REGISTERS_PSWRQ_SRC1_L2P,
+			       PXP_ONE_ILT(387));
+		}
+	}
+
+	bnx2x_init_stage(bp, BLCNUM_TCM, port);
+	bnx2x_init_stage(bp, BLCNUM_UCM, port);
+	bnx2x_init_stage(bp, BLCNUM_CCM, port);
+	bnx2x_init_stage(bp, BLCNUM_XCM, port);
+
+	if (func == 0) {
+		for (i = 0; i < 32; i++) {
+			REG_WR(bp, GRCBASE_QM, QM_REGISTERS_BASEADDR + 4*i,
+			       1024 * 4 * i);
+			REG_WR(bp, GRCBASE_QM, QM_REGISTERS_PTRTBL + 4*i, 0);
+		}
+		REG_WR(bp, GRCBASE_QM, QM_REGISTERS_CONNNUM_0, 1024/16 - 1);
+	} else {
+		for (i = 0; i < 32; i++) {
+			REG_WR(bp, GRCBASE_QM,
+			       QM_REGISTERS_BASEADDR + 4*(32 + i), 1024 * 4);
+			REG_WR(bp, GRCBASE_QM,
+			       QM_REGISTERS_PTRTBL + 4*(32 + i), 0);
+		}
+		REG_WR(bp, GRCBASE_QM, QM_REGISTERS_CONNNUM_1, 1024/16 - 1);
+	}
+
+	bnx2x_init_stage(bp, BLCNUM_QM, port);
+
+
+	if (iscsi_active) {
+		REG_WR(bp, GRCBASE_TIMERS,
+		       TM_REGISTERS_LIN0_SCAN_TIME + 4*func, 1024/64*20);
+
+		REG_WR(bp, GRCBASE_TIMERS,
+		       TM_REGISTERS_LIN0_MAX_ACTIVE_CID + 4*func, 31);
+		bnx2x_init_stage(bp, BLCNUM_TIMERS, port);
+	}
+
+	bnx2x_init_stage(bp, BLCNUM_DQ, port);
+	bnx2x_init_stage(bp, BLCNUM_BRB1, port);
+	bnx2x_init_stage(bp, BLCNUM_PRS, port);
+	bnx2x_init_stage(bp, BLCNUM_TSDM, port);
+	bnx2x_init_stage(bp, BLCNUM_CSDM, port);
+	bnx2x_init_stage(bp, BLCNUM_USDM, port);
+	bnx2x_init_stage(bp, BLCNUM_XSDM, port);
+	bnx2x_init_stage(bp, BLCNUM_TSEM, port);
+	bnx2x_init_stage(bp, BLCNUM_USEM, port);
+	bnx2x_init_stage(bp, BLCNUM_CSEM, port);
+	bnx2x_init_stage(bp, BLCNUM_XSEM, port);
+	bnx2x_init_stage(bp, BLCNUM_UPB, port);
+	bnx2x_init_stage(bp, BLCNUM_XPB, port);
+	bnx2x_init_stage(bp, BLCNUM_PBF, port);
+
+	/* configure PBF to work without PAUSE, MTU 9000 */
+	REG_WR(bp, GRCBASE_PBF, PBF_REGISTERS_P0_PAUSE_ENABLE + 4*func, 0);
+
+	/* update threshold */
+	REG_WR(bp, GRCBASE_PBF,
+	       PBF_REGISTERS_P0_ARB_THRSH + 4*func, (9040/16));
+	/* update init credit */
+	REG_WR(bp, GRCBASE_PBF, PBF_REGISTERS_P0_INIT_CRD + 4*func,
+	       (9040/16) + 553 - 22);
+
+	/* probe changes */
+	REG_WR(bp, GRCBASE_PBF, PBF_REGISTERS_INIT_P0 + 4*func, 1);
+	msleep(5);
+	REG_WR(bp, GRCBASE_PBF, PBF_REGISTERS_INIT_P0 + 4*func, 0);
+
+
+	if (iscsi_active) {
+		/* tell the searcher where the T2 table is */
+		REG_WR(bp, GRCBASE_SRCH, SRC_REGISTERS_COUNTFREE0 + 4*func,
+		       16*1024/64);
+
+		wb_write[0] = U64_LO(bp->t2_mapping);
+		wb_write[1] = U64_HI(bp->t2_mapping);
+		bnx2x_wb_write_dmae(bp,
+				   (GRCBASE_SRCH + SRC_REGISTERS_FIRSTFREE0
+				    + 4*func), wb_write, 2);
+		wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
+		wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
+		bnx2x_wb_write_dmae(bp,
+				    (GRCBASE_SRCH + SRC_REGISTERS_LASTFREE0
+				     + 4*func), wb_write, 2);
+
+		REG_WR(bp, GRCBASE_SRCH,
+		       SRC_REGISTERS_NUMBER_HASH_BITS0 + 4*func, 10);
+		bnx2x_init_stage(bp, BLCNUM_SRCH, port);
+	}
+
+	bnx2x_init_stage(bp, BLCNUM_CDU, port);
+	bnx2x_init_stage(bp, BLCNUM_CFC, port);
+	bnx2x_init_stage(bp, BLCNUM_HC, port);
+	bnx2x_init_stage(bp, BLCNUM_MISC_AEU, port);
+	bnx2x_init_stage(bp, BLCNUM_PXPCS, port);
+	bnx2x_init_stage(bp, BLCNUM_EMAC0, port);
+	bnx2x_init_stage(bp, BLCNUM_EMAC1, port);
+	bnx2x_init_stage(bp, BLCNUM_DBU, port);
+	bnx2x_init_stage(bp, BLCNUM_DBG, port);
+	bnx2x_init_stage(bp, BLCNUM_NIG, port);
+	REG_WR(bp, GRCBASE_NIG,
+	       NIG_REGISTERS_XGXS_SERDES0_MODE_SEL + 4*func, 1);
+	bnx2x_init_stage(bp, BLCNUM_MCP, port);
+	bnx2x_init_stage(bp, BLCNUM_DMAE, port);
+
+	bnx2x_link_reset(bp);
+	/* Reset PCIe errors for debug */
+	REG_WR(bp, GRCBASE_PXPCS, 0x2114, 0xffffffff);
+	REG_WR(bp, GRCBASE_PXPCS, 0x2120, 0xffffffff);
+	REG_WR(bp, GRCBASE_PXPCS, 0x2814, 0xffffffff);
+
+	if (!nomcp) {
+		port = bp->port;
+
+		bp->fw_drv_pulse_wr_seq =
+				(SHMEM_RD(bp, drv_fw_mb[port].drv_pulse_mb) &
+				 DRV_PULSE_SEQ_MASK);
+		bp->fw_mb = SHMEM_RD(bp, drv_fw_mb[port].fw_mb_param);
+		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  fw_mb 0x%x\n",
+		   bp->fw_drv_pulse_wr_seq, bp->fw_mb);
+	} else {
+		bp->fw_mb = 0;
+	}
+
+	return 0;
+}
+
+static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
+{
+	int port = bp->port;
+
+	DP(NETIF_MSG_IFDOWN, "reset called with code (%x)\n", reset_code);
+
+	/* Do not rcv packets to BRB  */
+	REG_WR(bp, GRCBASE_NIG,
+	       NIG_REGISTERS_LLH0_BRB1_DRV_MASK + port*4, 0x0);
+	/* Do not direct rcv packets that are not for MCP to the brb  */
+	REG_WR(bp, GRCBASE_NIG, (port ? NIG_REGISTERS_LLH1_BRB1_NOT_MCP
+				 : NIG_REGISTERS_LLH0_BRB1_NOT_MCP), 0x0);
+
+	/* Configure IGU and AEU */
+	REG_WR(bp, GRCBASE_HC, HC_REGISTERS_CONFIG_0 + port*4, 0x1000);
+	REG_WR(bp, GRCBASE_MISC_AEU,
+	       MISC_REGISTERS_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
+
+	/* TODO: close doorbells port */
+
+	if (reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
+		/* reset_common */
+		REG_WR(bp, GRCBASE_MISC, MISC_REGISTERS_RESET_REG_1_CLEAR,
+		       0xf3ffff7f);
+		REG_WR(bp, GRCBASE_MISC, MISC_REGISTERS_RESET_REG_2_CLEAR,
+		       0x1403);
+	}
+}
+
+static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
+{
+	u16 pmcsr;
+
+	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+
+	switch (state) {
+	case PCI_D0:
+		pci_write_config_word(bp->pdev,
+				      bp->pm_cap + PCI_PM_CTRL,
+				      (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
+				      PCI_PM_CTRL_PME_STATUS);
+
+		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
+		/* delay required during transition out of D3hot */
+			msleep(20);
+		break;
+
+	case PCI_D3hot:
+		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+		pmcsr |= 3;
+
+		if (bp->wol) {
+			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
+		}
+		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+				      pmcsr);
+
+		/* No more memory access after this point until
+		 * the device is brought back to D0.
+		 */
+		/* udelay(50); TBD */
+		break;
+
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void bnx2x_init_def_sb(struct bnx2x *bp,
+			      struct host_def_status_block *def_sb,
+			      dma_addr_t mapping, int id)
+{
+	int port = bp->port;
+	int index, val, reg_offset;
+	u64 section;
+
+	/* ATTN */
+	section = ((u64)mapping) + offsetof(struct host_def_status_block,
+					   atten_status_block);
+	def_sb->atten_status_block.status_block_id = id;
+
+	reg_offset = (port ? MISC_REGISTERS_AEU_ENABLE1_FUNC_1_OUT_0 :
+		      MISC_REGISTERS_AEU_ENABLE1_FUNC_0_OUT_0);
+
+	for (index = 0; index < 3; index++) {
+		bp->attn_group[index].sig[0] =
+			REG_RD(bp, GRCBASE_MISC_AEU, reg_offset + 0x10*index);
+		bp->attn_group[index].sig[1] =
+			REG_RD(bp, GRCBASE_MISC_AEU,
+			       reg_offset + 0x4 + 0x10*index);
+		bp->attn_group[index].sig[2] =
+			REG_RD(bp, GRCBASE_MISC_AEU,
+			       reg_offset + 0x8 + 0x10*index);
+		bp->attn_group[index].sig[3] =
+			REG_RD(bp, GRCBASE_MISC_AEU,
+			       reg_offset + 0xc + 0x10*index);
+	}
+
+	bp->aeu_mask = REG_RD(bp, GRCBASE_MISC_AEU,
+			      (port ? MISC_REGISTERS_AEU_MASK_ATTN_FUNC_1
+			       : MISC_REGISTERS_AEU_MASK_ATTN_FUNC_0));
+
+	reg_offset = (port ? HC_REGISTERS_ATTN_MSG1_ADDR_L :
+			     HC_REGISTERS_ATTN_MSG0_ADDR_L);
+
+	REG_WR(bp, GRCBASE_HC, reg_offset, U64_LO(section));
+	REG_WR(bp, GRCBASE_HC, reg_offset + 4, U64_HI(section));
+
+	reg_offset = (port ? HC_REGISTERS_ATTN_NUM_P1 :
+		      HC_REGISTERS_ATTN_NUM_P0);
+
+	val = REG_RD(bp, GRCBASE_HC, reg_offset);
+
+	val |= id;
+	REG_WR(bp, GRCBASE_HC, reg_offset, val);
+
+	/* USTORM */
+	section = ((u64)mapping) + offsetof(struct host_def_status_block,
+					   u_def_status_block);
+
+	def_sb->u_def_status_block.status_block_id = id;
+
+	REG_WR32(bp, BAR_USTRORM_INTMEM,
+		 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
+	REG_WR32(bp, BAR_USTRORM_INTMEM,
+		 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
+		 U64_HI(section));
+	REG_WR32(bp, BAR_USTRORM_INTMEM,
+		 USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
+
+	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++) {
+		REG_WR16(bp, BAR_USTRORM_INTMEM,
+			 USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
+	}
+
+	/* CSTORM */
+	section = ((u64)mapping) +
+		 offsetof(struct host_def_status_block, c_def_status_block);
+	def_sb->c_def_status_block.status_block_id = id;
+
+	REG_WR32(bp, BAR_CSTRORM_INTMEM,
+		 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
+	REG_WR32(bp, BAR_CSTRORM_INTMEM,
+		 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
+		 U64_HI(section));
+	REG_WR32(bp, BAR_CSTRORM_INTMEM,
+		 CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
+
+	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++) {
+		REG_WR16(bp, BAR_CSTRORM_INTMEM,
+			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
+	}
+
+	/* TSTORM */
+	section = ((u64)mapping) +
+		 offsetof(struct host_def_status_block, t_def_status_block);
+	def_sb->t_def_status_block.status_block_id = id;
+
+	REG_WR32(bp, BAR_TSTRORM_INTMEM,
+		 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
+	REG_WR32(bp, BAR_TSTRORM_INTMEM,
+		 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
+		 U64_HI(section));
+	REG_WR32(bp, BAR_TSTRORM_INTMEM,
+		 TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
+
+	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++) {
+		REG_WR16(bp, BAR_TSTRORM_INTMEM,
+			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
+	}
+
+	/* XSTORM */
+	section = ((u64)mapping) +
+		 offsetof(struct host_def_status_block, x_def_status_block);
+	def_sb->x_def_status_block.status_block_id = id;
+
+	REG_WR32(bp, BAR_XSTRORM_INTMEM,
+		 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
+	REG_WR32(bp, BAR_XSTRORM_INTMEM,
+		 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
+		 U64_HI(section));
+	REG_WR32(bp, BAR_XSTRORM_INTMEM,
+		 XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
+
+	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++) {
+		REG_WR16(bp, BAR_XSTRORM_INTMEM,
+			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
+	}
+
+	bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
+}
+
+static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
+			  dma_addr_t mapping, int id)
+{
+	int index;
+	int port = bp->port;
+	u64 section;
+
+	/* USTORM */
+	section = ((u64)mapping) +
+		 offsetof(struct host_status_block, u_status_block);
+	sb->u_status_block.status_block_id = id;
+
+	REG_WR32(bp, BAR_USTRORM_INTMEM,
+		 USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
+	REG_WR32(bp, BAR_USTRORM_INTMEM,
+		 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
+		 U64_HI(section));
+
+	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++) {
+		REG_WR16(bp, BAR_USTRORM_INTMEM,
+			 USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
+	}
+
+	/* CSTORM */
+	section = ((u64)mapping) +
+		 offsetof(struct host_status_block, c_status_block);
+	sb->c_status_block.status_block_id = id;
+
+	REG_WR32(bp, BAR_CSTRORM_INTMEM,
+		 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
+	REG_WR32(bp, BAR_CSTRORM_INTMEM,
+		 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
+		 U64_HI(section));
+
+	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++) {
+		REG_WR16(bp, BAR_CSTRORM_INTMEM,
+			 CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
+	}
+
+	bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
+}
+
+static void bnx2x_init_tx_ring(struct bnx2x *bp)
+{
+	int i, j;
+
+	for_each_queue(bp, j) {
+
+		struct bnx2x_fastpath *fp = &bp->fp[j];
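+		/* the last BD of each page is a link pointing at the start
+		   of the next page, chaining the ring pages into a circle
+		   (the page index wraps via i % NUM_TX_RINGS) */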
+		for (i = 1; i <= NUM_TX_RINGS ; i++) {
+
+			struct eth_tx_bd *nextbd =
+				&fp->tx_desc_ring[TX_DESC_CNT*i - 1];
+
+			nextbd->addr_hi =
+				U64_HI(fp->tx_desc_mapping +
+				       BCM_PAGE_SIZE*(i % NUM_TX_RINGS));
+			nextbd->addr_lo =
+				U64_LO(fp->tx_desc_mapping +
+				       BCM_PAGE_SIZE*(i % NUM_TX_RINGS));
+		}
+
+		fp->tx_pkt_prod = 0;
+		fp->tx_pkt_cons = 0;
+		fp->tx_bd_prod = 0;
+		fp->tx_bd_cons = 0;
+		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
+
+	}
+}
+
+static void bnx2x_init_sp_ring(struct bnx2x *bp)
+{
+	int port = bp->port;
+
+	spin_lock_init(&bp->spq_lock);
+
+	bp->spq_left =  MAX_SPQ_PENDING;
+	bp->spq_con_idx = 0;
+	bp->spq_hw_con  = BNX2X_SPQ_SB_INDEX;
+	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
+	bp->spq_prod_bd = bp->spq;
+	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
+	bp->spq_prod_idx = 0;
+
+	REG_WR32(bp, BAR_XSTRORM_INTMEM, XSTORM_SPQ_PAGE_BASE_OFFSET(port)+4,
+		 U64_HI(bp->spq_mapping));
+	REG_WR32(bp, BAR_XSTRORM_INTMEM, XSTORM_SPQ_PAGE_BASE_OFFSET(port),
+		 U64_LO(bp->spq_mapping));
+
+	REG_WR(bp, GRCBASE_XSEM, XSEM_REGISTERS_FAST_MEMORY +
+	       XSTORM_SPQ_PROD_OFFSET(port), bp->spq_prod_idx);
+}
+
+static void bnx2x_init_rx_rings(struct bnx2x *bp)
+{
+	u16 ring_prod;
+	int i, j;
+	int port = bp->port;
+
+	/* 8 for CRC and VLAN */
+	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
+	/* 64 for alignment - TBD: check */
+	bp->rx_buf_size = bp->rx_buf_use_size + 64;
+
+	for_each_queue(bp, j) {
+
+		struct bnx2x_fastpath *fp = &bp->fp[j];
+#ifdef BNX2X_STOP_ON_ERROR
+		fp->last_alloc = fp->next_free = 0;
+#endif
+		fp->rx_bd_cons = 0;
+		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
+
+		for (i = 1; i <= NUM_RX_RINGS ; i++) {
+
+			struct eth_rx_bd *nextbd;
+
+			nextbd = & fp->rx_desc_ring[RX_DESC_CNT*i -1];
+			nextbd->addr_hi =
+				U64_HI(fp->rx_desc_mapping +
+				       BCM_PAGE_SIZE*(i % NUM_RX_RINGS));
+			nextbd->addr_lo =
+				U64_LO(fp->rx_desc_mapping +
+				       BCM_PAGE_SIZE*(i % NUM_RX_RINGS));
+
+			nextbd = (struct eth_rx_bd *)
+				 &fp->rx_comp_ring[RX_DESC_CNT*i - 1];
+
+			nextbd->addr_hi =
+				U64_HI(fp->rx_comp_mapping +
+				       BCM_PAGE_SIZE*(i % NUM_RX_RINGS));
+			nextbd->addr_lo =
+				U64_LO(fp->rx_comp_mapping +
+				       BCM_PAGE_SIZE*(i % NUM_RX_RINGS));
+		}
+
+		/*  rx completion queue */
+		fp->rx_comp_cons = ring_prod = 0;
+
+		for (i = 0; i < bp->rx_ring_size; i++) {
+			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
+				BNX2X_ERR("was only able to allocate "
+					  "%d Rx skbs\n", i);
+				break;
+			}
+			ring_prod = NEXT_RX_IDX(ring_prod);
+			BUG_TRAP(ring_prod > i);
+		}
+
+		fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
+
+		/* Warning! this will generate an interrupt (to the TSTORM);
+		   it must only be done when the chip is initialized */
+		REG_WR(bp, BAR_TSTRORM_INTMEM,
+		       TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
+		if (j != 0)
+			continue;
+
+		REG_WR(bp, BAR_USTRORM_INTMEM,
+		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET,
+		       U64_LO(fp->rx_comp_mapping));
+		REG_WR(bp, BAR_USTRORM_INTMEM,
+		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET + 4,
+		       U64_HI(fp->rx_comp_mapping));
+	}
+}
+
+static void bnx2x_init_context(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i) {
+
+		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		context->xstorm_st_context.tx_bd_page_base_hi =
+			U64_HI(fp->tx_desc_mapping);
+		context->xstorm_st_context.tx_bd_page_base_lo =
+			U64_LO(fp->tx_desc_mapping);
+		context->xstorm_st_context.db_data_addr_hi =
+			U64_HI(fp->tx_prods_mapping);
+		context->xstorm_st_context.db_data_addr_lo =
+			U64_LO(fp->tx_prods_mapping);
+
+		context->ustorm_st_context.rx_bd_page_base_hi =
+			U64_HI(fp->rx_desc_mapping);
+		context->ustorm_st_context.rx_bd_page_base_lo =
+			U64_LO(fp->rx_desc_mapping);
+		context->ustorm_st_context.status_block_id = i;
+		context->ustorm_st_context.sb_index_number =
+			HC_INDEX_U_ETH_RX_CQ_CONS;
+		context->ustorm_st_context.rcq_base_address_hi =
+			U64_HI(fp->rx_comp_mapping);
+		context->ustorm_st_context.rcq_base_address_lo =
+			U64_LO(fp->rx_comp_mapping);
+		context->ustorm_st_context.flags =
+			USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT;
+		context->ustorm_st_context.mc_alignment_size = 64;
+		/* TBD get the optimal value from the ARCH */
+		context->cstorm_st_context.sb_index_number =
+			HC_INDEX_C_ETH_TX_CQ_CONS;
+		context->cstorm_st_context.status_block_id = i;
+	}
+}
+
+static void bnx2x_free_tx_skbs(struct bnx2x *bp)
+{
+	int j;
+
+	for_each_queue(bp, j) {
+
+		struct bnx2x_fastpath *fp = &bp->fp[j];
+
+		u16 bd_cons = fp->tx_bd_cons;
+		u16 sw_prod = fp->tx_pkt_prod;
+		u16 sw_cons = fp->tx_pkt_cons;
+
+		BUG_TRAP(fp->tx_buf_ring != NULL);
+
+		while (sw_cons != sw_prod) {
+
+			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
+			sw_cons++;
+		}
+	}
+}
+
+static void bnx2x_free_rx_skbs(struct bnx2x *bp)
+{
+	int i, j;
+
+	for_each_queue(bp, j) {
+		struct bnx2x_fastpath *fp = &bp->fp[j];
+
+		BUG_TRAP(fp->rx_buf_ring != NULL);
+
+		for (i = 0; i < NUM_RX_BD; i++) {
+			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
+			struct sk_buff *skb = rx_buf->skb;
+
+			if (skb == NULL)
+				continue;
+
+			pci_unmap_single(bp->pdev,
+					 pci_unmap_addr(rx_buf, mapping),
+					 bp->rx_buf_use_size,
+					 PCI_DMA_FROMDEVICE);
+
+			rx_buf->skb = NULL;
+
+			dev_kfree_skb(skb);
+		}
+	}
+}
+
+static void bnx2x_free_skbs(struct bnx2x *bp)
+{
+	bnx2x_free_tx_skbs(bp);
+	bnx2x_free_rx_skbs(bp);
+}
+
+
+/* Statistics */
+
+/****************************************************************************
+* Macros
+****************************************************************************/
+
+#define UPDATE_STAT(s, t) \
+	do { \
+		estats->t += new->s - old->s; \
+		old->s = new->s; \
+	} while (0)
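+/* e.g. UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted) accumulates
+ * the hw counter delta into estats and saves the new raw value; 'new',
+ * 'old' and 'estats' must be in scope at the expansion site
+ */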
+
+/* sum[hi:lo] += add[hi:lo] */
+#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
+	do { \
+		s_lo += a_lo; \
+		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
+	} while (0)
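+/* e.g. sum = 0x1:0xffffffff plus add = 0x0:0x00000001 gives
+ * sum = 0x2:0x00000000 - the low word wraps and carries into the high word
+ */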
+
+/* difference = minuend - subtrahend */
+#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
+	do { \
+		if (m_lo < s_lo) {	/* underflow */ \
+			d_hi = m_hi - s_hi; \
+			if (d_hi > 0) { /* we can 'loan' 1 */ \
+				d_hi--; \
+				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
+			} else {	/* m_hi <= s_hi */ \
+				d_hi = 0; \
+				d_lo = 0; \
+			} \
+		} else {		/* m_lo >= s_lo */ \
+			if (m_hi < s_hi) { \
+			    d_hi = 0; \
+			    d_lo = 0; \
+			} else {	/* m_hi >= s_hi */ \
+			    d_hi = m_hi - s_hi; \
+			    d_lo = m_lo - s_lo; \
+			} \
+		} \
+	} while (0)
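+/* e.g. minuend = 0x1:0x00000000 minus subtrahend = 0x0:0x00000001 gives
+ * difference = 0x0:0xffffffff (one is borrowed from the high word);
+ * if the subtrahend is the larger value the result saturates to 0:0
+ */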
+
+/* minuend -= subtrahend */
+#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
+	do { \
+		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
+	} while (0)
+
+#define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
+	do { \
+		DIFF_64(diff.hi, new->s_hi, old->s_hi, \
+			diff.lo, new->s_lo, old->s_lo); \
+		old->s_hi = new->s_hi; \
+		old->s_lo = new->s_lo; \
+		ADD_64(estats->t_hi, diff.hi, \
+		       estats->t_lo, diff.lo); \
+	} while (0)
+
+/* sum[hi:lo] += add */
+#define EXTEND_64(s_hi, s_lo, a) \
+	do { \
+		s_lo += a; \
+		s_hi += (s_lo < a) ? 1 : 0; \
+	} while (0)
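+/* e.g. sum = 0x0:0xfffffffe plus a = 3 wraps to sum = 0x1:0x00000001 */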
+
+#define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
+	do { \
+		EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
+	} while (0)
+
+/****************************************************************************
+* General service functions
+****************************************************************************/
+
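+/* fold a {hi, lo} counter pair (hi word first) into a long: the full
+ * 64-bit value on 64-bit kernels, only the low 32 bits on 32-bit kernels
+ */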
+static inline long bnx2x_hilo(u32 *hiref)
+{
+	u32 lo = *(hiref + 1);
+#if (BITS_PER_LONG == 64)
+	u32 hi = *hiref;
+
+	return HILO_U64(hi, lo);
+#else
+	return lo;
+#endif
+}
+
+/****************************************************************************
+* Init service functions
+****************************************************************************/
+
+/* DMAE command positions used
+ * Port0 BIGMAC 0,1 EMAC 2,3,4 NIG 5 MCP 12
+ * Port1 BIGMAC 6,7 EMAC 8,9,10 NIG 11 MCP 13
+ */
+static void bnx2x_init_mac_stats(struct bnx2x *bp)
+{
+	struct dmae_command *dmae = &bp->dmae;
+	int port = bp->port;
+	u32 mac_addr = (GRCBASE_NIG +
+			(port ? NIG_REGISTERS_INGRESS_BMAC1_MEM :
+				NIG_REGISTERS_INGRESS_BMAC0_MEM));
+
+	memset(dmae, 0, sizeof(struct dmae_command));
+
+	/* BIGMAC_REGISTER_TX_STAT_GTPKT - BIGMAC_REGISTER_TX_STAT_GTBYT */
+	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
+			DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
+			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+			DMAE_CMD_ENDIANITY_DW_SWAP |
+			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
+	dmae->src_addr_lo = (mac_addr + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+	dmae->src_addr_hi = 0;
+	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
+	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
+	dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
+		     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+	dmae->comp_addr_lo = (GRCBASE_DMAE +
+			      (port ? DMAE_REGISTERS_GO_C7 :
+				      DMAE_REGISTERS_GO_C1)) >> 2;
+	dmae->comp_val = 1;
+	bnx2x_post_dmae(bp, port ? 6 : 0);
+
+	/* BIGMAC_REGISTER_RX_STAT_GR64 - BIGMAC_REGISTER_RX_STAT_GRIPJ */
+	/* command type and comp val taken from first command */
+	dmae->src_addr_lo = (mac_addr + BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+	dmae->src_addr_hi = 0;
+	dmae->dst_addr_lo =  U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+				    offsetof(struct bmac_stats, rx_gr64));
+	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+				   offsetof(struct bmac_stats, rx_gr64));
+	dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
+		     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+	dmae->comp_addr_lo = (GRCBASE_DMAE +
+			      (port ? DMAE_REGISTERS_GO_C11 :
+				      DMAE_REGISTERS_GO_C5)) >> 2;
+	bnx2x_post_dmae(bp, port ? 7 : 1);
+
+
+	mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
+
+	/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT) */
+	dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC) >> 2;
+	dmae->src_addr_hi = 0;
+	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
+	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
+	dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
+	dmae->comp_addr_lo = (GRCBASE_DMAE +
+			      (port ? DMAE_REGISTERS_GO_C9 :
+				      DMAE_REGISTERS_GO_C3)) >> 2;
+	bnx2x_post_dmae(bp, port ? 8 : 2);
+
+	/* EMAC_REG_EMAC_RX_STAT_AC_28 */
+	dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
+	dmae->src_addr_hi = 0;
+	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+				   offsetof(struct emac_stats,
+					    rx_falsecarriererrors));
+	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+				   offsetof(struct emac_stats,
+					    rx_falsecarriererrors));
+	dmae->len = 1;
+	dmae->comp_addr_lo = (GRCBASE_DMAE +
+			      (port ? DMAE_REGISTERS_GO_C10 :
+				      DMAE_REGISTERS_GO_C4)) >> 2;
+	bnx2x_post_dmae(bp, port ? 9 : 3);
+
+	/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT) */
+	dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_TX_STAT_AC) >> 2;
+	dmae->src_addr_hi = 0;
+	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+				   offsetof(struct emac_stats,
+					    tx_ifhcoutoctets));
+	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+				   offsetof(struct emac_stats,
+					    tx_ifhcoutoctets));
+	dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
+	dmae->comp_addr_lo = (GRCBASE_DMAE +
+			      (port ? DMAE_REGISTERS_GO_C11 :
+				      DMAE_REGISTERS_GO_C5)) >> 2;
+	bnx2x_post_dmae(bp, port ? 10 : 4);
+
+	/* NIG */
+	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
+			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
+			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+			DMAE_CMD_ENDIANITY_DW_SWAP |
+			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
+	dmae->src_addr_lo = (GRCBASE_NIG +
+			     (port ? NIG_REGISTERS_STAT1_BRB_DISCARD :
+				     NIG_REGISTERS_STAT0_BRB_DISCARD)) >> 2;
+	dmae->src_addr_hi = 0;
+	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
+	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
+	dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
+	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
+				    offsetof(struct nig_stats, done));
+	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
+				    offsetof(struct nig_stats, done));
+	dmae->comp_val = 0xffffffff;
+	bnx2x_post_dmae(bp, port ? 11 : 5);
+
+	/* MCP */
+	if (bp->fw_mb) {
+		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
+				/* DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | */
+				DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+				DMAE_CMD_ENDIANITY_DW_SWAP |
+				(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
+		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
+					   sizeof(u32));
+		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
+					   sizeof(u32));
+		dmae->dst_addr_lo = bp->fw_mb >> 2;
+		dmae->dst_addr_hi = 0;
+		dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
+			     sizeof(u32)) >> 2;
+		dmae->comp_addr_lo = 0;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 0;
+		bnx2x_post_dmae(bp, port ? 13 : 12);
+	}
+}
+
+static void bnx2x_init_stats(struct bnx2x *bp)
+{
+	int port = bp->port;
+
+	atomic_set(&bp->stats_state, STATS_STATE_DISABLE);
+
+	bp->old_brb_discard = REG_RD(bp, GRCBASE_NIG,
+				     NIG_REGISTERS_STAT0_BRB_DISCARD +
+				     port*0x38);
+
+	memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
+	memset(&bp->net_stats, 0, sizeof(struct net_device_stats));
+
+	bnx2x_init_mac_stats(bp);
+
+	REG_WR32(bp, BAR_XSTRORM_INTMEM, XSTORM_STATS_FLAGS_OFFSET(port), 1);
+	REG_WR32(bp, BAR_XSTRORM_INTMEM, XSTORM_STATS_FLAGS_OFFSET(port)+4, 0);
+
+	REG_WR32(bp, BAR_TSTRORM_INTMEM, TSTORM_STATS_FLAGS_OFFSET(port), 1);
+	REG_WR32(bp, BAR_TSTRORM_INTMEM, TSTORM_STATS_FLAGS_OFFSET(port)+4, 0);
+
+	REG_WR32(bp, BAR_CSTRORM_INTMEM, CSTORM_STATS_FLAGS_OFFSET(port), 0);
+	REG_WR32(bp, BAR_CSTRORM_INTMEM, CSTORM_STATS_FLAGS_OFFSET(port)+4, 0);
+
+	REG_WR32(bp, BAR_XSTRORM_INTMEM,
+		 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
+		 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
+	REG_WR32(bp, BAR_XSTRORM_INTMEM,
+		 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
+		 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
+
+	REG_WR32(bp, BAR_TSTRORM_INTMEM,
+		 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
+		 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
+	REG_WR32(bp, BAR_TSTRORM_INTMEM,
+		 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
+		 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
+}
+
+static void bnx2x_stop_stats(struct bnx2x *bp)
+{
+	atomic_set(&bp->stats_state, STATS_STATE_STOP);
+	DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
+	while (atomic_read(&bp->stats_state) != STATS_STATE_DISABLE) {
+		msleep(100);
+	}
+	DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
+}
+
+/****************************************************************************
+* Statistics service functions
+****************************************************************************/
+
+static void bnx2x_update_bmac_stats(struct bnx2x *bp)
+{
+	struct regp diff;
+	struct regp sum;
+	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
+	struct bmac_stats *old = &bp->old_bmac;
+	struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
+
+	sum.hi = 0;
+	sum.lo = 0;
+
+	UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
+		      tx_gtbyt.lo, total_bytes_transmitted_lo);
+
+	UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
+		      tx_gtmca.lo, total_multicast_packets_transmitted_lo);
+	ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
+
+	UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
+		      tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
+	ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
+
+	UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
+		      tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
+	SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
+	       estats->total_unicast_packets_transmitted_lo, sum.lo);
+
+	UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
+	UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
+	UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
+	UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
+	UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
+	UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
+	UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
+	UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
+	UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
+	UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
+	UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);
+
+	UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
+	UPDATE_STAT(rx_grund.lo, runt_packets_received);
+	UPDATE_STAT(rx_grovr.lo, jabber_packets_received);
+	UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
+	UPDATE_STAT(rx_grxcf.lo, control_frames_received);
+	/* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
+	UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
+	UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);
+
+	UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
+		      rx_grerb.lo, stat_IfHCInBadOctets_lo);
+	UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
+		      tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
+	UPDATE_STAT(tx_gterr.lo, stat_Dot3statsinternalmactransmiterrors);
+	/* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
+	estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
+}
+
+static void bnx2x_update_emac_stats(struct bnx2x *bp)
+{
+	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
+	struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
+
+	UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
+					     total_bytes_transmitted_lo);
+	UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
+					total_unicast_packets_transmitted_hi,
+					total_unicast_packets_transmitted_lo);
+	UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
+				      total_multicast_packets_transmitted_hi,
+				      total_multicast_packets_transmitted_lo);
+	UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
+				      total_broadcast_packets_transmitted_hi,
+				      total_broadcast_packets_transmitted_lo);
+
+	estats->pause_xon_frames_transmitted += new->tx_outxonsent;
+	estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
+	estats->single_collision_transmit_frames +=
+				new->tx_dot3statssinglecollisionframes;
+	estats->multiple_collision_transmit_frames +=
+				new->tx_dot3statsmultiplecollisionframes;
+	estats->late_collision_frames += new->tx_dot3statslatecollisions;
+	estats->excessive_collision_frames +=
+				new->tx_dot3statsexcessivecollisions;
+	estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
+	estats->frames_transmitted_65_127_bytes +=
+				new->tx_etherstatspkts65octetsto127octets;
+	estats->frames_transmitted_128_255_bytes +=
+				new->tx_etherstatspkts128octetsto255octets;
+	estats->frames_transmitted_256_511_bytes +=
+				new->tx_etherstatspkts256octetsto511octets;
+	estats->frames_transmitted_512_1023_bytes +=
+				new->tx_etherstatspkts512octetsto1023octets;
+	estats->frames_transmitted_1024_1522_bytes +=
+				new->tx_etherstatspkts1024octetsto1522octet;
+	estats->frames_transmitted_1523_9022_bytes +=
+				new->tx_etherstatspktsover1522octets;
+
+	estats->crc_receive_errors += new->rx_dot3statsfcserrors;
+	estats->alignment_errors += new->rx_dot3statsalignmenterrors;
+	estats->false_carrier_detections += new->rx_falsecarriererrors;
+	estats->runt_packets_received += new->rx_etherstatsundersizepkts;
+	estats->jabber_packets_received += new->rx_dot3statsframestoolong;
+	estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
+	estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
+	estats->control_frames_received += new->rx_maccontrolframesreceived;
+	estats->error_runt_packets_received += new->rx_etherstatsfragments;
+	estats->error_jabber_packets_received += new->rx_etherstatsjabbers;
+
+	UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
+					       stat_IfHCInBadOctets_lo);
+	UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
+						stat_IfHCOutBadOctets_lo);
+	estats->stat_Dot3statsinternalmactransmiterrors +=
+				new->tx_dot3statsinternalmactransmiterrors;
+	estats->stat_Dot3StatsCarrierSenseErrors +=
+				new->rx_dot3statscarriersenseerrors;
+	estats->stat_Dot3StatsDeferredTransmissions +=
+				new->tx_dot3statsdeferredtransmissions;
+	estats->stat_FlowControlDone += new->tx_flowcontroldone;
+	estats->stat_XoffStateEntered += new->rx_xoffstateentered;
+}
+
+static int bnx2x_update_storm_stats(struct bnx2x *bp)
+{
+	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
+	struct tstorm_common_stats *tstats = &stats->tstorm_common;
+	struct xstorm_common_stats *xstats = &stats->xstorm_common;
+	struct nig_stats *nstats = bnx2x_sp(bp, nig);
+	struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
+
+	/* are DMAE stats valid? */
+	if (nstats->done != 0xffffffff) {
+		DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
+		return -1;
+	}
+
+	/* are storm stats valid? */
+	if (tstats->done.hi != 0xffffffff) {
+		DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
+		return -2;
+	}
+	if (xstats->done.hi != 0xffffffff) {
+		DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
+		return -3;
+	}
+
+	estats->total_bytes_received_hi = tstats->total_rcv_bytes.hi;
+	estats->total_bytes_received_lo = tstats->total_rcv_bytes.lo;
+	estats->total_unicast_packets_received_lo = tstats->rcv_unicast_pkts;
+	/* total_unicast_packets_received_hi */
+	estats->total_multicast_packets_received_lo =
+						tstats->rcv_multicast_pkts;
+	/* total_multicast_packets_received_hi */
+	estats->total_broadcast_packets_received_lo =
+						tstats->rcv_broadcast_pkts;
+	/* total_broadcast_packets_received_hi */
+
+	estats->frames_received_64_bytes = tstats->pkts_till_64B;
+	estats->frames_received_65_127_bytes = tstats->pkts_65_to_127B;
+	estats->frames_received_128_255_bytes = tstats->pkts_128_to_255B;
+	estats->frames_received_256_511_bytes = tstats->pkts_256_to_511B;
+	estats->frames_received_512_1023_bytes = tstats->pkts_512_to_1023B;
+	estats->frames_received_1024_1522_bytes = tstats->pkts_1024_to_1522B;
+	estats->frames_received_1523_9022_bytes = tstats->pkts_1523_to_9022B;
+
+	estats->x_total_sent_bytes_hi = xstats->total_sent_bytes.hi;
+	estats->x_total_sent_bytes_lo = xstats->total_sent_bytes.lo;
+	estats->x_total_sent_pkts = xstats->total_sent_pkts;
+
+	estats->t_rcv_unicast_bytes_hi = tstats->rcv_unicast_bytes.hi;
+	estats->t_rcv_unicast_bytes_lo = tstats->rcv_unicast_bytes.lo;
+	estats->t_rcv_broadcast_bytes_hi = tstats->rcv_broadcast_bytes.hi;
+	estats->t_rcv_broadcast_bytes_lo = tstats->rcv_broadcast_bytes.lo;
+	estats->t_rcv_multicast_bytes_hi = tstats->rcv_multicast_bytes.hi;
+	estats->t_rcv_multicast_bytes_lo = tstats->rcv_multicast_bytes.lo;
+	estats->t_total_rcv_pkt = tstats->total_rcv_pkts;
+
+	estats->no_buff_discard = tstats->no_buff_discard;
+	estats->errors_discard = tstats->errors_discard;
+	estats->mac_filter_discard = tstats->mac_filter_discard;
+	estats->ttl0_discard = tstats->ttl0_discard;
+	estats->xxoverflow_discard = tstats->xxoverflow_discard;
+
+	estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
+	bp->old_brb_discard = nstats->brb_discard;
+
+	estats->brb_packet = nstats->brb_packet;
+	estats->brb_truncate = nstats->brb_truncate;
+	estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
+	estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
+	estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
+	estats->mng_discard = nstats->mng_discard;
+	estats->mng_octet_inp = nstats->mng_octet_inp;
+	estats->mng_octet_out = nstats->mng_octet_out;
+	estats->mng_packet_inp = nstats->mng_packet_inp;
+	estats->mng_packet_out = nstats->mng_packet_out;
+	estats->pbf_octets = nstats->pbf_octets;
+	estats->pbf_packet = nstats->pbf_packet;
+	estats->safc_inp = nstats->safc_inp;
+
+	xstats->done.hi = 0;
+	tstats->done.hi = 0;
+	nstats->done = 0;
+
+	return 0;
+}
+
+static void bnx2x_update_net_stats(struct bnx2x *bp)
+{
+	struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
+
+	bp->net_stats.rx_packets =
+		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
+		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
+		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
+
+	bp->net_stats.tx_packets =
+		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
+		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
+		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
+
+	bp->net_stats.rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
+
+	bp->net_stats.tx_bytes =
+		bnx2x_hilo(&estats->total_bytes_transmitted_hi);
+
+	bp->net_stats.rx_dropped = estats->no_buff_discard;
+	bp->net_stats.tx_dropped = 0;
+
+	bp->net_stats.multicast =
+		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
+
+	bp->net_stats.collisions =
+		estats->single_collision_transmit_frames +
+		estats->multiple_collision_transmit_frames +
+		estats->late_collision_frames +
+		estats->excessive_collision_frames; /*TBD check this*/
+
+	bp->net_stats.rx_length_errors = estats->runt_packets_received +
+					 estats->jabber_packets_received;
+	bp->net_stats.rx_over_errors = estats->no_buff_discard;
+	bp->net_stats.rx_crc_errors = estats->crc_receive_errors;
+	bp->net_stats.rx_frame_errors = estats->alignment_errors;
+	bp->net_stats.rx_fifo_errors = estats->brb_discard +
+				       estats->errors_discard;
+	bp->net_stats.rx_missed_errors = estats->xxoverflow_discard;
+
+	bp->net_stats.rx_errors = bp->net_stats.rx_length_errors +
+				  bp->net_stats.rx_over_errors +
+				  bp->net_stats.rx_crc_errors +
+				  bp->net_stats.rx_frame_errors +
+				  bp->net_stats.rx_fifo_errors;
+
+	bp->net_stats.tx_aborted_errors = estats->late_collision_frames +
+					  estats->excessive_collision_frames;
+	bp->net_stats.tx_carrier_errors = estats->false_carrier_detections;
+	bp->net_stats.tx_fifo_errors = 0;
+	bp->net_stats.tx_heartbeat_errors = 0;
+	bp->net_stats.tx_window_errors = 0;
+
+	bp->net_stats.tx_errors = bp->net_stats.tx_aborted_errors +
+				  bp->net_stats.tx_carrier_errors;
+
+	estats->mac_stx_start = ++estats->mac_stx_end;
+}
+
+static void bnx2x_update_stats(struct bnx2x *bp)
+{
+
+	if (!bnx2x_update_storm_stats(bp)) {
+
+		if (bp->phy_flags & PHY_BMAC_FLAG) {
+			bnx2x_update_bmac_stats(bp);
+
+		} else if (bp->phy_flags & PHY_EMAC_FLAG) {
+			bnx2x_update_emac_stats(bp);
+
+		} else { /* unreached */
+			BNX2X_ERR("no MAC active\n");
+			return;
+		}
+
+		bnx2x_update_net_stats(bp);
+	}
+
+	if (bp->msglevel & NETIF_MSG_TIMER) {
+		printk(KERN_DEBUG "%s:\n"
+		       KERN_DEBUG "tx avail(%x)   rx usage(%x)\n"
+		       KERN_DEBUG "tx pkt (%lx)   rx pkt(%lx)\n"
+		       KERN_DEBUG "tx hc idx(%x)  rx hc index(%x)\n"
+		       KERN_DEBUG "%s (Xoff events %u)\n"
+		       KERN_DEBUG "brb drops                     %u\n"
+		       KERN_DEBUG "tstats->no_buff_discard,      %u\n"
+		       KERN_DEBUG "tstats->errors_discard,       %u\n"
+		       KERN_DEBUG "tstats->mac_filter_discard,   %u\n"
+		       KERN_DEBUG "tstats->xxoverflow_discard,   %u\n"
+		       KERN_DEBUG "tstats->ttl0_discard,         %u\n",
+		       bp->dev->name,
+		       bnx2x_tx_avail(bp->fp),
+		       (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
+		       bp->net_stats.tx_packets, bp->net_stats.rx_packets,
+		       *bp->fp->tx_cons_sb, *bp->fp->rx_cons_sb,
+		       netif_queue_stopped(bp->dev)? "Xoff" : "Xon ",
+		       bp->slowpath->eth_stats.driver_xoff,
+		       bp->slowpath->eth_stats.brb_discard,
+		       bp->slowpath->eth_stats.no_buff_discard,
+		       bp->slowpath->eth_stats.errors_discard,
+		       bp->slowpath->eth_stats.mac_filter_discard,
+		       bp->slowpath->eth_stats.xxoverflow_discard,
+		       bp->slowpath->eth_stats.ttl0_discard);
+	}
+
+	if (bp->state != BNX2X_STATE_OPEN) {
+		DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
+		return;
+	}
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic)) {
+		return;
+	}
+#endif
+
+	if (bp->fw_mb) {
+		REG_WR32(bp, GRCBASE_DMAE,
+			 (bp->port? DMAE_REGISTERS_GO_C13 :
+				    DMAE_REGISTERS_GO_C12), 1);
+	}
+
+	if (atomic_read(&bp->stats_state) != STATS_STATE_ENABLE) {
+		atomic_set(&bp->stats_state, STATS_STATE_DISABLE);
+		return;
+	}
+
+	if (bp->phy_flags & PHY_BMAC_FLAG) {
+		REG_WR32(bp, GRCBASE_DMAE,
+			 (bp->port? DMAE_REGISTERS_GO_C6 :
+				    DMAE_REGISTERS_GO_C0), 1);
+
+	} else if (bp->phy_flags & PHY_EMAC_FLAG) {
+		REG_WR32(bp, GRCBASE_DMAE,
+			 (bp->port? DMAE_REGISTERS_GO_C8 :
+				    DMAE_REGISTERS_GO_C2), 1);
+	}
+
+	if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
+		/* stats ramrod has its own slot on the SPE */
+		bp->spq_left++;
+		bp->stat_pending = 1;
+	}
+}
+
+static void bnx2x_timer(unsigned long data)
+{
+	struct bnx2x *bp = (struct bnx2x *) data;
+
+	if (!netif_running(bp->dev))
+		return;
+
+	if (atomic_read(&bp->intr_sem) != 0)
+		goto bnx2x_restart_timer;
+
+	if (poll) {
+		struct bnx2x_fastpath *fp = &bp->fp[0];
+		int rc;
+
+		bnx2x_tx_int(fp, 1000);
+		rc = bnx2x_rx_int(fp, 1000);
+	}
+
+	if (atomic_read(&bp->stats_state) == STATS_STATE_DISABLE)
+		goto bnx2x_restart_timer;
+
+	bnx2x_update_stats(bp);
+
+bnx2x_restart_timer:
+	mod_timer(&bp->timer, jiffies + bp->current_interval);
+}
+
+/* end of Statistics */
+
+
+static int bnx2x_init_nic(struct bnx2x *bp)
+{
+	int j, rc = 0;
+
+	for_each_queue(bp, j) {
+		struct bnx2x_fastpath *fp = &bp->fp[j];
+		DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p,%d);\n",
+		   bp, fp->status_blk, j);
+		fp->index = j;
+		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, j);
+	}
+
+	bnx2x_init_def_sb(bp, bp->def_status_blk,
+			  bp->def_status_blk_mapping, 0x10);
+	bnx2x_update_coalesce(bp);
+	bnx2x_init_rx_rings(bp);
+	bnx2x_init_tx_ring(bp);
+	bnx2x_init_sp_ring(bp);
+	bnx2x_init_context(bp);
+	bnx2x_init_stats(bp);
+	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx untill link is up */
+	bnx2x_set_storm_rx_mode(bp);
+	bnx2x_enable_int(bp);
+
+	return rc;
+}
+
+
+static int bnx2x_req_irq(struct bnx2x *bp)
+{
+	int i = 0, rc;
+
+	if (!use_inta) {
+		bp->msix_table[0].entry = 0;
+		for_each_queue(bp, i) {
+			bp->msix_table[i+1].entry = i+1;
+		}
+
+		if ((rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
+					bp->num_queues + 1)) == 0) {
+
+			bp->flags |= USING_MSIX_FLAG;
+
+			DP(NETIF_MSG_IFUP, "about to request sp irq\n");
+
+			rc = request_irq(bp->msix_table[0].vector,
+					 bnx2x_msix_sp_int, 0,
+					 bp->dev->name, bp->dev);
+			DP(NETIF_MSG_IFUP, "returned %d\n", rc);
+			if (rc) {
+				return -1;
+			}
+			for_each_queue(bp, i) {
+				DP(NETIF_MSG_IFUP,
+				   "about to request fp irq#%d\n", i);
+				rc = request_irq(bp->msix_table[i+1].vector,
+						 bnx2x_msix_fp_int, 0,
+						 bp->dev->name, &bp->fp[i]);
+				DP(NETIF_MSG_IFUP, "returned %d\n", rc);
+				if (rc) {
+					goto out_irq;
+				}
+				bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
+			}
+		}
+	}
+	if (!(bp->flags & USING_MSIX_FLAG)) {
+		rc = request_irq(bp->pdev->irq, bnx2x_interrupt,
+				 IRQF_SHARED, bp->dev->name, bp->dev);
+		if (rc) {
+			return -1;
+		}
+		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
+	}
+
+	return 0;
+
+out_irq:
+	if (bp->flags & USING_MSIX_FLAG) {
+		for_each_queue(bp, i) {
+			DP(NETIF_MSG_IFUP, "failed to load. "
+			   "about to rlease irq#%d\n", i);
+			if (bnx2x_fp(bp, i, state) !=
+			    BNX2X_FP_STATE_CLOSED) {
+				free_irq(bp->msix_table[i+1].vector,
+					 &bp->fp[i]);
+				bnx2x_fp(bp, i, state) =
+					BNX2X_FP_STATE_CLOSED;
+				DP(NETIF_MSG_IFUP, "rleased irq#%d\n", i);
+			}
+		}
+		free_irq(bp->msix_table[0].vector, bp->dev);
+		pci_disable_msix(bp->pdev);
+		bp->flags &= ~USING_MSIX_FLAG;
+	} else {
+		free_irq(bp->pdev->irq, bp->dev);
+	}
+
+	return -1;
+}
+
+static void bnx2x_free_irq(struct bnx2x *bp)
+{
+	int i = 0;
+
+	if (bp->flags & USING_MSIX_FLAG) {
+
+		free_irq(bp->msix_table[0].vector, bp->dev);
+		DP(NETIF_MSG_IFDOWN, "rleased sp irq (%d)\n",
+		   bp->msix_table[0].vector);
+
+		for_each_queue(bp, i) {
+			DP(NETIF_MSG_IFDOWN, "about to rlease fp #%d->%d irq,"
+			   " state(%x)\n", i,
+			   bp->msix_table[i+1].vector, bnx2x_fp(bp, i, state));
+			if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED) {
+
+				free_irq(bp->msix_table[i+1].vector,
+					 &bp->fp[i]);
+				bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_CLOSED;
+			} else {
+				DP(NETIF_MSG_IFDOWN, "irq not freed\n");
+			}
+		}
+		pci_disable_msix(bp->pdev);
+		bp->flags &= ~USING_MSIX_FLAG;
+	} else {
+		free_irq(bp->pdev->irq, bp->dev);
+	}
+}
+
+
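+/* bnx2x_nic_load brings the NIC up: it handshakes with the MCP firmware
+ * (LOAD_REQ/LOAD_DONE), allocates driver memory, initializes the HW
+ * blocks, optionally requests IRQs, posts the PORT_SETUP ramrod and
+ * starts the fast path and the periodic timer.
+ */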
+static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
+{
+	int rc;
+	int i = 0;
+
+	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
+
+	/* Send LOAD_REQUEST command to MCP.
+	 * Returns the type of LOAD command: if this is the first port to be
+	 * initialized, the common blocks should be initialized as well;
+	 * otherwise they should not.
+	 */
+	if (!nomcp) {
+		rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
+		if (rc == FW_MSG_CODE_DRV_LOAD_REFUSED) {
+			return -EBUSY; /* other port in diagnostic mode */
+		}
+	} else {
+		rc = FW_MSG_CODE_DRV_LOAD_COMMON;
+	}
+
+	bp->num_queues = 1;
+
+	/* Allocate driver's memories */
+	if (bnx2x_alloc_mem(bp))
+		return -ENOMEM;
+
+	/* Initialize the appropriate blocks */
+	if (bnx2x_funcion_init(bp, (rc == FW_MSG_CODE_DRV_LOAD_COMMON))) {
+		BNX2X_ERR("HW init failed, aborting\n");
+		goto out_error;
+	}
+
+	/* Request IRQ if needed */
+	if (req_irq) {
+		rc = bnx2x_req_irq(bp);
+		if (rc) {
+			goto out_error;
+		}
+	}
+
+	/* Reenable SP tasklet */
+	if (bp->sp_task_en) {
+		tasklet_enable(&bp->sp_task);
+	} else {
+		bp->sp_task_en = 1;
+	}
+
+	atomic_set(&bp->intr_sem, 0);
+	/* Setup NIC internals */
+	rc = bnx2x_init_nic(bp); /* also enables interrupts */
+	if (rc) {
+		goto out_skbs;
+	}
+
+	/* Send LOAD_DONE command to MCP */
+	if (!nomcp) {
+		rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
+		DP(NETIF_MSG_IFUP, "rc=0x%x\n", rc);
+		if (!rc) {
+			goto int_disable;
+		}
+	}
+
+	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
+	/* Enable Rx interrupt handling before sending the ramrod
+	   as it's completed on Rx FP queue */
+	netif_poll_enable(bp->dev);
+
+	/* On default status block */
+	bnx2x_ack_sb(bp, 16, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
+	/* On FP */
+	for_each_queue(bp, i)
+		bnx2x_ack_sb(bp, i, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
+	/* SETUP ramrod */
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
+	/* Wait for completion */
+	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
+	if (rc) {
+		goto stop_netif;
+	}
+
+
+	/* Configure CAM */
+	bnx2x_set_mac_addr(bp);
+
+	bnx2x_phy_init(bp);
+
+	/* Start fast path */
+	if (req_irq) { /* IRQ is only requested from bnx2x_open function
+			  when Tx queue is initialized for the first time */
+		netif_start_queue(bp->dev);
+		if (bp->flags & USING_MSIX_FLAG) {
+			printk(KERN_INFO PFX "%s: using MSI-X\n",
+			       bp->dev->name);
+		}
+
+		/* Otherwise Tx queue should be only reenabled */
+	} else if (netif_running(bp->dev)) {
+		netif_wake_queue(bp->dev);
+		bnx2x_set_rx_mode(bp->dev);
+	}
+
+	/* start the timer */
+	mod_timer(&bp->timer, jiffies + bp->current_interval);
+
+	return 0;
+
+stop_netif:
+	netif_poll_disable(bp->dev);
+int_disable:
+	bnx2x_disable_int_sync(bp);
+out_skbs:
+	bnx2x_free_skbs(bp);
+
+	bnx2x_free_irq(bp);
+
+out_error:
+	bnx2x_free_mem(bp);
+
+	/* TBD we really need to reset the chip
+	   if we want to recover from this */
+	return rc;
+}
+
+/* Called with rtnl_lock */
+static int bnx2x_open(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	bnx2x_set_power_state(bp, PCI_D0);
+
+	return bnx2x_nic_load(bp, 1);
+	/* TBD do we revert power state if load fails? */
+}
+
+
+/* Called with netif_tx_lock.
+ * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue().
+ */
+static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	dma_addr_t mapping;
+	struct eth_tx_bd *txbd;
+	struct eth_tx_parse_bd *pbd = NULL;
+	struct sw_tx_bd *tx_buf;
+	u16 pkt_prod, bd_prod;
+	int nbd, fp_index = 0;
+	struct bnx2x_fastpath *fp;
+
+	fp = &bp->fp[fp_index];
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic)) {
+		return NETDEV_TX_BUSY;
+	}
+#endif
+
+	if (unlikely(bnx2x_tx_avail(fp) <
+		     (skb_shinfo(skb)->nr_frags + 3))) {
+		bp->slowpath->eth_stats.driver_xoff++;
+		netif_stop_queue(dev);
+		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	/* This is a bit ugly.  First we use one BD which we mark as start,
+	 * then for TSO or xsum we have a parsing info BD,
+	 * and only then we have the rest of the TSO BDs.
+	 * (don't forget to mark the last one as last,
+	 * and to unmap only AFTER you write to the BD ...)
+	 * I would like to thank Dov Hirshfeld for this mess.
+	 */
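+	/* Resulting BD chain for a checksummed TSO packet:
+	 *   [start BD (headers)][parse BD][data BD][frag BD]...[last BD]
+	 */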
+
+	pkt_prod = fp->tx_pkt_prod++;
+	bd_prod = fp->tx_bd_prod;
+
+	pkt_prod = TX_BD(pkt_prod);
+	bd_prod = TX_BD(bd_prod);
+
+	/* get a tx_buff and first bd */
+	tx_buf = &fp->tx_buf_ring[pkt_prod];
+	txbd = &fp->tx_desc_ring[bd_prod];
+
+	txbd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+
+	/* remember the first bd of the packet */
+	tx_buf->first_bd = bd_prod;
+
+	DP(NETIF_MSG_TX_QUEUED,
+	   "sending pkt=%u @%p, next_idx=%u, bd=%u @%p\n",
+	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, txbd);
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		u8 len;
+		struct iphdr *iph = ip_hdr(skb);
+
+		txbd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
+
+		/* turn on parsing and get a bd */
+		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+		pbd = (void *)&fp->tx_desc_ring[bd_prod];
+
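+		/* lengths in the parse BD are in units of 16-bit words,
+		 * hence the divisions by 2 below
+		 */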
+		len = ((u8 *)ip_hdr(skb) - (u8 *)skb->data)/2;
+
+		/* for now NS flag is not used in Linux */
+		pbd->global_data =
+			len | ((skb->protocol == ETH_P_8021Q)
+			       << ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT);
+
+		pbd->ip_hlen = ip_hdrlen(skb)/2;
+		pbd->total_hlen = len + pbd->ip_hlen;
+
+		if (iph->protocol == IPPROTO_TCP) {
+			struct tcphdr *th = tcp_hdr(skb);
+			txbd->bd_flags.as_bitfield |=
+				ETH_TX_BD_FLAGS_TCP_CSUM;
+			pbd->tcp_flags = htonl(tcp_flag_word(skb)) & 0xFFFF;
+			pbd->total_hlen += tcp_hdrlen(skb)/2;
+
+			pbd->tcp_pseudo_csum = ntohs(th->check);
+		} else if (iph->protocol == IPPROTO_UDP) {
+			struct udphdr *uh = udp_hdr(skb);
+			txbd->bd_flags.as_bitfield |=
+				ETH_TX_BD_FLAGS_TCP_CSUM;
+			pbd->total_hlen += 4;
+			pbd->global_data |= ETH_TX_PARSE_BD_UDP_FLG;
+
+			/* HW bug: we need to subtract 10 bytes before the
+			 * UDP header from the csum
+			 */
+			uh->check = (u16) ~csum_fold(csum_sub(uh->check,
+				csum_partial(((u8 *)(uh)-10), 10, 0)));
+		}
+	}
+
+	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
+		txbd->vlan = vlan_tx_tag_get(skb);
+		txbd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
+	}
+#ifdef BNX2X_STOP_ON_ERROR
+	else {
+		txbd->vlan = pkt_prod;
+	}
+#endif
+
+	mapping = pci_map_single(bp->pdev, skb->data,
+				 skb->len, PCI_DMA_TODEVICE);
+
+	txbd->addr_hi = U64_HI(mapping);
+	txbd->addr_lo = U64_LO(mapping);
+	txbd->hdr_nbds = 1;
+	txbd->nbd = nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
+	txbd->nbytes = skb_headlen(skb);
+
+	DP(NETIF_MSG_TX_QUEUED,
+	   "first bd @%p, addr(%x:%x) hdr_nbds(%d) nbd(%d)"
+	   " nbytes(%d) flags(%x) vlan(%u)\n",
+	   txbd, txbd->addr_hi, txbd->addr_lo, txbd->hdr_nbds, txbd->nbd,
+	   txbd->nbytes, txbd->bd_flags.as_bitfield, txbd->vlan);
+
+	if ((skb_shinfo(skb)->gso_size) &&
+	    (skb->len > (bp->dev->mtu + ETH_HLEN))) {
+
+		int hlen = 2*pbd->total_hlen;
+
+		DP(NETIF_MSG_TX_QUEUED,
+		   "TSO packet len %d, hlen %d, headlen %d, tso size %d\n",
+		   skb->len, hlen, skb_headlen(skb), skb_shinfo(skb)->gso_size);
+
+		txbd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
+
+		if (txbd->nbytes > hlen) {
+			/* we split the first bd into headers and data bds
+			 * to ease the pain of our fellow microcode engineers;
+			 * we use one mapping for both bds.
+			 * So far this has only been observed to happen
+			 * in Other Operating Systems(TM)
+			 */
+
+			/* first fix first bd */
+			nbd++;
+			txbd->nbd++;
+			txbd->nbytes = hlen;
+
+			/* we only print this as an error
+			 * because we don't think this will ever happen.
+			 */
+			BNX2X_ERR("TSO split headr size is %d (%x:%x)"
+				  " nbd %d\n", txbd->nbytes, txbd->addr_hi,
+				  txbd->addr_lo, txbd->nbd);
+
+			/* now get a new data bd
+			 * (after the pbd) and fill it */
+			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+			txbd = &fp->tx_desc_ring[bd_prod];
+
+#ifdef BNX2X_STOP_ON_ERROR
+			txbd->vlan = pkt_prod;
+#endif
+			txbd->addr_hi = U64_HI(mapping);
+			txbd->addr_lo = U64_LO(mapping) + hlen;
+			txbd->nbytes = skb_headlen(skb) - hlen;
+			/* this marks the bd as one that has no
+			 * individual mapping; the FW ignores this flag
+			 * in a bd not marked as a start bd
+			 */
+			txbd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
+			DP(NETIF_MSG_TX_QUEUED,
+			   "TSO split data size is %d (%x:%x)\n",
+			   txbd->nbytes, txbd->addr_hi, txbd->addr_lo);
+		}
+
+		if (!pbd) {
+			/* supposed to be unreached
+			 * (and therefore not handled properly...)
+			 */
+			BNX2X_ERR("LSO with no PBD\n");
+			BUG();
+		}
+
+		pbd->lso_mss = skb_shinfo(skb)->gso_size;
+		pbd->tcp_send_seq = ntohl(tcp_hdr(skb)->seq);
+		pbd->ip_id  = ntohs(ip_hdr(skb)->id);
+		pbd->tcp_pseudo_csum =
+			ntohs(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+						 ip_hdr(skb)->daddr,
+						 0, IPPROTO_TCP, 0));
+		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
+	}
+
+	{
+		int i;
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+			txbd = &fp->tx_desc_ring[bd_prod];
+
+			mapping = pci_map_page(bp->pdev, frag->page,
+					       frag->page_offset,
+					       frag->size, PCI_DMA_TODEVICE);
+
+			txbd->addr_hi = U64_HI(mapping);
+			txbd->addr_lo = U64_LO(mapping);
+			txbd->nbytes = frag->size;
+			txbd->bd_flags.as_bitfield = 0;
+#ifdef BNX2X_STOP_ON_ERROR
+			txbd->vlan = pkt_prod;
+#endif
+			DP(NETIF_MSG_TX_QUEUED, "frag (%d) bd @%p,"
+			   " addr(%x:%x) nbytes(%d) flags(%x)\n",
+			   i, txbd, txbd->addr_hi, txbd->addr_lo,
+			   txbd->nbytes, txbd->bd_flags.as_bitfield);
+
+		}  /* for */
+	}
+
+	/* now at last mark the bd as the last bd */
+	txbd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
+
+	DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags(%x)\n",
+	   txbd, txbd->bd_flags.as_bitfield);
+
+	tx_buf->skb = skb;
+
+	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+	/* now send a tx doorbell, counting the next-page pointer bd
+	 * if the packet's bd chain reaches or crosses a page boundary
+	 */
+	if (TX_BD_POFF(bd_prod) < nbd)
+		nbd++;
+
+	if (pbd) {
+		DP(NETIF_MSG_TX_QUEUED,
+		   "PBD @%p, ip_data=%x , ip_hlen %u, ip_id %u, lso_mss %u,"
+		   " tcp_flags %x, xsum %x, seq %u, hlen %u)\n",
+		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
+		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
+		   pbd->tcp_send_seq, pbd->total_hlen);
+	}
+
+	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd=%u, bd=%d\n", nbd, bd_prod);
+
+	fp->hw_tx_prods->bds_prod += nbd;
+	fp->hw_tx_prods->packets_prod++;
+	DOORBELL(bp, fp_index, 0);
+
+	mmiowb();
+
+	fp->tx_bd_prod = bd_prod;
+	dev->trans_start = jiffies;
+
+	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
+		netif_stop_queue(dev);
+		bp->slowpath->eth_stats.driver_xoff++;
+		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
+			netif_wake_queue(dev);
+	}
+	fp->tx_pkt++;
+	return NETDEV_TX_OK;
+}
+
+
+
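+/* bnx2x_nic_unload brings the NIC down: it halts the fast path with the
+ * HALT and CFC_DELETE ramrods, optionally arms the EMAC for WoL, resets
+ * the chip, reports UNLOAD_DONE to the MCP and frees SKBs and memory.
+ */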
+static int bnx2x_nic_unload(struct bnx2x *bp, int release_irq)
+{
+	u32 reset_code = 0;
+	int rc;
+
+	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
+
+	/* Calling flush_scheduled_work() may deadlock because
+	 * linkwatch_event() may be on the workqueue and it will try to get
+	 * the rtnl_lock which we are holding.
+	 */
+	while (bp->in_reset_task)
+		msleep(1);
+
+	/* Delete the timer: do it before disabling interrupts, as there
+	   may still be a STAT_QUERY ramrod pending after the timer stops */
+	del_timer_sync(&bp->timer);
+
+	/* Wait until stat ramrod returns and all SP tasks complete */
+	while (bp->stat_pending && (bp->spq_left != MAX_SPQ_PENDING)) {
+		msleep(1);
+	}
+
+	/* Stop fast path, disable MAC, disable interrupts */
+	bnx2x_netif_stop(bp);
+
+
+	if (bp->flags & NO_WOL_FLAG)
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+	else if (bp->wol) {
+		u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
+		u8 *mac_addr = bp->dev->dev_addr;
+		u32 val = (EMAC_MODE_MPKT | EMAC_MODE_MPKT_RCVD |
+			   EMAC_MODE_ACPI_RCVD);
+
+		REG_WR(bp, emac_base, EMAC_REG_EMAC_MODE, val);
+
+		val = (mac_addr[0] << 8) | mac_addr[1];
+
+		REG_WR(bp, emac_base, EMAC_REG_EMAC_MAC_MATCH, val);
+
+		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+		      (mac_addr[4] << 8) | mac_addr[5];
+
+		REG_WR(bp, emac_base, EMAC_REG_EMAC_MAC_MATCH + 4, val);
+
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
+	} else
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+
+	/* Send HALT ramrod */
+	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, 0, 0);
+	/* Wait for completion */
+	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
+			       &(bp->fp[0].state), 1);
+	if (rc) {
+		goto error;
+	}
+
+	/* Remember cstorm producer in DSB to track it */
+	bp->dsb_prod_sp_idx = *bp->dsb_sp_prod;
+	/* Send CFC_DELETE ramrod */
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_LEADING_CFC_DEL, 0, 0, 0, 1);
+	/* Wait for completion to arrive. Do nothing as we are going to reset
+	   the chip a few lines later */
+	while (bp->dsb_prod_sp_idx == *bp->dsb_sp_prod) {
+		msleep(1);
+	}
+
+error:
+
+	if (!nomcp) {
+		rc = bnx2x_fw_command(bp, reset_code);
+	} else {
+		rc =  FW_MSG_CODE_DRV_UNLOAD_COMMON;
+	}
+
+	/* Release IRQs */
+	if (release_irq)
+		bnx2x_free_irq(bp);
+
+	/* Reset the chip */
+	bnx2x_reset_chip(bp, rc);
+
+	/* Report UNLOAD_DONE to MCP */
+	if (!nomcp) {
+		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+	}
+	/* Free SKBs and driver internals */
+	bnx2x_free_skbs(bp);
+	bnx2x_free_mem(bp);
+
+	bp->state = BNX2X_STATE_CLOSED;
+	/* Set link down */
+	bp->link_up = 0;
+	netif_carrier_off(bp->dev);
+
+	return 0;
+}
+
+static void bnx2x_reset_task(struct work_struct *work)
+{
+	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	BNX2X_ERR("reset taks called but STOP_ON_ERROR defined"
+		  " so reset not done to allow debug dump,\n"
+		  " you will need to reboot when done\n");
+	return;
+#endif
+
+	if (!netif_running(bp->dev))
+		return;
+
+	bp->in_reset_task = 1;
+	bnx2x_netif_stop(bp);
+
+	bnx2x_nic_unload(bp, 0);
+	bnx2x_nic_load(bp, 0);
+
+	bp->in_reset_task = 0;
+}
+
+static void bnx2x_tx_timeout(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+#ifdef BNX2X_STOP_ON_ERROR
+	if (!bp->panic) {
+		bnx2x_panic();
+	}
+#endif
+
+	/* This allows the netif to be shutdown gracefully before resetting */
+	schedule_work(&bp->reset_task);
+}
+
+/* Called with rtnl_lock */
+static int bnx2x_close(struct net_device *dev)
+{
+	int rc;
+	struct bnx2x *bp = netdev_priv(dev);
+
+	/* Unload the driver, release IRQs */
+	rc = bnx2x_nic_unload(bp, 1);
+	if (rc) {
+		BNX2X_ERR("bnx2x_nic_unload failed: %d\n", rc);
+		return rc;
+	}
+	bnx2x_set_power_state(bp, PCI_D3hot);
+
+	return 0;
+}
+
+#ifdef BCM_VLAN
+/* Called with rtnl_lock */
+static void bnx2x_vlan_rx_register(struct net_device *dev,
+				   struct vlan_group *vlgrp)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	bp->vlgrp = vlgrp;
+	if (netif_running(dev))
+		bnx2x_set_rx_mode(dev);
+
+}
+
+#endif
+
+static struct net_device_stats *bnx2x_get_stats(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	return &bp->net_stats;
+}
+
+
+/* ethtool */
+
+/****************************************************************************
+* Init service functions
+****************************************************************************/
+
+static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
+{
+	int port = bp->port;
+	u32 ext_phy_type;
+
+	bp->phy_flags = 0;
+	bp->supported = (SUPPORTED_Autoneg |
+			 SUPPORTED_Pause |
+			 SUPPORTED_Asym_Pause);
+
+	switch (switch_cfg) {
+	case SWITCH_CFG_1G:
+		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
+
+		ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
+		switch (ext_phy_type) {
+		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
+			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
+				       ext_phy_type);
+
+			bp->supported |= (SUPPORTED_10baseT_Half |
+					  SUPPORTED_10baseT_Full |
+					  SUPPORTED_100baseT_Half |
+					  SUPPORTED_100baseT_Full |
+					  SUPPORTED_1000baseT_Full |
+					  SUPPORTED_2500baseT_Full |
+					  SUPPORTED_TP | SUPPORTED_FIBRE);
+			break;
+
+		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
+			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
+				       ext_phy_type);
+
+			bp->phy_flags |= PHY_SGMII_FLAG;
+
+			bp->supported |= (/* SUPPORTED_10baseT_Half |
+					     SUPPORTED_10baseT_Full |
+					     SUPPORTED_100baseT_Half |
+					     SUPPORTED_100baseT_Full |*/
+					  SUPPORTED_1000baseT_Full |
+					  SUPPORTED_TP | SUPPORTED_FIBRE);
+			break;
+
+		default:
+			BNX2X_ERR("NVRAM config error. "
+				  "BAD SerDes ext_phy_config 0x%x\n",
+				  bp->ext_phy_config);
+			return;
+		}
+
+		bp->phy_addr = REG_RD(bp, GRCBASE_NIG,
+				      NIG_REGISTERS_SERDES0_CTRL_PHY_ADDR +
+				      port*0x10);
+		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
+		break;
+
+	case SWITCH_CFG_10G:
+		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
+
+		bp->phy_flags |= PHY_XGSX_FLAG;
+
+		ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
+		switch (ext_phy_type) {
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
+				       ext_phy_type);
+
+			bp->supported |= (SUPPORTED_10baseT_Half |
+					  SUPPORTED_10baseT_Full |
+					  SUPPORTED_100baseT_Half |
+					  SUPPORTED_100baseT_Full |
+					  SUPPORTED_1000baseT_Full |
+					  SUPPORTED_2500baseT_Full |
+					  SUPPORTED_10000baseT_Full |
+					  SUPPORTED_TP | SUPPORTED_FIBRE);
+			break;
+
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705/6)\n",
+				       ext_phy_type);
+
+			bp->supported |= (SUPPORTED_10000baseT_Full |
+					  SUPPORTED_FIBRE);
+			break;
+
+		default:
+			BNX2X_ERR("NVRAM config error. "
+				  "BAD XGXS ext_phy_config 0x%x\n",
+				  bp->ext_phy_config);
+			return;
+		}
+
+		bp->phy_addr = REG_RD(bp, GRCBASE_NIG,
+				      NIG_REGISTERS_XGXS0_CTRL_PHY_ADDR +
+				      port*0x18);
+		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
+
+		bp->ser_lane = ((bp->lane_config &
+				 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+				PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+		bp->rx_lane_swap = ((bp->lane_config &
+				     PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
+				    PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
+		bp->tx_lane_swap = ((bp->lane_config &
+				     PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
+				    PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
+		BNX2X_DEV_INFO("rx_lane_swap 0x%x  tx_lane_swap 0x%x\n",
+			       bp->rx_lane_swap, bp->tx_lane_swap);
+		break;
+
+	default:
+		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
+			  bp->link_config);
+		return;
+	}
+
+	/* mask what we support according to speed_cap_mask */
+	if (!(bp->speed_cap_mask &
+	       PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
+		bp->supported &= ~SUPPORTED_10baseT_Half;
+	}
+	if (!(bp->speed_cap_mask &
+	       PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
+		bp->supported &= ~SUPPORTED_10baseT_Full;
+	}
+	if (!(bp->speed_cap_mask &
+	       PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
+		bp->supported &= ~SUPPORTED_100baseT_Half;
+	}
+	if (!(bp->speed_cap_mask &
+	       PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
+		bp->supported &= ~SUPPORTED_100baseT_Full;
+	}
+	if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
+		bp->supported &= ~(SUPPORTED_1000baseT_Half |
+				   SUPPORTED_1000baseT_Full);
+	}
+	if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
+		bp->supported &= ~SUPPORTED_2500baseT_Full;
+	}
+	if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+		bp->supported &= ~SUPPORTED_10000baseT_Full;
+	}
+	BNX2X_DEV_INFO("supported 0x%x\n", bp->supported);
+}
+
+static void bnx2x_link_settings_requested(struct bnx2x *bp)
+{
+	bp->req_autoneg = 0;
+	bp->req_duplex = DUPLEX_FULL;
+	switch (bp->link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+	case PORT_FEATURE_LINK_SPEED_AUTO:
+		bp->req_autoneg |= AUTONEG_SPEED;
+		bp->req_line_speed = 0;
+		bp->advertising = bp->supported;
+		break;
+
+	case PORT_FEATURE_LINK_SPEED_10M_FULL:
+		if (bp->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
+			bp->req_line_speed = SPEED_10;
+			bp->advertising = (ADVERTISED_10baseT_Full |
+					   ADVERTISED_TP);
+		} else {
+			BNX2X_ERR("NVRAM config error. "
+				  "Invalid link_config 0x%x"
+				  "  speed_cap_mask 0x%x\n",
+				  bp->link_config, bp->speed_cap_mask);
+			return;
+		}
+		break;
+
+	case PORT_FEATURE_LINK_SPEED_10M_HALF:
+		if (bp->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
+			bp->req_line_speed = SPEED_10;
+			bp->req_duplex = DUPLEX_HALF;
+			bp->advertising = (ADVERTISED_10baseT_Half |
+					   ADVERTISED_TP);
+		} else {
+			BNX2X_ERR("NVRAM config error. "
+				  "Invalid link_config 0x%x"
+				  "  speed_cap_mask 0x%x\n",
+				  bp->link_config, bp->speed_cap_mask);
+			return;
+		}
+		break;
+
+	case PORT_FEATURE_LINK_SPEED_100M_FULL:
+		if (bp->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+			bp->req_line_speed = SPEED_100;
+			bp->advertising = (ADVERTISED_100baseT_Full |
+					   ADVERTISED_TP);
+		} else {
+			BNX2X_ERR("NVRAM config error. "
+				  "Invalid link_config 0x%x"
+				  "  speed_cap_mask 0x%x\n",
+				  bp->link_config, bp->speed_cap_mask);
+			return;
+		}
+		break;
+
+	case PORT_FEATURE_LINK_SPEED_100M_HALF:
+		if (bp->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+			bp->req_line_speed = SPEED_100;
+			bp->req_duplex = DUPLEX_HALF;
+			bp->advertising = (ADVERTISED_100baseT_Half |
+					   ADVERTISED_TP);
+		} else {
+			BNX2X_ERR("NVRAM config error. "
+				  "Invalid link_config 0x%x"
+				  "  speed_cap_mask 0x%x\n",
+				  bp->link_config, bp->speed_cap_mask);
+			return;
+		}
+		break;
+
+	case PORT_FEATURE_LINK_SPEED_1G:
+		if (bp->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) {
+			bp->req_line_speed = SPEED_1000;
+			bp->advertising = (ADVERTISED_1000baseT_Full |
+					   ADVERTISED_TP);
+		} else {
+			BNX2X_ERR("NVRAM config error. "
+				  "Invalid link_config 0x%x"
+				  "  speed_cap_mask 0x%x\n",
+				  bp->link_config, bp->speed_cap_mask);
+			return;
+		}
+		break;
+
+	case PORT_FEATURE_LINK_SPEED_2_5G:
+		if (bp->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) {
+			bp->req_line_speed = SPEED_2500;
+			bp->advertising = (ADVERTISED_2500baseT_Full |
+					   ADVERTISED_TP);
+		} else {
+			BNX2X_ERR("NVRAM config error. "
+				  "Invalid link_config 0x%x"
+				  "  speed_cap_mask 0x%x\n",
+				  bp->link_config, bp->speed_cap_mask);
+			return;
+		}
+		break;
+
+	case PORT_FEATURE_LINK_SPEED_10G_CX4:
+	case PORT_FEATURE_LINK_SPEED_10G_KX4:
+	case PORT_FEATURE_LINK_SPEED_10G_KR:
+		if (!(bp->phy_flags & PHY_XGSX_FLAG)) {
+			BNX2X_ERR("NVRAM config error. "
+				  "Invalid link_config 0x%x"
+				  "  phy_flags 0x%x\n",
+				  bp->link_config, bp->phy_flags);
+			return;
+		}
+		if (bp->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
+			bp->req_line_speed = SPEED_10000;
+			bp->advertising = (ADVERTISED_10000baseT_Full |
+					   ADVERTISED_FIBRE);
+		} else {
+			BNX2X_ERR("NVRAM config error. "
+				  "Invalid link_config 0x%x"
+				  "  speed_cap_mask 0x%x\n",
+				  bp->link_config, bp->speed_cap_mask);
+			return;
+		}
+		break;
+
+	default:
+		BNX2X_ERR("NVRAM config error. "
+			  "BAD link speed link_config 0x%x\n",
+			  bp->link_config);
+		bp->req_autoneg |= AUTONEG_SPEED;
+		bp->req_line_speed = 0;
+		bp->advertising = bp->supported;
+		break;
+	}
+	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d\n",
+		       bp->req_line_speed, bp->req_duplex);
+
+	bp->req_flow_ctrl = (bp->link_config &
+			     PORT_FEATURE_FLOW_CONTROL_MASK);
+	/* Please refer to Table 28B-3 of the 802.3ab-1999 spec */
+	switch (bp->req_flow_ctrl) {
+	case FLOW_CTRL_AUTO:
+		bp->req_autoneg |= AUTONEG_FLOW_CTRL;
+		bp->pause_mode = PAUSE_BOTH;
+		bp->advertising |= (ADVERTISED_Pause |
+				    ADVERTISED_Asym_Pause);
+		break;
+
+	case FLOW_CTRL_TX:
+		bp->pause_mode = PAUSE_ASYMMETRIC;
+		bp->advertising |= ADVERTISED_Asym_Pause;
+		break;
+
+	case FLOW_CTRL_RX:
+		bp->pause_mode = PAUSE_BOTH;
+		bp->advertising |= (ADVERTISED_Pause |
+				    ADVERTISED_Asym_Pause);
+		break;
+
+	case FLOW_CTRL_BOTH:
+		bp->pause_mode = PAUSE_SYMMETRIC;
+		bp->advertising |= ADVERTISED_Pause;
+		break;
+
+	case FLOW_CTRL_NONE:
+	default:
+		bp->pause_mode = PAUSE_NONE;
+		bp->advertising &= ~(ADVERTISED_Pause |
+				     ADVERTISED_Asym_Pause);
+		break;
+	}
+	BNX2X_DEV_INFO("req_autoneg 0x%x  req_flow_ctrl 0x%x\n"
+	     KERN_INFO "  pause_mode %d  advertising 0x%x\n",
+		       bp->req_autoneg, bp->req_flow_ctrl,
+		       bp->pause_mode, bp->advertising);
+}
+
+static void bnx2x_get_hwinfo(struct bnx2x *bp)
+{
+	u32 val, val2, val3, val4, id;
+	int port = bp->port;
+	u32 switch_cfg;
+
+	bp->shmem_base = REG_RD(bp, GRCBASE_MISC,
+				MISC_REGISTERS_SHARED_MEM_ADDR);
+	BNX2X_DEV_INFO("shmem offset is %x\n", bp->shmem_base);
+
+	/* Get the chip revision id and number. */
+	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
+	val = REG_RD(bp, GRCBASE_MISC, MISC_REGISTERS_CHIP_NUM);
+	id = ((val & 0xffff) << 16);
+	val = REG_RD(bp, GRCBASE_MISC, MISC_REGISTERS_CHIP_REV);
+	id |= ((val & 0xf) << 12);
+	val = REG_RD(bp, GRCBASE_MISC, MISC_REGISTERS_CHIP_METAL);
+	id |= ((val & 0xff) << 4);
+	val = REG_RD(bp, GRCBASE_MISC, MISC_REGISTERS_BOND_ID);
+	id |= (val & 0xf);
+	bp->chip_id = id;
+	BNX2X_DEV_INFO("chip ID is %x\n", id);
+
+	if (!bp->shmem_base || (bp->shmem_base != 0xAF900)) {
+		BNX2X_DEV_INFO("MCP not active\n");
+		nomcp = 1;
+		goto set_mac;
+	}
+
+	val = SHMEM_RD(bp, validity_map[port]);
+	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
+		BNX2X_ERR("MCP validity signature bad\n");
+	}
+
+	bp->fw_seq = (SHMEM_RD(bp, drv_fw_mb[port].drv_mb_header) &
+		      DRV_MSG_SEQ_NUMBER_MASK);
+
+	bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
+
+	bp->serdes_config =
+		SHMEM_RD(bp, dev_info.port_hw_config[bp->port].serdes_config);
+	bp->lane_config =
+		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
+	bp->ext_phy_config =
+		SHMEM_RD(bp,
+			 dev_info.port_hw_config[port].external_phy_config);
+	bp->speed_cap_mask =
+		SHMEM_RD(bp,
+			 dev_info.port_hw_config[port].speed_capability_mask);
+
+	bp->link_config =
+		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
+
+	BNX2X_DEV_INFO("hw_config (%08x)  serdes_config (%08x)\n"
+	     KERN_INFO "  lane_config (%08x)  ext_phy_config (%08x)\n"
+	     KERN_INFO "  speed_cap_mask (%08x)  link_config (%08x)"
+		       "  fw_seq (%08x)\n",
+		       bp->hw_config, bp->serdes_config, bp->lane_config,
+		       bp->ext_phy_config, bp->speed_cap_mask,
+		       bp->link_config, bp->fw_seq);
+
+	switch_cfg = (bp->link_config & PORT_FEATURE_CONNECTED_SWITCH_MASK);
+	bnx2x_link_settings_supported(bp, switch_cfg);
+
+	bp->autoneg = (bp->hw_config & SHARED_HW_CFG_AN_ENABLE_MASK);
+	/* for now disable cl73 */
+	bp->autoneg &= ~SHARED_HW_CFG_AN_ENABLE_CL73;
+	BNX2X_DEV_INFO("autoneg 0x%x\n", bp->autoneg);
+
+	bnx2x_link_settings_requested(bp);
+
+	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
+	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
+	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
+	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
+	bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
+	bp->dev->dev_addr[5] = (u8)(val & 0xff);
+	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
+
+	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
+	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
+	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
+	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
+
+	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
+	       val, val2, val3, val4);
+
+	/* bc rev */
+	bp->bc_ver = val = ((SHMEM_RD(bp, dev_info.bc_rev)) >> 8);
+	BNX2X_DEV_INFO("bc_rev %X\n", val);
+	if (val != BNX2X_BC_REV) {
+		BNX2X_ERR("This driver needs bc_rev %X but found %X,"
+			  " please upgrade BC\n", BNX2X_BC_REV, val);
+		/* for now only warn, later we might need to enforce this */
+	}
+
+	val = REG_RD(bp, GRCBASE_MCP, MCP_REG_MCPR_NVM_CFG4);
+	bp->flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
+	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
+		       bp->flash_size, bp->flash_size);
+
+	return;
+
+set_mac: /* only supposed to happen on emulation/FPGA */
+	BNX2X_ERR("warning constant MAC workaround active\n");
+	bp->dev->dev_addr[0] = 0;
+	bp->dev->dev_addr[1] = 0x50;
+	bp->dev->dev_addr[2] = 0xc2;
+	bp->dev->dev_addr[3] = 0x2c;
+	bp->dev->dev_addr[4] = 0x71;
+	bp->dev->dev_addr[5] = port ? 0x0d : 0x0e;
+
+	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
+}
+
+
+/****************************************************************************
+* ethtool service functions
+****************************************************************************/
+/* All ethtool functions called with rtnl_lock */
+
+static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	cmd->supported = bp->supported;
+	cmd->advertising = bp->advertising;
+
+	if (netif_carrier_ok(dev)) {
+		cmd->speed = bp->line_speed;
+		cmd->duplex = bp->duplex;
+	} else {
+		cmd->speed = bp->req_line_speed;
+		cmd->duplex = bp->req_duplex;
+	}
+
+	if (bp->phy_flags & PHY_XGSX_FLAG) {
+		cmd->port = PORT_FIBRE;
+	} else {
+		cmd->port = PORT_TP;
+	}
+
+	cmd->phy_address = bp->phy_addr;
+	cmd->transceiver = XCVR_INTERNAL;
+
+	if (bp->req_autoneg & AUTONEG_SPEED) {
+		cmd->autoneg = AUTONEG_ENABLE;
+	} else {
+		cmd->autoneg = AUTONEG_DISABLE;
+	}
+
+	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
+	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
+	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
+	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
+	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
+	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
+	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+
+	return 0;
+}
+
+static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u32 advertising;
+
+	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
+	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
+	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
+	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
+	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
+	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
+	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+
+	switch (cmd->port) {
+	case PORT_TP:
+		if (!(bp->supported & SUPPORTED_TP)) {
+			return -EINVAL;
+		}
+		if (bp->phy_flags & PHY_XGSX_FLAG) {
+			bnx2x_link_reset(bp);
+			bnx2x_link_settings_supported(bp, SWITCH_CFG_1G);
+			bnx2x_phy_deassert(bp);
+		}
+		break;
+
+	case PORT_FIBRE:
+		if (!(bp->supported & SUPPORTED_FIBRE)) {
+			return -EINVAL;
+		}
+		if (!(bp->phy_flags & PHY_XGSX_FLAG)) {
+			bnx2x_link_reset(bp);
+			bnx2x_link_settings_supported(bp, SWITCH_CFG_10G);
+			bnx2x_phy_deassert(bp);
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (cmd->autoneg == AUTONEG_ENABLE) {
+		/* advertise the requested speed and duplex if supported */
+		cmd->advertising &= bp->supported;
+
+		bp->req_autoneg |= AUTONEG_SPEED;
+		bp->req_line_speed = 0;
+		bp->req_duplex = DUPLEX_FULL;
+		bp->advertising |= (ADVERTISED_Autoneg | cmd->advertising);
+
+	} else { /* forced mode */
+		/* advertise the requested speed and duplex if supported */
+		switch (cmd->speed) {
+		case SPEED_10:
+			if (cmd->duplex == DUPLEX_FULL) {
+				if (!(bp->supported &
+						SUPPORTED_10baseT_Full)) {
+					return -EINVAL;
+				}
+				advertising = (ADVERTISED_10baseT_Full |
+					       ADVERTISED_TP);
+			} else {
+				if (!(bp->supported &
+						SUPPORTED_10baseT_Half)) {
+					return -EINVAL;
+				}
+				advertising = (ADVERTISED_10baseT_Half |
+					       ADVERTISED_TP);
+			}
+			break;
+
+		case SPEED_100:
+			if (cmd->duplex == DUPLEX_FULL) {
+				if (!(bp->supported &
+						SUPPORTED_100baseT_Full)) {
+					return -EINVAL;
+				}
+				advertising = (ADVERTISED_100baseT_Full |
+					       ADVERTISED_TP);
+			} else {
+				if (!(bp->supported &
+						SUPPORTED_100baseT_Half)) {
+					return -EINVAL;
+				}
+				advertising = (ADVERTISED_100baseT_Half |
+					       ADVERTISED_TP);
+			}
+			break;
+
+		case SPEED_1000:
+			if (cmd->duplex != DUPLEX_FULL) {
+				return -EINVAL;
+			}
+			if (!(bp->supported & SUPPORTED_1000baseT_Full)) {
+				return -EINVAL;
+			}
+			advertising = (ADVERTISED_1000baseT_Full |
+				       ADVERTISED_TP);
+			break;
+
+		case SPEED_2500:
+			if (cmd->duplex != DUPLEX_FULL) {
+				return -EINVAL;
+			}
+			if (!(bp->supported & SUPPORTED_2500baseT_Full)) {
+				return -EINVAL;
+			}
+			advertising = (ADVERTISED_2500baseT_Full |
+				       ADVERTISED_TP);
+			break;
+
+		case SPEED_10000:
+			if (cmd->duplex != DUPLEX_FULL) {
+				return -EINVAL;
+			}
+			if (!(bp->supported & SUPPORTED_10000baseT_Full)) {
+				return -EINVAL;
+			}
+			advertising = (ADVERTISED_10000baseT_Full |
+				       ADVERTISED_FIBRE);
+			break;
+
+		default:
+			return -EINVAL;
+		}
+
+		bp->req_autoneg &= ~AUTONEG_SPEED;
+		bp->req_line_speed = cmd->speed;
+		bp->req_duplex = cmd->duplex;
+		bp->advertising = advertising;
+	}
+
+	DP(NETIF_MSG_LINK, "req_autoneg 0x%x  req_line_speed %d\n"
+	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
+	   bp->req_autoneg, bp->req_line_speed, bp->req_duplex,
+	   bp->advertising);
+
+	bnx2x_stop_stats(bp);
+	bnx2x_link_initialize(bp);
+
+	return 0;
+}
+
+static void bnx2x_get_drvinfo(struct net_device *dev,
+			      struct ethtool_drvinfo *info)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	strcpy(info->driver, DRV_MODULE_NAME);
+	strcpy(info->version, DRV_MODULE_VERSION);
+	snprintf(info->fw_version, 32, "%d.%d.%d:%d (BC VER %x)",
+		 BCM_FW_MAJOR_VERSION, BCM_FW_MINOR_VERSION,
+		 BCM_FW_REVISION_VERSION, BCM_FW_COMPILE_FLAGS,
+		 bp->bc_ver);
+	strcpy(info->bus_info, pci_name(bp->pdev));
+	info->n_stats = BNX2X_NUM_STATS;
+	info->testinfo_len = BNX2X_NUM_TESTS;
+	info->eedump_len = bp->flash_size;
+	info->regdump_len = 0;
+}
+
+static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (bp->flags & NO_WOL_FLAG) {
+		wol->supported = 0;
+		wol->wolopts = 0;
+	} else {
+		wol->supported = WAKE_MAGIC;
+		if (bp->wol)
+			wol->wolopts = WAKE_MAGIC;
+		else
+			wol->wolopts = 0;
+	}
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (wol->wolopts & ~WAKE_MAGIC)
+		return -EINVAL;
+
+	if (wol->wolopts & WAKE_MAGIC) {
+		if (bp->flags & NO_WOL_FLAG)
+			return -EINVAL;
+
+		bp->wol = 1;
+	} else {
+		bp->wol = 0;
+	}
+	return 0;
+}
+
+static u32 bnx2x_get_msglevel(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	return bp->msglevel;
+}
+
+static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (capable(CAP_NET_ADMIN))
+		bp->msglevel = level;
+}
+
+static int bnx2x_nway_reset(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	bnx2x_stop_stats(bp);
+	bnx2x_link_initialize(bp);
+
+	return 0;
+}
+
+static int bnx2x_get_eeprom_len(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	return bp->flash_size;
+}
+
+static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
+{
+	int port = bp->port;
+	int count, i;
+	u32 val = 0;
+
+	/* adjust timeout for emulation/FPGA */
+	count = NVRAM_TIMEOUT_COUNT;
+	if (CHIP_REV(bp) == CHIP_REV_EMUL)
+		count *= 100;
+
+	/* request access to nvram interface */
+	REG_WR(bp, GRCBASE_MCP, MCP_REG_MCPR_NVM_SW_ARB,
+	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
+
+	for (i = 0; i < count*10; i++) {
+		val = REG_RD(bp, GRCBASE_MCP, MCP_REG_MCPR_NVM_SW_ARB);
+		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
+			break;
+		}
+
+		udelay(5);
+	}
+
+	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
+		DP(NETIF_MSG_NVM, "cannot get access to nvram interface\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int bnx2x_release_nvram_lock(struct bnx2x *bp)
+{
+	int port = bp->port;
+	int count, i;
+	u32 val = 0;
+
+	/* adjust timeout for emulation/FPGA */
+	count = NVRAM_TIMEOUT_COUNT;
+	if (CHIP_REV(bp) == CHIP_REV_EMUL)
+		count *= 100;
+
+	/* relinquish nvram interface */
+	REG_WR(bp, GRCBASE_MCP, MCP_REG_MCPR_NVM_SW_ARB,
+	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
+
+	for (i = 0; i < count*10; i++) {
+		val = REG_RD(bp, GRCBASE_MCP, MCP_REG_MCPR_NVM_SW_ARB);
+		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
+			break;
+		}
+
+		udelay(5);
+	}
+
+	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
+		DP(NETIF_MSG_NVM, "cannot free access to nvram interface\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void bnx2x_enable_nvram_access(struct bnx2x *bp)
+{
+	u32 val;
+
+	val = REG_RD(bp, GRCBASE_MCP, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
+
+	/* enable both bits, even on read */
+	REG_WR(bp, GRCBASE_MCP, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
+	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
+		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
+}
+
+static void bnx2x_disable_nvram_access(struct bnx2x *bp)
+{
+	u32 val;
+
+	val = REG_RD(bp, GRCBASE_MCP, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
+
+	/* disable both bits, even after read */
+	REG_WR(bp, GRCBASE_MCP, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
+	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
+			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
+}
+
+static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
+				  u32 cmd_flags)
+{
+	int rc;
+	int count, i;
+	u32 val;
+
+	/* build the command word */
+	cmd_flags |= MCPR_NVM_COMMAND_DOIT;
+
+	/* need to clear DONE bit separately */
+	REG_WR(bp, GRCBASE_MCP, MCP_REG_MCPR_NVM_COMMAND,
+	       MCPR_NVM_COMMAND_DONE);
+
+	/* address of the NVRAM to read from */
+	REG_WR(bp, GRCBASE_MCP, MCP_REG_MCPR_NVM_ADDR,
+	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
+
+	/* issue a read command */
+	REG_WR(bp, GRCBASE_MCP, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
+
+	/* adjust timeout for emulation/FPGA */
+	count = NVRAM_TIMEOUT_COUNT;
+	if (CHIP_REV(bp) == CHIP_REV_EMUL)
+		count *= 100;
+
+	/* wait for completion */
+	rc = -EBUSY;
+	for (i = 0; i < count; i++) {
+		udelay(5);
+		val = REG_RD(bp, GRCBASE_MCP, MCP_REG_MCPR_NVM_COMMAND);
+
+		if (val & MCPR_NVM_COMMAND_DONE) {
+			val = REG_RD(bp, GRCBASE_MCP, MCP_REG_MCPR_NVM_READ);
+			DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
+			*ret_val = val;
+			rc = 0;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+			    int buf_size)
+{
+	int rc;
+	u32 cmd_flags;
+	u32 val;
+
+	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
+		DP(NETIF_MSG_NVM, "Invalid paramter: offset 0x%x"
+				  "  buf_size 0x%x\n",
+		   offset, buf_size);
+		return -EINVAL;
+	}
+
+	if (offset + buf_size > bp->flash_size) {
+		DP(NETIF_MSG_NVM, "Invalid paramter: offset (0x%x) +"
+				  " buf_size (0x%x) > flash_size (0x%x)\n",
+		   offset, buf_size, bp->flash_size);
+		return -EINVAL;
+	}
+
+	/* request access to nvram interface */
+	rc = bnx2x_acquire_nvram_lock(bp);
+	if (rc) {
+		return rc;
+	}
+
+	/* enable access to nvram interface */
+	bnx2x_enable_nvram_access(bp);
+
+	/* read the first word(s) */
+	cmd_flags = MCPR_NVM_COMMAND_FIRST;
+	while ((buf_size > sizeof(u32)) && (rc == 0)) {
+		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
+		memcpy(ret_buf, &val, 4);
+
+		/* advance to the next dword */
+		offset += sizeof(u32);
+		ret_buf += sizeof(u32);
+		buf_size -= sizeof(u32);
+		cmd_flags = 0;
+	}
+
+	if (rc == 0) {
+		cmd_flags |= MCPR_NVM_COMMAND_LAST;
+		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
+		memcpy(ret_buf, &val, 4);
+	}
+
+	/* disable access to nvram interface */
+	bnx2x_disable_nvram_access(bp);
+
+	bnx2x_release_nvram_lock(bp);
+
+	return rc;
+}
+
+static int bnx2x_get_eeprom(struct net_device *dev,
+			    struct ethtool_eeprom *eeprom, u8 *eebuf)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int rc;
+
+	DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
+	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
+	   eeprom->len, eeprom->len);
+
+	/* parameters already validated in ethtool_get_eeprom */
+
+	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
+
+	return rc;
+}
+
+static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
+				   u32 cmd_flags)
+{
+	int rc;
+	int count, i;
+
+	/* build the command word */
+	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
+
+	/* need to clear DONE bit separately */
+	REG_WR(bp, GRCBASE_MCP, MCP_REG_MCPR_NVM_COMMAND,
+	       MCPR_NVM_COMMAND_DONE);
+
+	/* write the data */
+	REG_WR(bp, GRCBASE_MCP, MCP_REG_MCPR_NVM_WRITE, val);
+
+	/* address of the NVRAM to write to */
+	REG_WR(bp, GRCBASE_MCP, MCP_REG_MCPR_NVM_ADDR,
+	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
+
+	/* issue the write command */
+	REG_WR(bp, GRCBASE_MCP, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
+
+	/* adjust timeout for emulation/FPGA */
+	count = NVRAM_TIMEOUT_COUNT;
+	if (CHIP_REV(bp) == CHIP_REV_EMUL)
+		count *= 100;
+
+	/* wait for completion */
+	rc = -EBUSY;
+	for (i = 0; i < count; i++) {
+		udelay(5);
+		val = REG_RD(bp, GRCBASE_MCP, MCP_REG_MCPR_NVM_COMMAND);
+		if (val & MCPR_NVM_COMMAND_DONE) {
+			rc = 0;
+			break;
+		}
+	}
+
+	return rc;
+}
+
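+/* map a byte offset within a dword to the bit shift of that byte;
+ * byte 0 occupies the most significant bits of the NVRAM dword
+ */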
+#define BYTE_OFFSET(offset)		(8 * (3 - (offset & 0x03)))
+
+static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
+			      int buf_size)
+{
+	int rc;
+	u32 cmd_flags;
+	u32 align_offset;
+	u32 val;
+
+	if (offset + buf_size > bp->flash_size) {
+		DP(NETIF_MSG_NVM, "Invalid paramter: offset (0x%x) +"
+				  " buf_size (0x%x) > flash_size (0x%x)\n",
+		   offset, buf_size, bp->flash_size);
+		return -EINVAL;
+	}
+
+	/* request access to nvram interface */
+	rc = bnx2x_acquire_nvram_lock(bp);
+	if (rc) {
+		return rc;
+	}
+
+	/* enable access to nvram interface */
+	bnx2x_enable_nvram_access(bp);
+
+	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
+	align_offset = (offset & ~0x03);
+	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
+
+	if (rc == 0) {
+		val &= ~(0xff << BYTE_OFFSET(offset));
+		val |= (*data_buf << BYTE_OFFSET(offset));
+
+		DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
+
+		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
+					     cmd_flags);
+	}
+
+	/* disable access to nvram interface */
+	bnx2x_disable_nvram_access(bp);
+
+	bnx2x_release_nvram_lock(bp);
+
+	return rc;
+}
+
+static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
+			     int buf_size)
+{
+	int rc;
+	u32 cmd_flags;
+	u32 val;
+	u32 written_so_far;
+
+	if (buf_size == 1) {	/* ethtool */
+		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
+	}
+
+	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
+		DP(NETIF_MSG_NVM, "Invalid paramter: offset 0x%x"
+				  "  buf_size 0x%x\n",
+		   offset, buf_size);
+		return -EINVAL;
+	}
+
+	if (offset + buf_size > bp->flash_size) {
+		DP(NETIF_MSG_NVM, "Invalid paramter: offset (0x%x) +"
+				  " buf_size (0x%x) > flash_size (0x%x)\n",
+		   offset, buf_size, bp->flash_size);
+		return -EINVAL;
+	}
+
+	/* request access to nvram interface */
+	rc = bnx2x_acquire_nvram_lock(bp);
+	if (rc) {
+		return rc;
+	}
+
+	/* enable access to nvram interface */
+	bnx2x_enable_nvram_access(bp);
+
+	written_so_far = 0;
+	cmd_flags = MCPR_NVM_COMMAND_FIRST;
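+	/* each NVRAM page appears to need its own FIRST..LAST command
+	 * sequence, so the flags are re-armed on page boundaries below
+	 */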
+	while ((written_so_far < buf_size) && (rc == 0)) {
+		if (written_so_far == (buf_size - sizeof(u32)))
+			cmd_flags |= MCPR_NVM_COMMAND_LAST;
+		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
+			cmd_flags |= MCPR_NVM_COMMAND_LAST;
+		else if ((offset % NVRAM_PAGE_SIZE) == 0)
+			cmd_flags |= MCPR_NVM_COMMAND_FIRST;
+
+		memcpy(&val, data_buf, 4);
+		DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
+
+		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
+
+		/* advance to the next dword */
+		offset += sizeof(u32);
+		data_buf += sizeof(u32);
+		written_so_far += sizeof(u32);
+		cmd_flags = 0;
+	}
+
+	/* disable access to nvram interface */
+	bnx2x_disable_nvram_access(bp);
+
+	bnx2x_release_nvram_lock(bp);
+
+	return rc;
+}
+
+static int bnx2x_set_eeprom(struct net_device *dev,
+			    struct ethtool_eeprom *eeprom, u8 *eebuf)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int rc;
+
+	DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
+	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
+	   eeprom->len, eeprom->len);
+
+	/* parameters already validated in ethtool_set_eeprom */
+
+	rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
+
+	return rc;
+}
+
+static int bnx2x_get_coalesce(struct net_device *dev,
+			      struct ethtool_coalesce *coal)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	memset(coal, 0, sizeof(struct ethtool_coalesce));
+
+	coal->rx_coalesce_usecs = bp->rx_ticks;
+	coal->tx_coalesce_usecs = bp->tx_ticks;
+	coal->stats_block_coalesce_usecs = bp->stats_ticks;
+
+	return 0;
+}
+
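+/* Program the per-queue status block host coalescing parameters: the
+ * timeout registers take the tick values divided by 12 (presumably the
+ * HC timer resolution), and a tick value of 0 disables coalescing for
+ * that index.
+ */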
+static void bnx2x_update_coalesce(struct bnx2x *bp)
+{
+	int port = bp->port;
+	int i;
+
+	for_each_queue(bp, i) {
+		/* TBD: the index needs to be changed on BE machines! */
+
+		/* HC_INDEX_U_ETH_RX_CQ_CONS */
+		REG_WR8(bp, BAR_USTRORM_INTMEM,
+			USTORM_SB_HC_TIMEOUT_OFFSET(port, i,
+						   HC_INDEX_U_ETH_RX_CQ_CONS),
+			bp->rx_ticks_int/12);
+		REG_WR16(bp, BAR_USTRORM_INTMEM,
+			 USTORM_SB_HC_DISABLE_OFFSET(port, i,
+						   HC_INDEX_U_ETH_RX_CQ_CONS),
+			 bp->rx_ticks_int ? 0 : 1);
+
+		/* HC_INDEX_C_ETH_TX_CQ_CONS */
+		REG_WR8(bp, BAR_CSTRORM_INTMEM,
+			CSTORM_SB_HC_TIMEOUT_OFFSET(port, i,
+						   HC_INDEX_C_ETH_TX_CQ_CONS),
+			bp->tx_ticks_int/12);
+		REG_WR16(bp, BAR_CSTRORM_INTMEM,
+			 CSTORM_SB_HC_DISABLE_OFFSET(port, i,
+						   HC_INDEX_C_ETH_TX_CQ_CONS),
+			 bp->tx_ticks_int ? 0 : 1);
+	}
+	/* TBD Dynamic HC? */
+}
+
+static int bnx2x_set_coalesce(struct net_device *dev,
+			      struct ethtool_coalesce *coal)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
+	if (bp->rx_ticks > 3000)
+		bp->rx_ticks = 3000;
+
+	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
+	if (bp->tx_ticks > 0x3000)
+		bp->tx_ticks = 0x3000;
+
+	bp->stats_ticks = coal->stats_block_coalesce_usecs;
+	if (bp->stats_ticks > 0xffff00)
+		bp->stats_ticks = 0xffff00;
+	bp->stats_ticks &= 0xffff00;
+
+	if (netif_running(bp->dev)) {
+		bnx2x_update_coalesce(bp);
+	}
+
+	return 0;
+}
+
+static void bnx2x_get_ringparam(struct net_device *dev,
+				struct ethtool_ringparam *ering)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	ering->rx_max_pending = MAX_RX_AVAIL;
+	ering->rx_mini_max_pending = 0;
+	ering->rx_jumbo_max_pending = 0;
+
+	ering->rx_pending = bp->rx_ring_size;
+	ering->rx_mini_pending = 0;
+	ering->rx_jumbo_pending = 0;
+
+	ering->tx_max_pending = MAX_TX_AVAIL;
+	ering->tx_pending = bp->tx_ring_size;
+}
+
+static int bnx2x_set_ringparam(struct net_device *dev,
+			       struct ethtool_ringparam *ering)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if ((ering->rx_pending > MAX_RX_AVAIL) ||
+	    (ering->tx_pending > MAX_TX_AVAIL) ||
+	    (ering->tx_pending <= MAX_SKB_FRAGS + 4)) {
+
+		return -EINVAL;
+	}
+	bp->rx_ring_size = ering->rx_pending;
+	bp->tx_ring_size = ering->tx_pending;
+
+	if (netif_running(bp->dev)) {
+		bnx2x_nic_unload(bp, 0);
+		bnx2x_nic_load(bp, 0);
+	}
+
+	return 0;
+}
+
+static void bnx2x_get_pauseparam(struct net_device *dev,
+				 struct ethtool_pauseparam *epause)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	epause->autoneg =
+		((bp->req_autoneg & AUTONEG_FLOW_CTRL) == AUTONEG_FLOW_CTRL);
+	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) == FLOW_CTRL_RX);
+	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) == FLOW_CTRL_TX);
+
+	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
+	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
+	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
+}
+
+static int bnx2x_set_pauseparam(struct net_device *dev,
+				struct ethtool_pauseparam *epause)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
+	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
+	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
+
+	bp->req_flow_ctrl = FLOW_CTRL_AUTO;
+	if (epause->rx_pause)
+		bp->req_flow_ctrl |= FLOW_CTRL_RX;
+	if (epause->tx_pause)
+		bp->req_flow_ctrl |= FLOW_CTRL_TX;
+
+	if (epause->autoneg) {
+		bp->req_autoneg |= AUTONEG_FLOW_CTRL;
+	} else {
+		bp->req_autoneg &= ~AUTONEG_FLOW_CTRL;
+	}
+
+	switch (bp->req_flow_ctrl) {
+	case FLOW_CTRL_AUTO:
+		if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
+			bp->pause_mode = PAUSE_BOTH;
+			bp->advertising |= (ADVERTISED_Pause |
+					    ADVERTISED_Asym_Pause);
+		} else {
+			bp->pause_mode = PAUSE_NONE;
+			bp->advertising &= ~(ADVERTISED_Pause |
+					     ADVERTISED_Asym_Pause);
+		}
+		break;
+
+	case FLOW_CTRL_TX:
+		bp->pause_mode = PAUSE_ASYMMETRIC;
+		bp->advertising |= ADVERTISED_Asym_Pause;
+		break;
+
+	case FLOW_CTRL_RX:
+		bp->pause_mode = PAUSE_BOTH;
+		bp->advertising |= (ADVERTISED_Pause |
+				    ADVERTISED_Asym_Pause);
+		break;
+
+	case FLOW_CTRL_BOTH:
+		bp->pause_mode = PAUSE_SYMMETRIC;
+		bp->advertising |= ADVERTISED_Pause;
+		break;
+	}
+
+	DP(NETIF_MSG_LINK, "req_autoneg 0x%x  req_flow_ctrl 0x%x\n"
+	   DP_LEVEL "  pause_mode %d  advertising 0x%x\n",
+	   bp->req_autoneg, bp->req_flow_ctrl, bp->pause_mode,
+	   bp->advertising);
+
+	bnx2x_stop_stats(bp);
+	bnx2x_link_initialize(bp);
+
+	return 0;
+}
+
+static u32 bnx2x_get_rx_csum(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	return bp->rx_csum;
+}
+
+static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	bp->rx_csum = data;
+	return 0;
+}
+
+static int bnx2x_set_tso(struct net_device *dev, u32 data)
+{
+	if (data)
+		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
+	else
+		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
+	return 0;
+}
+
+static struct {
+	char string[ETH_GSTRING_LEN];
+} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
+	{ "idle check (online)" },
+	{ "MC Errors  (online)" }
+};
+
+static int bnx2x_self_test_count(struct net_device *dev)
+{
+	return BNX2X_NUM_TESTS;
+}
+
+static void bnx2x_self_test(struct net_device *dev,
+			    struct ethtool_test *etest, u64 *buf)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
+
+	if (bnx2x_idle_chk(bp) != 0) {
+		buf[0] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
+	if (bnx2x_mc_assert(bp)) {
+		buf[1] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
+	bnx2x_panic_dump(bp);
+}
+
+static struct {
+	char string[ETH_GSTRING_LEN];
+} bnx2x_stats_str_arr[BNX2X_NUM_STATS] = {
+	{ "rx_bytes"},                           /*  0 */
+	{ "rx_error_bytes"},                     /*  1 */
+	{ "tx_bytes"},                           /*  2 */
+	{ "tx_error_bytes"},                     /*  3 */
+	{ "rx_ucast_packets"},                   /*  4 */
+	{ "rx_mcast_packets"},                   /*  5 */
+	{ "rx_bcast_packets"},                   /*  6 */
+	{ "tx_ucast_packets"},                   /*  7 */
+	{ "tx_mcast_packets"},                   /*  8 */
+	{ "tx_bcast_packets"},                   /*  9 */
+	{ "tx_mac_errors"},                      /* 10 */
+	{ "tx_carrier_errors"},                  /* 11 */
+	{ "rx_crc_errors"},                      /* 12 */
+	{ "rx_align_errors"},                    /* 13 */
+	{ "tx_single_collisions"},               /* 14 */
+	{ "tx_multi_collisions"},                /* 15 */
+	{ "tx_deferred"},                        /* 16 */
+	{ "tx_excess_collisions"},               /* 17 */
+	{ "tx_late_collisions"},                 /* 18 */
+	{ "tx_total_collisions"},                /* 19 */
+	{ "rx_fragments"},                       /* 20 */
+	{ "rx_jabbers"},                         /* 21 */
+	{ "rx_undersize_packets"},               /* 22 */
+	{ "rx_oversize_packets"},                /* 23 */
+	{ "rx_64_byte_packets"},                 /* 24 */
+	{ "rx_65_to_127_byte_packets"},          /* 25 */
+	{ "rx_128_to_255_byte_packets"},         /* 26 */
+	{ "rx_256_to_511_byte_packets"},         /* 27 */
+	{ "rx_512_to_1023_byte_packets"},        /* 28 */
+	{ "rx_1024_to_1522_byte_packets"},       /* 29 */
+	{ "rx_1523_to_9022_byte_packets"},       /* 30 */
+	{ "tx_64_byte_packets"},                 /* 31 */
+	{ "tx_65_to_127_byte_packets"},          /* 32 */
+	{ "tx_128_to_255_byte_packets"},         /* 33 */
+	{ "tx_256_to_511_byte_packets"},         /* 34 */
+	{ "tx_512_to_1023_byte_packets"},        /* 35 */
+	{ "tx_1024_to_1522_byte_packets"},       /* 36 */
+	{ "tx_1523_to_9022_byte_packets"},       /* 37 */
+	{ "rx_xon_frames"},                      /* 38 */
+	{ "rx_xoff_frames"},                     /* 39 */
+	{ "tx_xon_frames"},                      /* 40 */
+	{ "tx_xoff_frames"},                     /* 41 */
+	{ "rx_mac_ctrl_frames"},                 /* 44 */
+	{ "rx_filtered_packets"},                /* 43 */
+	{ "rx_discards"},                        /* 44 */
+};
+
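+/* translate a struct bnx2x_eth_stats member into an index into the
+ * 32-bit word array overlaid on the statistics block
+ */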
+#define STATS_OFFSET32(offset_name) \
+	(offsetof(struct bnx2x_eth_stats, offset_name) / 4)
+
+static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
+	STATS_OFFSET32(total_bytes_received_hi),                     /*  0 */
+	STATS_OFFSET32(stat_IfHCInBadOctets_hi),                     /*  1 */
+	STATS_OFFSET32(total_bytes_transmitted_hi),                  /*  2 */
+	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),                    /*  3 */
+	STATS_OFFSET32(total_unicast_packets_received_hi),           /*  4 */
+	STATS_OFFSET32(total_multicast_packets_received_hi),         /*  5 */
+	STATS_OFFSET32(total_broadcast_packets_received_hi),         /*  6 */
+	STATS_OFFSET32(total_unicast_packets_transmitted_hi),        /*  7 */
+	STATS_OFFSET32(total_multicast_packets_transmitted_hi),      /*  8 */
+	STATS_OFFSET32(total_broadcast_packets_transmitted_hi),      /*  9 */
+	STATS_OFFSET32(stat_Dot3statsinternalmactransmiterrors),     /* 10 */
+	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),            /* 11 */
+	STATS_OFFSET32(crc_receive_errors),                          /* 12 */
+	STATS_OFFSET32(alignment_errors),                            /* 13 */
+	STATS_OFFSET32(single_collision_transmit_frames),            /* 14 */
+	STATS_OFFSET32(multiple_collision_transmit_frames),          /* 15 */
+	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),         /* 16 */
+	STATS_OFFSET32(excessive_collision_frames),                  /* 17 */
+	STATS_OFFSET32(late_collision_frames),                       /* 18 */
+	STATS_OFFSET32(number_of_bugs_found_in_stats_spec),          /* 19 */
+	STATS_OFFSET32(runt_packets_received),                       /* 20 */
+	STATS_OFFSET32(jabber_packets_received),                     /* 21 */
+	STATS_OFFSET32(error_runt_packets_received),                 /* 22 */
+	STATS_OFFSET32(error_jabber_packets_received),               /* 23 */
+	STATS_OFFSET32(frames_received_64_bytes),                    /* 24 */
+	STATS_OFFSET32(frames_received_65_127_bytes),                /* 25 */
+	STATS_OFFSET32(frames_received_128_255_bytes),               /* 26 */
+	STATS_OFFSET32(frames_received_256_511_bytes),               /* 27 */
+	STATS_OFFSET32(frames_received_512_1023_bytes),              /* 28 */
+	STATS_OFFSET32(frames_received_1024_1522_bytes),             /* 29 */
+	STATS_OFFSET32(frames_received_1523_9022_bytes),             /* 30 */
+	STATS_OFFSET32(frames_transmitted_64_bytes),                 /* 31 */
+	STATS_OFFSET32(frames_transmitted_65_127_bytes),             /* 32 */
+	STATS_OFFSET32(frames_transmitted_128_255_bytes),            /* 33 */
+	STATS_OFFSET32(frames_transmitted_256_511_bytes),            /* 34 */
+	STATS_OFFSET32(frames_transmitted_512_1023_bytes),           /* 35 */
+	STATS_OFFSET32(frames_transmitted_1024_1522_bytes),          /* 36 */
+	STATS_OFFSET32(frames_transmitted_1523_9022_bytes),          /* 37 */
+	STATS_OFFSET32(pause_xon_frames_received),                   /* 38 */
+	STATS_OFFSET32(pause_xoff_frames_received),                  /* 39 */
+	STATS_OFFSET32(pause_xon_frames_transmitted),                /* 40 */
+	STATS_OFFSET32(pause_xoff_frames_transmitted),               /* 41 */
+	STATS_OFFSET32(control_frames_received),                     /* 42 */
+	STATS_OFFSET32(mac_filter_discard),                          /* 43 */
+	STATS_OFFSET32(no_buff_discard),                             /* 44 */
+};
+
+static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
+	8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
+	4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
+	4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+	4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+	4, 4, 4, 4, 4,
+};
+
+static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(buf, bnx2x_stats_str_arr, sizeof(bnx2x_stats_str_arr));
+		break;
+
+	case ETH_SS_TEST:
+		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
+		break;
+	}
+}
+
+static int bnx2x_get_stats_count(struct net_device *dev)
+{
+	return BNX2X_NUM_STATS;
+}
+
+static void bnx2x_get_ethtool_stats(struct net_device *dev,
+				    struct ethtool_stats *stats, u64 *buf)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u32 *hw_stats = (u32 *)bnx2x_sp(bp, eth_stats);
+	int i;
+	u8 *stats_len_arr = bnx2x_stats_len_arr;
+
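+	/* counter width per stats_len_arr: 0 = skip, 4 = u32, 8 = hi/lo pair */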
+	for (i = 0; i < BNX2X_NUM_STATS; i++) {
+		if (stats_len_arr[i] == 0) {
+			/* skip this counter */
+			buf[i] = 0;
+			continue;
+		}
+		if (stats_len_arr[i] == 4) {
+			/* 4-byte counter */
+			buf[i] = (u64)*(hw_stats + bnx2x_stats_offset_arr[i]);
+			continue;
+		}
+		/* 8-byte counter */
+		buf[i] = HILO_U64(*(hw_stats + bnx2x_stats_offset_arr[i]),
+				  *(hw_stats + bnx2x_stats_offset_arr[i] + 1));
+	}
+}
+
+static int bnx2x_phys_id(struct net_device *dev, u32 data)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int i;
+
+	if (data == 0)
+		data = 2;
+
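+	/* blink the LEDs for 'data' seconds, 500 ms on / 500 ms off */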
+	for (i = 0; i < (data * 2); i++) {
+		if ((i % 2) == 0)
+			bnx2x_leds_set(bp, SPEED_1000);
+		else
+			bnx2x_leds_unset(bp);
+		msleep_interruptible(500);
+		if (signal_pending(current))
+			break;
+	}
+
+	if (bp->link_up)
+		bnx2x_leds_set(bp, bp->line_speed);
+
+	return 0;
+}
+
+
+static const struct ethtool_ops bnx2x_ethtool_ops = {
+	.get_settings		= bnx2x_get_settings,
+	.set_settings		= bnx2x_set_settings,
+	.get_drvinfo		= bnx2x_get_drvinfo,
+	.get_wol		= bnx2x_get_wol,
+	.set_wol		= bnx2x_set_wol,
+	.get_msglevel		= bnx2x_get_msglevel,
+	.set_msglevel		= bnx2x_set_msglevel,
+	.nway_reset		= bnx2x_nway_reset,
+	.get_link		= ethtool_op_get_link,
+	.get_eeprom_len		= bnx2x_get_eeprom_len,
+	.get_eeprom		= bnx2x_get_eeprom,
+	.set_eeprom		= bnx2x_set_eeprom,
+	.get_coalesce		= bnx2x_get_coalesce,
+	.set_coalesce		= bnx2x_set_coalesce,
+	.get_ringparam		= bnx2x_get_ringparam,
+	.set_ringparam		= bnx2x_set_ringparam,
+	.get_pauseparam		= bnx2x_get_pauseparam,
+	.set_pauseparam		= bnx2x_set_pauseparam,
+	.get_rx_csum		= bnx2x_get_rx_csum,
+	.set_rx_csum		= bnx2x_set_rx_csum,
+	.get_tx_csum		= ethtool_op_get_tx_csum,
+	.set_tx_csum		= ethtool_op_set_tx_csum,
+	.get_sg			= ethtool_op_get_sg,
+	.set_sg			= ethtool_op_set_sg,
+	.get_tso		= ethtool_op_get_tso,
+	.set_tso		= bnx2x_set_tso,
+	.self_test_count	= bnx2x_self_test_count,
+	.self_test		= bnx2x_self_test,
+	.get_strings		= bnx2x_get_strings,
+	.phys_id		= bnx2x_phys_id,
+	.get_stats_count	= bnx2x_get_stats_count,
+	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
+	.get_perm_addr		= ethtool_op_get_perm_addr,
+};
+
+#ifdef BNX2X_GZIP_MC /* needs a change to the init tool */
+/****************************************************************************
+* gzip service functions
+****************************************************************************/
+
+#define FW_BUF_SIZE			0x8000
+
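+/* allocate the output buffer and zlib state used to inflate the firmware */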
+static int bnx2x_gunzip_init(struct bnx2x *bp)
+{
+	bp->gunzip_buf = vmalloc(FW_BUF_SIZE);
+	if (bp->gunzip_buf == NULL)
+		goto gunzip_nomem1;
+
+	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
+	if (bp->strm == NULL)
+		goto gunzip_nomem2;
+
+	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
+				      GFP_KERNEL);
+	if (bp->strm->workspace == NULL)
+		goto gunzip_nomem3;
+
+	return 0;
+
+gunzip_nomem3:
+	kfree(bp->strm);
+	bp->strm = NULL;
+
+gunzip_nomem2:
+	vfree(bp->gunzip_buf);
+	bp->gunzip_buf = NULL;
+
+gunzip_nomem1:
+	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
+	       "uncompression.\n", bp->dev->name);
+	return -ENOMEM;
+}
+
+static void bnx2x_gunzip_end(struct bnx2x *bp)
+{
+	kfree(bp->strm->workspace);
+
+	kfree(bp->strm);
+	bp->strm = NULL;
+
+	if (bp->gunzip_buf) {
+		vfree(bp->gunzip_buf);
+		bp->gunzip_buf = NULL;
+	}
+}
+
+static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len,
+			void **outbuf, int *outlen)
+{
+	int n, rc;
+
+	/* check gzip header */
+	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
+		return -EINVAL;
+
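+	/* the fixed gzip header is 10 bytes (RFC 1952); an optional
+	 * NUL-terminated file name may follow it
+	 */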
+	n = 10;
+
+#define FNAME				0x8
+
+	if (zbuf[3] & FNAME)
+		while ((n < len) && (zbuf[n++] != 0));
+
+	bp->strm->next_in = zbuf + n;
+	bp->strm->avail_in = len - n;
+	bp->strm->next_out = bp->gunzip_buf;
+	bp->strm->avail_out = FW_BUF_SIZE;
+
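+	/* negative window bits: inflate a raw deflate stream, no zlib header */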
+	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
+	if (rc != Z_OK)
+		return rc;
+
+	rc = zlib_inflate(bp->strm, Z_FINISH);
+
+	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
+	*outbuf = bp->gunzip_buf;
+
+	if ((rc != Z_OK) && (rc != Z_STREAM_END))
+		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
+		       bp->dev->name, bp->strm->msg);
+
+	zlib_inflateEnd(bp->strm);
+
+	if (rc == Z_STREAM_END)
+		return 0;
+
+	return rc;
+}
+#endif
+
+/* end of ethtool */
+
+
+/* Called with rtnl_lock */
+static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct mii_ioctl_data *data = if_mii(ifr);
+	struct bnx2x *bp = netdev_priv(dev);
+	int err;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = bp->phy_addr;
+
+		/* fallthru */
+	case SIOCGMIIREG: {
+		u32 mii_regval;
+
+		spin_lock_bh(&bp->phy_lock);
+		if (bp->state == BNX2X_STATE_OPEN) {
+			err = bnx2x_mdio22_read(bp, data->reg_num & 0x1f,
+						&mii_regval);
+
+			data->val_out = mii_regval;
+		} else {
+			err = -EAGAIN;
+		}
+		spin_unlock_bh(&bp->phy_lock);
+		return err;
+	}
+
+	case SIOCSMIIREG:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+
+		spin_lock_bh(&bp->phy_lock);
+		if (bp->state == BNX2X_STATE_OPEN) {
+			err = bnx2x_mdio22_write(bp, data->reg_num & 0x1f,
+						 data->val_in);
+		} else {
+			err = -EAGAIN;
+		}
+		spin_unlock_bh(&bp->phy_lock);
+		return err;
+
+	default:
+		/* do nothing */
+		break;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+/* Called with rtnl_lock */
+static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EINVAL;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	if (netif_running(dev))
+		bnx2x_set_mac_addr(bp);
+
+	return 0;
+}
+
+/* Called with rtnl_lock */
+static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
+	    ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
+		return -EINVAL;
+
+	/* This does not race with packet allocation
+	 * because the actual alloc size is
+	 * only updated as part of load
+	 */
+	dev->mtu = new_mtu;
+	if (netif_running(dev)) {
+		bnx2x_nic_unload(bp, 0);
+		bnx2x_nic_load(bp, 0);
+	}
+	return 0;
+}
+
+#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+static void poll_bnx2x(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
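+	/* netpoll: run the interrupt handler with the device IRQ masked */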
+	disable_irq(bp->pdev->irq);
+	bnx2x_interrupt(bp->pdev->irq, dev);
+	enable_irq(bp->pdev->irq);
+}
+#endif
+
+static int __devinit bnx2x_init_board(struct pci_dev *pdev,
+				      struct net_device *dev)
+{
+	struct bnx2x *bp;
+	int rc;
+
+	SET_MODULE_OWNER(dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+	bp = netdev_priv(dev);
+
+	bp->flags = 0;
+	bp->phy_flags = 0;
+	bp->port = PCI_FUNC(pdev->devfn);
+
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		printk(KERN_ERR PFX "Cannot enable PCI device, aborting.");
+		goto err_out;
+	}
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		printk(KERN_ERR PFX "Cannot find PCI device base address, "
+		       "aborting.\n");
+		rc = -ENODEV;
+		goto err_out_disable;
+	}
+
+	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+		printk(KERN_ERR PFX "Cannot find second PCI device "
+		       "base address, aborting.\n");
+		rc = -ENODEV;
+		goto err_out_disable;
+	}
+
+	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
+	if (rc) {
+		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
+		       "aborting.\n");
+		goto err_out_disable;
+	}
+
+	pci_set_master(pdev);
+
+	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+	if (bp->pm_cap == 0) {
+		printk(KERN_ERR PFX "Cannot find power management "
+		       "capability, aborting.\n");
+		rc = -EIO;
+		goto err_out_release;
+	}
+
+	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	if (bp->pcie_cap == 0) {
+		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
+		       " aborting.\n");
+		rc = -EIO;
+		goto err_out_release;
+	}
+
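+	/* prefer 64-bit DMA; fall back to a 32-bit mask if not supported */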
+	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
+		bp->flags |= USING_DAC_FLAG;
+		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
+			printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
+			       "failed, aborting.\n");
+			rc = -EIO;
+			goto err_out_release;
+		}
+	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
+
+		printk(KERN_ERR PFX "System does not support DMA,"
+		       " aborting.\n");
+		rc = -EIO;
+		goto err_out_release;
+	}
+
+	bp->dev = dev;
+	bp->pdev = pdev;
+
+	spin_lock_init(&bp->phy_lock);
+	INIT_WORK(&bp->reset_task, bnx2x_reset_task);
+	tasklet_init(&bp->sp_task, bnx2x_sp_task, (unsigned long)bp);
+
+	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
+	dev->mem_end = pci_resource_end(pdev, 0);
+
+	dev->irq = pdev->irq;
+
+	bp->regview = ioremap_nocache(dev->base_addr,
+				      pci_resource_len(pdev, 0));
+	if (!bp->regview) {
+		printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
+		rc = -ENOMEM;
+		goto err_out_release;
+	}
+
+	bp->doorbells = ioremap_nocache(pci_resource_start(pdev , 2),
+					pci_resource_len(pdev, 2));
+	if (!bp->doorbells) {
+		printk(KERN_ERR PFX "Cannot map doorbell space, aborting.\n");
+		rc = -ENOMEM;
+		goto err_out_unmap;
+	}
+
+	bnx2x_set_power_state(bp, PCI_D0);
+	/* TBD why does open also call this? */
+
+	bnx2x_get_hwinfo(bp);
+
+	if (CHIP_REV(bp) == CHIP_REV_FPGA) {
+		printk(KERN_ERR PFX "FPGA detacted. MCP disabled,"
+		       " will only init first device\n");
+		onefunc = 1;
+		nomcp = 1;
+	}
+
+	if (nomcp) {
+		printk(KERN_ERR PFX "MCP disabled, will only"
+		       " init first device\n");
+		onefunc = 1;
+	}
+
+	if (onefunc && bp->port) {
+		printk(KERN_ERR PFX "Second device disabled. exiting.\n");
+		rc = -ENODEV;
+		goto err_out_unmap;
+	}
+
+	bp->tx_ring_size = MAX_TX_AVAIL;
+	bp->rx_ring_size = MAX_RX_AVAIL;
+
+	bp->rx_csum = 1;
+
+	bp->rx_offset = 0;
+
+	bp->tx_quick_cons_trip_int = 0xff;
+	bp->tx_quick_cons_trip = 0xff;
+	bp->tx_ticks_int = 50;
+	bp->tx_ticks = 50;
+
+	bp->rx_quick_cons_trip_int = 0xff;
+	bp->rx_quick_cons_trip = 0xff;
+	bp->rx_ticks_int = 25;
+	bp->rx_ticks = 25;
+
+	bp->stats_ticks = 1000000 & 0xffff00;
+
+	bp->timer_interval = HZ;
+	bp->current_interval = (poll ? poll : HZ);	/* TBD: fixme */
+
+	init_timer(&bp->timer);
+	bp->timer.expires = RUN_AT(bp->timer_interval);
+	bp->timer.data = (unsigned long) bp;
+	bp->timer.function = bnx2x_timer;
+
+	return 0;
+
+err_out_unmap:
+	if (bp->regview) {
+		iounmap(bp->regview);
+		bp->regview = NULL;
+	}
+
+	if (bp->doorbells) {
+		iounmap(bp->doorbells);
+		bp->doorbells = NULL;
+	}
+
+err_out_release:
+	pci_release_regions(pdev);
+
+err_out_disable:
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+
+err_out:
+	return rc;
+}
+
+static int __devinit bnx2x_init_one(struct pci_dev *pdev,
+				    const struct pci_device_id *ent)
+{
+	static int version_printed;
+	struct net_device *dev = NULL;
+	struct bnx2x *bp;
+	int rc, i;
+	int port = PCI_FUNC(pdev->devfn);
+
+	if (version_printed++ == 0)
+		printk(KERN_INFO "%s", version);
+
+	/* dev zeroed in init_etherdev */
+	dev = alloc_etherdev(sizeof(*bp));
+
+	if (!dev)
+		return -ENOMEM;
+
+	netif_carrier_off(dev);
+
+	bp = netdev_priv(dev);
+	bp->msglevel = debug;
+
+	if (port && onefunc) {
+		printk(KERN_ERR PFX "Second function disabled, exiting\n");
+		free_netdev(dev);
+		return 0;
+	}
+
+	rc = bnx2x_init_board(pdev, dev);
+	if (rc < 0) {
+		free_netdev(dev);
+		return rc;
+	}
+
+	dev->open = bnx2x_open;
+	dev->hard_start_xmit = bnx2x_start_xmit;
+	dev->stop = bnx2x_close;
+	dev->get_stats = bnx2x_get_stats;
+	dev->set_multicast_list = bnx2x_set_rx_mode;
+	dev->do_ioctl = bnx2x_ioctl;
+	dev->set_mac_address = bnx2x_change_mac_addr;
+	dev->change_mtu = bnx2x_change_mtu;
+	dev->tx_timeout = bnx2x_tx_timeout;
+	dev->watchdog_timeo = TX_TIMEOUT;
+#ifdef BCM_VLAN
+	dev->vlan_rx_register = bnx2x_vlan_rx_register;
+#endif
+	dev->poll = bnx2x_poll;
+	dev->ethtool_ops = &bnx2x_ethtool_ops;
+	dev->weight = 128;
+
+#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+	dev->poll_controller = poll_bnx2x;
+#endif
+	dev->features |= NETIF_F_SG;
+	if (bp->flags & USING_DAC_FLAG)
+		dev->features |= NETIF_F_HIGHDMA;
+	dev->features |= NETIF_F_IP_CSUM;
+#ifdef BCM_VLAN
+	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+#endif
+	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
+
+	rc = register_netdev(dev);
+	if (rc) {
+		printk(KERN_ERR PFX "Cannot register net device\n");
+		if (bp->regview)
+			iounmap(bp->regview);
+		if (bp->doorbells)
+			iounmap(bp->doorbells);
+		pci_release_regions(pdev);
+		pci_disable_device(pdev);
+		pci_set_drvdata(pdev, NULL);
+		free_netdev(dev);
+		return rc;
+	}
+
+	pci_set_drvdata(pdev, dev);
+
+	bp->name = board_info[ent->driver_data].name;
+	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz "
+	       "found at mem %lx, IRQ %d, ",
+	       dev->name, bp->name,
+	       ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
+	       ((CHIP_ID(bp) & 0x0ff0) >> 4),
+	       ((bp->flags & PCIX_FLAG) ? "-X" : ""),
+	       ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
+	       bp->bus_speed_mhz,
+	       dev->base_addr,
+	       bp->pdev->irq);
+
+	printk("node addr ");
+	for (i = 0; i < 6; i++)
+		printk("%2.2x", dev->dev_addr[i]);
+	printk("\n");
+
+	return 0;
+}
+
+static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp = netdev_priv(dev);
+
+	flush_scheduled_work();
+	tasklet_kill(&bp->sp_task);
+	unregister_netdev(dev);
+
+	if (bp->regview)
+		iounmap(bp->regview);
+
+	if (bp->doorbells)
+		iounmap(bp->doorbells);
+
+	free_netdev(dev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp = netdev_priv(dev);
+	int rc;
+
+	if (!netif_running(dev))
+		return 0;
+
+	rc = bnx2x_nic_unload(bp, 0);
+	if (rc)
+		return rc;
+
+	netif_device_detach(dev);
+	pci_save_state(pdev);
+	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
+	return 0;
+}
+
+static int bnx2x_resume(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp = netdev_priv(dev);
+	int rc;
+
+	if (!netif_running(dev))
+		return 0;
+	pci_restore_state(pdev);
+	bnx2x_set_power_state(bp, PCI_D0);
+	netif_device_attach(dev);
+	rc = bnx2x_nic_load(bp, 0);
+	if (rc)
+		return rc;
+	return 0;
+}
+
+static struct pci_driver bnx2x_pci_driver = {
+	.name       = DRV_MODULE_NAME,
+	.id_table   = bnx2x_pci_tbl,
+	.probe      = bnx2x_init_one,
+	.remove     = __devexit_p(bnx2x_remove_one),
+	.suspend    = bnx2x_suspend,
+	.resume     = bnx2x_resume,
+};
+
+static int __init bnx2x_init(void)
+{
+	return pci_register_driver(&bnx2x_pci_driver);
+}
+
+static void __exit bnx2x_cleanup(void)
+{
+	pci_unregister_driver(&bnx2x_pci_driver);
+}
+
+module_init(bnx2x_init);
+module_exit(bnx2x_cleanup);
+
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
new file mode 100644
index 0000000..e9a82b2
--- /dev/null
+++ b/drivers/net/bnx2x.h
@@ -0,0 +1,962 @@
+/* bnx2x.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2006 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Eliezer Tamir <eliezert@...adcom.com>
+ * Based on code from Michael Chan's bnx2 driver
+ */
+
+
+#ifndef BNX2X_H
+#define BNX2X_H
+
+/* error/debug prints */
+
+#define DRV_MODULE_NAME		"bnx2x"
+#define PFX DRV_MODULE_NAME	": "
+
+/* for messages that are currently off */
+#define BNX2X_MSG_OFF			0
+#define BNX2X_MSG_MCP			0x10000	/* was: NETIF_MSG_HW */
+#define BNX2X_MSG_STATS			0x20000	/* was: NETIF_MSG_TIMER */
+#define NETIF_MSG_NVM			0x40000	/* was: NETIF_MSG_HW */
+
+#define DP_LEVEL			KERN_NOTICE	/* was: KERN_DEBUG */
+
+/* regular debug print */
+#define DP(__mask, __fmt, __args...) do { \
+	if (bp->msglevel & (__mask)) \
+		printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __FUNCTION__, \
+		       __LINE__, bp->dev ? bp->dev->name : "?", ##__args); \
+	} while (0)
+
+/* for errors (never masked) */
+#define BNX2X_ERR(__fmt, __args...) do { \
+	printk(KERN_ERR PFX "[%s:%d(%s)]" __fmt, __FUNCTION__, \
+	       __LINE__, bp->dev ? bp->dev->name : "?", ##__args); \
+	} while (0)
+
+/* before we have a dev->name use dev_info() */
+#define BNX2X_DEV_INFO(__fmt, __args...) do { \
+	if (bp->msglevel & NETIF_MSG_PROBE) \
+		dev_info(&bp->pdev->dev, __fmt, ##__args); \
+	} while (0)
+
+
+#define ATTN_NIG_FOR_FUNC0		(1L << 8)
+#define ATTN_SW_TIMER_4_FUNC0		(1L << 9)
+#define GPIO_2_FUNC0			(1L << 10)
+#define GPIO_3_FUNC0			(1L << 11)
+#define GPIO_4_FUNC0			(1L << 12)
+#define ATTN_GENERAL_ATTN_1		(1L << 13)
+#define ATTN_GENERAL_ATTN_2		(1L << 14)
+#define ATTN_GENERAL_ATTN_3		(1L << 15)
+
+#define ATTN_NIG_FOR_FUNC1		(1L << 8)
+#define ATTN_SW_TIMER_4_FUNC1		(1L << 9)
+#define GPIO_2_FUNC1			(1L << 10)
+#define GPIO_3_FUNC1			(1L << 11)
+#define GPIO_4_FUNC1			(1L << 12)
+#define ATTN_GENERAL_ATTN_4		(1L << 13)
+#define ATTN_GENERAL_ATTN_5		(1L << 14)
+#define ATTN_GENERAL_ATTN_6		(1L << 15)
+
+#define ATTN_HARD_WIRED_MASK		0xff00
+#define ATTENTION_ID			4
+
+#ifdef BNX2X_STOP_ON_ERROR
+#warning stop on error defined
+#define bnx2x_panic() \
+	do { \
+		bp->panic = 1; \
+		BNX2X_ERR("driver assert\n"); \
+		bnx2x_disable_int(bp); \
+		bnx2x_panic_dump(bp); \
+	} while (0)
+#else
+#define bnx2x_panic()
+#endif
+
+#define U64_LO(x)			((u64)(x) & 0xffffffff)
+#define U64_HI(x)			((u64)(x) >> 32)
+#define HILO_U64(hi, lo)		(((u64)(hi) << 32) + (lo))
+
+/* This is the def and non-def status block ID format according to spec */
+#define SB_ID(port, stormID, cpuID)	(((port)<<7)|((stormID) << 5)|(cpuID))
+#define DEF_SB_ID(port, stormID)	(((port)<<7)|((stormID) << 5)|0x10)
+
+
+#define REG_RD(bp, block, offset) \
+		readl((u8 *)bp->regview + block + offset)
+
+#define REG_RD8(bp, block, offset) \
+		readb((u8 *)bp->regview + block + offset)
+
+#define REG_RD64(bp, block, offset) \
+		readq((u8 *)bp->regview + block + offset)
+
+#define REG_WR32(bp, block, offset, val)	REG_WR(bp, block, offset, val)
+
+#define REG_WR(bp, block, offset, val) \
+		writel((u32)val, (u8 *)bp->regview + block + offset)
+
+#define REG_WR16(bp, block, offset, val) \
+		writew((u16)val, (u8 *)bp->regview + block + offset)
+
+#define REG_WR8(bp, block, offset, val) \
+		writeb((u8)val, (u8 *)bp->regview + block + offset)
+
+#define REG_RD_IND(bp, block, offset) \
+		bnx2x_reg_rd_ind(bp, block + offset)
+
+#define REG_WR_IND(bp, block, offset, val) \
+		bnx2x_reg_wr_ind(bp, block + offset, val)
+
+#define SHMEM_RD(bp, type) \
+		REG_RD(bp, bp->shmem_base, offsetof(shmem_region_t, type))
+
+#define SHMEM_WR(bp, type, val) \
+		REG_WR(bp, bp->shmem_base, offsetof(shmem_region_t, type), val)
+
+/* must be used on a CID before placing it on a HW ring */
+#define HW_CID(bp, x)			(x | (bp->port << 23))
+#define HW_CID1(bp, x)			((u8)(x & 0xff))
+#define HW_CID2(bp, x)			((u8)((x >> 8) & 0xff))
+#define HW_CID3(bp, x)			((u8)(((x>>16) & 0x7f)|(bp->port<<7)))
+
+/* used on a CID received from the HW */
+#define SW_CID(x)			((x) & (COMMON_RAMROD_ETH_RX_CQE_CID >> 1))
+
+#define DPM_TRIGER_TYPE			0x40
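+/* each CID has its own page in the doorbell BAR */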
+#define DOORBELL(bp, cid, val) \
+	do { \
+		writel((u32)val, (u8 *)(bp)->doorbells + \
+		       ((BCM_PAGE_SIZE * cid) + DPM_TRIGER_TYPE)); \
+	} while (0)
+
+
+struct sw_rx_bd {
+	struct sk_buff	*skb;
+	DECLARE_PCI_UNMAP_ADDR(mapping)
+};
+
+struct sw_tx_bd {
+	struct sk_buff	*skb;
+	u16		first_bd;
+};
+#define BD_UNMAP_ADDR(bd)	HILO_U64((bd)->addr_hi, (bd)->addr_lo)
+#define BD_UNMAP_LEN(bd)	((bd)->nbytes)
+
+#define for_each_queue(bp, var)	for (var = 0; var < bp->num_queues; var++)
+
+#define for_each_nondefault_queue(bp, var) \
+				for (var = 1; var < bp->num_queues; var++)
+#define is_multi(bp)		(bp->num_queues > 1)
+
+
+#define MIN_ETHERNET_PACKET_SIZE	60
+#define MAX_ETHERNET_PACKET_SIZE	1514
+#define MAX_ETHERNET_JUMBO_PACKET_SIZE	9600
+
+
+#define RX_COPY_THRESH			92
+#define BCM_PAGE_BITS			12
+#define BCM_PAGE_SIZE			(1 << BCM_PAGE_BITS)
+
+#define NUM_TX_RINGS			32
+#define TX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_tx_bd))
+#define MAX_TX_DESC_CNT		(TX_DESC_CNT - 1)
+#define NUM_TX_BD		(TX_DESC_CNT * NUM_TX_RINGS)
+#define MAX_TX_BD		(NUM_TX_BD - 1)
+#define MAX_TX_AVAIL		(MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
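+/* NEXT_TX_IDX skips the last BD in each page; it is not used for frame data */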
+#define NEXT_TX_IDX(x)		((((x) & (MAX_TX_DESC_CNT)) == \
+				 (MAX_TX_DESC_CNT - 1)) ? (x)+2 : (x)+1)
+#define TX_BD(x)		((x) & MAX_TX_BD)
+#define TX_BD_POFF(x)		((x) & MAX_TX_DESC_CNT)
+
+#define NUM_RX_RINGS			32
+#define RX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
+#define MAX_RX_DESC_CNT		(RX_DESC_CNT - 1)
+#define NUM_RX_BD		(RX_DESC_CNT * NUM_RX_RINGS)
+#define MAX_RX_BD		(NUM_RX_BD - 1)
+#define MAX_RX_AVAIL		(MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
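+/* as on the TX side, the last BD in each RX page is skipped */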
+#define NEXT_RX_IDX(x)		((((x) & (MAX_RX_DESC_CNT)) == \
+				 (MAX_RX_DESC_CNT - 1)) ? (x)+2 : (x)+1)
+#define RX_BD(x)		((x) & MAX_RX_BD)
+
+#define SP_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
+#define MAX_SP_DESC_CNT		(SP_DESC_CNT - 1)
+#define NEXT_SPE(x)		(((x)+1 == (MAX_SP_DESC_CNT)) ? 0 : (x)+1)
+
+#define BNX2X_BTR		3
+
+
+#define BNX2X_NO_RX_FLAGS	(TSTORM_ETH_DROP_FLAGS_DROP_ALL_PACKETS)
+
+#define BNX2X_NORMAL_RX_FLAGS  (TSTORM_ETH_DROP_FLAGS_DROP_TCP_CS_ERROR_FLG|\
+				TSTORM_ETH_DROP_FLAGS_DROP_IP_CS_ERROR_FLG|\
+				TSTORM_ETH_DROP_FLAGS_DONT_DROP_MAC_ERR_FLG|\
+				TSTORM_ETH_DROP_FLAGS_DROP_TOO_BIG_PACKETS|\
+				TSTORM_ETH_DROP_FLAGS_DROP_UNMATCH_UNICAST|\
+				TSTORM_ETH_DROP_FLAGS_DROP_UNMATCH_MULTICAST|\
+				TSTORM_ETH_DROP_FLAGS_DONT_DROP_TTL0_FLG)
+
+#define BNX2X_ALLMULTI_RX_FLAGS	(TSTORM_ETH_DROP_FLAGS_DROP_TCP_CS_ERROR_FLG|\
+				 TSTORM_ETH_DROP_FLAGS_DROP_IP_CS_ERROR_FLG|\
+				 TSTORM_ETH_DROP_FLAGS_DONT_DROP_MAC_ERR_FLG|\
+				 TSTORM_ETH_DROP_FLAGS_DROP_TOO_BIG_PACKETS|\
+				 TSTORM_ETH_DROP_FLAGS_DROP_UNMATCH_UNICAST|\
+				 TSTORM_ETH_DROP_FLAGS_DONT_DROP_TTL0_FLG)
+
+#define BNX2X_PROMISC_RX_FLAGS	(TSTORM_ETH_DROP_FLAGS_DONT_DROP_TTL0_FLG)
+
+
+struct regp {
+	u32 lo;
+	u32 hi;
+};
+
+struct bmac_stats {
+	struct regp tx_gtpkt;
+	struct regp tx_gtxpf;
+	struct regp tx_gtfcs;
+	struct regp tx_gtmca;
+	struct regp tx_gtgca;
+	struct regp tx_gtfrg;
+	struct regp tx_gtovr;
+	struct regp tx_gt64;
+	struct regp tx_gt127;
+	struct regp tx_gt255;	/* 10 */
+	struct regp tx_gt511;
+	struct regp tx_gt1023;
+	struct regp tx_gt1518;
+	struct regp tx_gt2047;
+	struct regp tx_gt4095;
+	struct regp tx_gt9216;
+	struct regp tx_gt16383;
+	struct regp tx_gtmax;
+	struct regp tx_gtufl;
+	struct regp tx_gterr;	/* 20 */
+	struct regp tx_gtbyt;
+
+	struct regp rx_gr64;
+	struct regp rx_gr127;
+	struct regp rx_gr255;
+	struct regp rx_gr511;
+	struct regp rx_gr1023;
+	struct regp rx_gr1518;
+	struct regp rx_gr2047;
+	struct regp rx_gr4095;
+	struct regp rx_gr9216;	/* 30 */
+	struct regp rx_gr16383;
+	struct regp rx_grmax;
+	struct regp rx_grpkt;
+	struct regp rx_grfcs;
+	struct regp rx_grmca;
+	struct regp rx_grbca;
+	struct regp rx_grxcf;
+	struct regp rx_grxpf;
+	struct regp rx_grxuo;
+	struct regp rx_grjbr;	/* 40 */
+	struct regp rx_grovr;
+	struct regp rx_grflr;
+	struct regp rx_grmeg;
+	struct regp rx_grmeb;
+	struct regp rx_grbyt;
+	struct regp rx_grund;
+	struct regp rx_grfrg;
+	struct regp rx_grerb;
+	struct regp rx_grfre;
+	struct regp rx_gripj;	/* 50 */
+};
+
+struct emac_stats {
+	u32 rx_ifhcinoctets                        ;
+	u32 rx_ifhcinbadoctets                     ;
+	u32 rx_etherstatsfragments                 ;
+	u32 rx_ifhcinucastpkts                     ;
+	u32 rx_ifhcinmulticastpkts                 ;
+	u32 rx_ifhcinbroadcastpkts                 ;
+	u32 rx_dot3statsfcserrors                  ;
+	u32 rx_dot3statsalignmenterrors            ;
+	u32 rx_dot3statscarriersenseerrors         ;
+	u32 rx_xonpauseframesreceived              ;	/* 10 */
+	u32 rx_xoffpauseframesreceived             ;
+	u32 rx_maccontrolframesreceived            ;
+	u32 rx_xoffstateentered                    ;
+	u32 rx_dot3statsframestoolong              ;
+	u32 rx_etherstatsjabbers                   ;
+	u32 rx_etherstatsundersizepkts             ;
+	u32 rx_etherstatspkts64octets              ;
+	u32 rx_etherstatspkts65octetsto127octets   ;
+	u32 rx_etherstatspkts128octetsto255octets  ;
+	u32 rx_etherstatspkts256octetsto511octets  ;	/* 20 */
+	u32 rx_etherstatspkts512octetsto1023octets ;
+	u32 rx_etherstatspkts1024octetsto1522octets;
+	u32 rx_etherstatspktsover1522octets        ;
+
+	u32 rx_falsecarriererrors                  ;
+
+	u32 tx_ifhcoutoctets                       ;
+	u32 tx_ifhcoutbadoctets                    ;
+	u32 tx_etherstatscollisions                ;
+	u32 tx_outxonsent                          ;
+	u32 tx_outxoffsent                         ;
+	u32 tx_flowcontroldone                     ;	/* 30 */
+	u32 tx_dot3statssinglecollisionframes      ;
+	u32 tx_dot3statsmultiplecollisionframes    ;
+	u32 tx_dot3statsdeferredtransmissions      ;
+	u32 tx_dot3statsexcessivecollisions        ;
+	u32 tx_dot3statslatecollisions             ;
+	u32 tx_ifhcoutucastpkts                    ;
+	u32 tx_ifhcoutmulticastpkts                ;
+	u32 tx_ifhcoutbroadcastpkts                ;
+	u32 tx_etherstatspkts64octets              ;
+	u32 tx_etherstatspkts65octetsto127octets   ;	/* 40 */
+	u32 tx_etherstatspkts128octetsto255octets  ;
+	u32 tx_etherstatspkts256octetsto511octets  ;
+	u32 tx_etherstatspkts512octetsto1023octets ;
+	u32 tx_etherstatspkts1024octetsto1522octet ;
+	u32 tx_etherstatspktsover1522octets        ;
+	u32 tx_dot3statsinternalmactransmiterrors  ;	/* 46 */
+};
+
+union mac_stats {
+	struct emac_stats emac;
+	struct bmac_stats bmac;
+};
+
+struct nig_stats {
+	u32 brb_discard;
+	u32 brb_packet;
+	u32 brb_truncate;
+	u32 flow_ctrl_discard;
+	u32 flow_ctrl_octets;
+	u32 flow_ctrl_packet;
+	u32 mng_discard;
+	u32 mng_octet_inp;
+	u32 mng_octet_out;
+	u32 mng_packet_inp;
+	u32 mng_packet_out;
+	u32 pbf_octets;
+	u32 pbf_packet;
+	u32 safc_inp;
+	u32 done;
+	u32 pad;
+};
+
+struct bnx2x_eth_stats {
+	u32 pad;	/* to make long counters u64 aligned */
+	u32 mac_stx_start;
+	u32 total_bytes_received_hi;
+	u32 total_bytes_received_lo;
+	u32 total_bytes_transmitted_hi;
+	u32 total_bytes_transmitted_lo;
+	u32 total_unicast_packets_received_hi;
+	u32 total_unicast_packets_received_lo;
+	u32 total_multicast_packets_received_hi;
+	u32 total_multicast_packets_received_lo;
+	u32 total_broadcast_packets_received_hi;
+	u32 total_broadcast_packets_received_lo;
+	u32 total_unicast_packets_transmitted_hi;
+	u32 total_unicast_packets_transmitted_lo;
+	u32 total_multicast_packets_transmitted_hi;
+	u32 total_multicast_packets_transmitted_lo;
+	u32 total_broadcast_packets_transmitted_hi;
+	u32 total_broadcast_packets_transmitted_lo;
+	u32 crc_receive_errors;
+	u32 alignment_errors;
+	u32 false_carrier_detections;
+	u32 runt_packets_received;
+	u32 jabber_packets_received;
+	u32 pause_xon_frames_received;
+	u32 pause_xoff_frames_received;
+	u32 pause_xon_frames_transmitted;
+	u32 pause_xoff_frames_transmitted;
+	u32 single_collision_transmit_frames;
+	u32 multiple_collision_transmit_frames;
+	u32 late_collision_frames;
+	u32 excessive_collision_frames;
+	u32 control_frames_received;
+	u32 frames_received_64_bytes;
+	u32 frames_received_65_127_bytes;
+	u32 frames_received_128_255_bytes;
+	u32 frames_received_256_511_bytes;
+	u32 frames_received_512_1023_bytes;
+	u32 frames_received_1024_1522_bytes;
+	u32 frames_received_1523_9022_bytes;
+	u32 frames_transmitted_64_bytes;
+	u32 frames_transmitted_65_127_bytes;
+	u32 frames_transmitted_128_255_bytes;
+	u32 frames_transmitted_256_511_bytes;
+	u32 frames_transmitted_512_1023_bytes;
+	u32 frames_transmitted_1024_1522_bytes;
+	u32 frames_transmitted_1523_9022_bytes;
+	u32 valid_bytes_received_hi;
+	u32 valid_bytes_received_lo;
+	u32 error_runt_packets_received;
+	u32 error_jabber_packets_received;
+	u32 mac_stx_end;
+
+	u32 pad2;
+	u32 stat_IfHCInBadOctets_hi;
+	u32 stat_IfHCInBadOctets_lo;
+	u32 stat_IfHCOutBadOctets_hi;
+	u32 stat_IfHCOutBadOctets_lo;
+	u32 stat_Dot3statsinternalmactransmiterrors;
+	u32 stat_Dot3StatsCarrierSenseErrors;
+	u32 stat_Dot3StatsDeferredTransmissions;
+	u32 stat_FlowControlDone;
+	u32 stat_XoffStateEntered;
+
+	u32 x_total_sent_bytes_hi;
+	u32 x_total_sent_bytes_lo;
+	u32 x_total_sent_pkts;
+
+	u32 t_rcv_unicast_bytes_hi;
+	u32 t_rcv_unicast_bytes_lo;
+	u32 t_rcv_broadcast_bytes_hi;
+	u32 t_rcv_broadcast_bytes_lo;
+	u32 t_rcv_multicast_bytes_hi;
+	u32 t_rcv_multicast_bytes_lo;
+	u32 t_total_rcv_pkt;
+
+	u32 no_buff_discard;
+	u32 errors_discard;
+	u32 mac_filter_discard;
+	u32 ttl0_discard;
+	u32 xxoverflow_discard;
+
+	u32 brb_discard;
+	u32 brb_packet;
+	u32 brb_truncate;
+	u32 flow_ctrl_discard;
+	u32 flow_ctrl_octets;
+	u32 flow_ctrl_packet;
+	u32 mng_discard;
+	u32 mng_octet_inp;
+	u32 mng_octet_out;
+	u32 mng_packet_inp;
+	u32 mng_packet_out;
+	u32 pbf_octets;
+	u32 pbf_packet;
+	u32 safc_inp;
+	u32 driver_xoff;
+	u32 number_of_bugs_found_in_stats_spec; /* just kidding */
+};
+
+#ifdef BCM_MULTI
+#define MAX_CONTEXT 16
+#else
+#define MAX_CONTEXT 1
+#endif
+union cdu_context {
+	struct eth_context eth;
+	char pad[1024];
+};
+
+/* DMA memory not used in fastpath */
+struct bnx2x_slowpath {
+	union cdu_context		context[MAX_CONTEXT];
+	struct eth_stats_query		fw_stats;
+	struct mac_configuration_cmd	mac_config;
+	struct mac_configuration_cmd	mcast_config;
+	union mac_stats			mac_stats;
+	struct nig_stats		nig;
+	struct bnx2x_eth_stats		eth_stats;
+	u32				wb_comp;
+#define BNX2X_WB_COMP_VAL		0xe0d0d0ae
+	u32				wb_write[4];
+};
+
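+/* accessors for a slowpath field and for its bus address */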
+#define bnx2x_sp(bp, var) (&bp->slowpath->var)
+#define bnx2x_sp_mapping(bp, var)\
+	(bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var))
+
+
+#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
+
+struct bnx2x_fastpath {
+
+	struct eth_tx_bd        *tx_desc_ring;
+	struct sw_tx_bd        *tx_buf_ring;
+	dma_addr_t      tx_prods_mapping;
+	struct eth_tx_db_data *hw_tx_prods;
+
+	u16         tx_pkt_prod;
+	u16         tx_pkt_cons;
+	u16         tx_bd_prod;
+	u16         tx_bd_cons;
+
+	dma_addr_t      tx_desc_mapping;
+	dma_addr_t      rx_desc_mapping;
+	dma_addr_t      rx_comp_mapping;
+	dma_addr_t      status_blk_mapping;
+	struct host_status_block    *status_blk;
+
+/*	u16         hw_tx_cons;
+	u16         hw_rx_cons;
+*/
+	u16         *tx_cons_sb;
+	u16         *rx_cons_sb;
+	u16         fp_c_idx;
+	u16         fp_u_idx;
+
+	u16         rx_bd_prod;
+	u16         rx_bd_cons;
+	u16         rx_comp_prod;
+	u16         rx_comp_cons;
+
+	struct eth_rx_bd        *rx_desc_ring;
+	union eth_rx_cqe        *rx_comp_ring;
+	struct sw_rx_bd         *rx_buf_ring;
+	struct bnx2x     *bp; /* parent */
+#ifdef BNX2X_STOP_ON_ERROR
+	u32 next_free, last_alloc;
+#endif
+	int     state;
+#define BNX2X_FP_STATE_CLOSED        0
+#define BNX2X_FP_STATE_IRQ           0x80000
+#define BNX2X_FP_STATE_OPENING       0x90000
+#define BNX2X_FP_STATE_OPEN          0xa0000
+#define BNX2X_FP_STATE_HALTING       0xb0000
+#define BNX2X_FP_STATE_HALTED        0xc0000
+#define BNX2X_FP_STATE_DELETED       0xd0000
+#define BNX2X_FP_STATE_CLOSE_IRQ     0xe0000
+	int         index;
+	struct tasklet_struct fp_task;
+	unsigned long tx_pkt, rx_pkt, rx_calls;
+};
+
+/* attn group wiring */
+struct attn_route {
+	u32	sig[4];
+};
+#define MAX_DYNAMIC_ATTN_GRPS		8
+
+struct bnx2x {
+    /* Fields used in the tx and intr/napi performance paths are grouped */
+    /* together in the beginning of the structure. */
+	void __iomem        *regview;
+	void __iomem        *doorbells;
+
+	struct net_device   *dev;
+	struct pci_dev      *pdev;
+
+	struct bnx2x_fastpath *fp;
+
+	atomic_t        intr_sem;
+
+	struct msix_entry msix_table[MAX_CONTEXT+1];
+
+	int         tx_ring_size;
+
+#ifdef BCM_VLAN
+	struct          vlan_group *vlgrp;
+#endif
+
+	u32         rx_offset;
+	u32         rx_buf_use_size;	/* useable size */
+	u32         rx_buf_size;	/* with alignment */
+	u32         rx_csum;
+
+	struct host_def_status_block    *def_status_blk;
+	u16         def_c_idx;
+	u16         def_u_idx;
+	u16         def_t_idx;
+	u16         def_x_idx;
+	u16         def_att_idx;
+	u16         attn_state;
+	struct attn_route  attn_group[MAX_DYNAMIC_ATTN_GRPS];
+	u32	    aeu_mask;
+	u32         nig_mask;
+
+    /* slow path ring */
+	struct eth_spe      *spq;
+	dma_addr_t  spq_mapping;
+	u16         spq_prod_idx, spq_con_idx;
+	u16         dsb_prod_sp_idx;
+	struct eth_spe   *spq_prod_bd, *spq_last_bd;
+	u16         *spq_hw_con;
+	u16         *dsb_sp_prod;
+	u16         spq_left;
+	spinlock_t  spq_lock; 	/* guess*/
+	u8          stat_pending; /* STAT_QUERY or CFC DELETE ramrod pending */
+
+    /* End of fields used in the performance code paths. */
+
+	struct bnx2x_slowpath *slowpath;
+	dma_addr_t  slowpath_mapping;
+	void        *t1;
+	dma_addr_t  t1_mapping;
+	void        *t2;
+	dma_addr_t  t2_mapping;
+	void        *timers;
+	dma_addr_t  timers_mapping;
+	void        *qm;
+	dma_addr_t  qm_mapping;
+
+	char        *name;
+
+	int         state;
+#define BNX2X_STATE_CLOSED               0x0
+#define BNX2X_STATE_OPENING_WAIT4_LOAD   0x1000
+#define BNX2X_STATE_OPENING_WAIT4_PORT   0x2000
+#define BNX2X_STATE_OPEN                 0x3000
+#define BNX2X_STATE_CLOSING_WAIT4_HALT   0x4000
+#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
+#define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000
+#define BNX2X_STATE_ERROR                0xF000
+
+	int		timer_interval;
+	int		current_interval;
+	struct timer_list	timer;
+	struct work_struct	reset_task;
+	struct tasklet_struct	sp_task;
+	u16		sp_task_en;
+	u16		in_reset_task;
+
+    /* Used to synchronize phy accesses. */
+	spinlock_t	phy_lock;
+
+	u32		flags;
+#define PCIX_FLAG			1
+#define PCI_32BIT_FLAG			2
+#define ONE_TDMA_FLAG			4	/* no longer used */
+#define NO_WOL_FLAG			8
+#define USING_DAC_FLAG			0x10
+#define USING_MSIX_FLAG			0x20
+#define ASF_ENABLE_FLAG			0x40
+
+	int		port;
+
+	u32		shmem_base;
+
+	u32		chip_id;
+/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
+#define CHIP_ID(bp)			(((bp)->chip_id) & 0xfffffff0)
+
+#define CHIP_NUM(bp)			(((bp)->chip_id) & 0xffff0000)
+#define CHIP_NUM_5710			0x57100000
+
+#define CHIP_REV(bp)			(((bp)->chip_id) & 0x0000f000)
+#define CHIP_REV_Ax			0x00000000
+#define CHIP_REV_Bx			0x00001000
+#define CHIP_REV_Cx			0x00002000
+#define CHIP_REV_EMUL			0x0000e000
+#define CHIP_REV_FPGA			0x0000f000
+#define CHIP_REV_IS_SLOW(bp)		((CHIP_REV(bp) == CHIP_REV_EMUL) || \
+					 (CHIP_REV(bp) == CHIP_REV_FPGA))
+
+#define CHIP_METAL(bp)			(((bp)->chip_id) & 0x00000ff0)
+#define CHIP_BOND_ID(bp)		(((bp)->chip_id) & 0x0000000f)
+
+	u16		fw_seq;
+	u16		fw_drv_pulse_wr_seq;
+	u32		fw_mb;
+
+	u32		hw_config;
+	u32		serdes_config;
+	u32		lane_config;
+	u32		ext_phy_config;
+#define XGXS_EXT_PHY_TYPE(bp)		(bp->ext_phy_config & \
+					 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
+#define SERDES_EXT_PHY_TYPE(bp)		(bp->ext_phy_config & \
+					 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
+
+	u32		speed_cap_mask;
+	u32		link_config;
+#define SWITCH_CFG_1G			PORT_FEATURE_CON_SWITCH_1G_SWITCH
+#define SWITCH_CFG_10G			PORT_FEATURE_CON_SWITCH_10G_SWITCH
+#define SWITCH_CFG_AUTO_DETECT		PORT_FEATURE_CON_SWITCH_AUTO_DETECT
+#define SWITCH_CFG_ONE_TIME_DETECT	\
+				PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT
+
+	u8		ser_lane;
+	u8		rx_lane_swap;
+	u8		tx_lane_swap;
+
+	u8		link_up;
+
+	u32		supported;
+/* link settings - missing defines */
+#define SUPPORTED_2500baseT_Full	(1 << 15)
+#define SUPPORTED_CX4			(1 << 16)
+
+	u32		autoneg;
+#define AUTONEG_CL37			SHARED_HW_CFG_AN_ENABLE_CL37
+#define AUTONEG_CL73			SHARED_HW_CFG_AN_ENABLE_CL73
+#define AUTONEG_BAM			SHARED_HW_CFG_AN_ENABLE_BAM
+#define AUTONEG_PARALLEL		\
+				SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
+#define AUTONEG_SGMII_FIBER_AUTODET	\
+				SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
+#define AUTONEG_REMOTE_PHY		SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
+
+	u32		req_autoneg;
+#define AUTONEG_SPEED			0x1
+#define AUTONEG_FLOW_CTRL		0x2
+
+	u32		req_line_speed;
+/* link settings - missing defines */
+#define SPEED_12000			12000
+#define SPEED_12500			12500
+#define SPEED_13000			13000
+#define SPEED_15000			15000
+#define SPEED_16000			16000
+
+	u32		req_duplex;
+	u32		req_flow_ctrl;
+#define FLOW_CTRL_AUTO			PORT_FEATURE_FLOW_CONTROL_AUTO
+#define FLOW_CTRL_TX			PORT_FEATURE_FLOW_CONTROL_TX
+#define FLOW_CTRL_RX			PORT_FEATURE_FLOW_CONTROL_RX
+#define FLOW_CTRL_BOTH			PORT_FEATURE_FLOW_CONTROL_BOTH
+#define FLOW_CTRL_NONE			PORT_FEATURE_FLOW_CONTROL_NONE
+
+	u32		pause_mode;
+#define PAUSE_NONE			0
+#define PAUSE_SYMMETRIC			1
+#define PAUSE_ASYMMETRIC		2
+#define PAUSE_BOTH			3
+
+	u32		advertising;
+/* link settings - missing defines */
+#define ADVERTISED_2500baseT_Full	(1 << 15)
+#define ADVERTISED_CX4			(1 << 16)
+
+	u32		link_status;
+	u32		line_speed;
+	u32		duplex;
+	u32		flow_ctrl;
+
+	u32		phy_flags;
+/*#define PHY_SERDES_FLAG			0x1*/
+#define PHY_BMAC_FLAG			0x2
+#define PHY_EMAC_FLAG			0x4
+#define PHY_XGSX_FLAG			0x8
+#define PHY_SGMII_FLAG			0x10
+#define PHY_INT_MODE_MASK_FLAG		0x300
+#define PHY_INT_MODE_AUTO_POLLING_FLAG	0x100
+#define PHY_INT_MODE_LINK_READY_FLAG	0x200
+
+	u32		phy_addr;
+	u32		phy_id;
+
+	u32		bc_ver;
+
+	u16         bus_speed_mhz;
+	u8          wol;
+
+	u8          pad;
+
+	int         rx_ring_size;
+
+	u16         tx_quick_cons_trip;
+	u16         tx_quick_cons_trip_int;
+	u16         rx_quick_cons_trip;
+	u16         rx_quick_cons_trip_int;
+	u16         comp_prod_trip;
+	u16         comp_prod_trip_int;
+	u16         tx_ticks;
+	u16         tx_ticks_int;
+	u16         com_ticks;
+	u16         com_ticks_int;
+	u16         cmd_ticks;
+	u16         cmd_ticks_int;
+	u16         rx_ticks;
+	u16         rx_ticks_int;
+
+	u32         stats_ticks;
+
+	dma_addr_t		def_status_blk_mapping;
+
+	u32			rx_mode;
+#define BNX2X_RX_MODE_NONE		0
+#define BNX2X_RX_MODE_NORMAL		1
+#define BNX2X_RX_MODE_ALLMULTI		2
+#define BNX2X_RX_MODE_PROMISC		3
+#define BNX2X_MAX_MULTICAST		64
+#define BNX2X_MAX_EMUL_MULTI		16
+
+	int			pm_cap;
+	int			pcie_cap;
+
+	int			panic;
+	int			msglevel;
+	int			num_queues;
+
+	/* used to synchronize stats collecting */
+	atomic_t		stats_state;
+#define STATS_STATE_DISABLE		0
+#define STATS_STATE_ENABLE		1
+#define STATS_STATE_STOP		2 /* stop stats on next iteration */
+
+	u32			old_brb_discard;
+	struct bmac_stats	old_bmac;
+	struct net_device_stats	net_stats;
+	struct dmae_command	dmae;
+
+	int			flash_size;
+#define NVRAM_1MB_SIZE			0x20000	/* 1M bit in bytes */
+#define NVRAM_TIMEOUT_COUNT		30000
+#define NVRAM_PAGE_SIZE			256
+
+#define BNX2X_NUM_STATS			45
+#define BNX2X_NUM_TESTS			2
+
+	struct z_stream_s	*strm;
+	void			*gunzip_buf;
+};
+
+/* shorter names so the code below fits in 80 columns */
+#define XGSX_RESET_BITS \
+	(MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW |   \
+	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ |      \
+	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN |    \
+	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD | \
+	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB)
+
+#define SERDES_RESET_BITS \
+	(MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW | \
+	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ |    \
+	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN |  \
+	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD)
+
+#define LINK_10THD      LINK_STATUS_SPEED_AND_DUPLEX_10THD
+#define LINK_10TFD      LINK_STATUS_SPEED_AND_DUPLEX_10TFD
+#define LINK_100TXHD    LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
+#define LINK_100T4      LINK_STATUS_SPEED_AND_DUPLEX_100T4
+#define LINK_100TXFD    LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
+#define LINK_1000THD    LINK_STATUS_SPEED_AND_DUPLEX_1000THD
+#define LINK_1000TFD    LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
+#define LINK_1000XFD    LINK_STATUS_SPEED_AND_DUPLEX_1000XFD
+#define LINK_2500THD    LINK_STATUS_SPEED_AND_DUPLEX_2500THD
+#define LINK_2500TFD    LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
+#define LINK_2500XFD    LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
+#define LINK_10GTFD     LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
+#define LINK_10GXFD     LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
+#define LINK_12GTFD     LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
+#define LINK_12GXFD     LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
+#define LINK_12_5GTFD   LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD
+#define LINK_12_5GXFD   LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD
+#define LINK_13GTFD     LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
+#define LINK_13GXFD     LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
+#define LINK_15GTFD     LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
+#define LINK_15GXFD     LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
+#define LINK_16GTFD     LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
+#define LINK_16GXFD     LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
+
+#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
+			MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
+#define GP_STATUS_PAUSE_RSOLUTION_RXSIDE \
+			MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE
+#define GP_STATUS_SPEED_MASK \
+			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK
+#define GP_STATUS_10M	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M
+#define GP_STATUS_100M	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M
+#define GP_STATUS_1G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G
+#define GP_STATUS_2_5G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G
+#define GP_STATUS_5G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G
+#define GP_STATUS_6G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G
+#define GP_STATUS_10G_HIG \
+			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG
+#define GP_STATUS_10G_CX4 \
+			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4
+#define GP_STATUS_12G_HIG \
+			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG
+#define GP_STATUS_12_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G
+#define GP_STATUS_13G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G
+#define GP_STATUS_15G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G
+#define GP_STATUS_16G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G
+#define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX
+#define GP_STATUS_10G_KX4 \
+			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
+
+#define NIG_STATUS_INTERRUPT_XGXS0_LINK10G \
+	NIG_STATUS_INTERRUPT_PORT0_REGISTERS_STATUS_XGXS0_LINK10G
+#define NIG_XGXS0_LINK_STATUS \
+	NIG_STATUS_INTERRUPT_PORT0_REGISTERS_STATUS_XGXS0_LINK_STATUS
+#define NIG_XGXS0_LINK_STATUS_SIZE \
+	NIG_STATUS_INTERRUPT_PORT0_REGISTERS_STATUS_XGXS0_LINK_STATUS_SIZE
+#define NIG_SERDES0_LINK_STATUS \
+	NIG_STATUS_INTERRUPT_PORT0_REGISTERS_STATUS_SERDES0_LINK_STATUS
+#define MDIO_AN_CL73_OR_37_COMPLETE \
+	(MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | \
+	 MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE)
+#define NIG_MASK_MI_INT \
+	NIG_MASK_INTERRUPT_PORT0_REGISTERS_MASK_EMAC0_MISC_MI_INT
+#define NIG_MASK_SERDES0_LINK_STATUS \
+	NIG_MASK_INTERRUPT_PORT0_REGISTERS_MASK_SERDES0_LINK_STATUS
+#define NIG_MASK_XGXS0_LINK_STATUS \
+	NIG_MASK_INTERRUPT_PORT0_REGISTERS_MASK_XGXS0_LINK_STATUS
+#define NIG_MASK_XGXS0_LINK10G \
+	NIG_MASK_INTERRUPT_PORT0_REGISTERS_MASK_XGXS0_LINK10G
+
+#define BNX2X_RX_SUM_OK(cqe) \
+	(!(cqe->fast_path_cqe.status_flags & \
+	 (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | \
+	  ETH_FAST_PATH_RX_CQE_TCP_XSUM_NO_VALIDATION_FLG)))
+
+#define BNX2X_MC_ASSERT_BITS \
+	(GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+	 GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+	 GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+	 GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT))
+#define BNX2X_MCP_ASSERT \
+	GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT)
+#define BNX2X_DOORQ_ASSERT \
+	AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT
+#define BNX2X_FLOW_MODE_SHIFT \
+	TSTORM_ETH_LEADING_CONN_CONFIG_FLOW_MODE_SHIFT
+
+#define MISC_RESET_XGXS0_TXD_FIFO_RSTB \
+	MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB
+
+#define BNX2X_TX_SB_INDEX \
+	&fp->status_blk->c_status_block \
+	.index_values[HC_INDEX_C_ETH_TX_CQ_CONS]
+
+#define BNX2X_SPQ_SB_INDEX \
+	&bp->def_status_blk->x_def_status_block \
+	.index_values[HC_INDEX_DEF_X_SPQ_CONS]
+
+#define BNX2X_SP_DSB_INDEX \
+	&bp->def_status_blk->c_def_status_block\
+	.index_values[HC_INDEX_DEF_C_ETH_SLOW_PATH]
+
+#define BNX2X_RX_SB_INDEX \
+	&fp->status_blk->u_status_block \
+	.index_values[HC_INDEX_U_ETH_RX_CQ_CONS]
+
+#define CAM_IS_INVALID(x) \
+(x.target_table_entry.flags == TSTORM_CAM_TAGET_TABLE_ENTRY_ACTION_TYPE)
+
+/* DMAE command defines */
+#define DMAE_CMD_SRC_PCI		0
+#define DMAE_CMD_SRC_GRC		DMAE_COMMAND_SRC
+
+#define DMAE_CMD_DST_PCI		(1 << DMAE_COMMAND_DST_SHIFT)
+#define DMAE_CMD_DST_GRC		(2 << DMAE_COMMAND_DST_SHIFT)
+
+#define DMAE_CMD_C_DST_PCI		0
+#define DMAE_CMD_C_DST_GRC		(1 << DMAE_COMMAND_C_DST_SHIFT)
+
+#define DMAE_CMD_C_ENABLE		DMAE_COMMAND_C_TYPE_ENABLE
+
+#define DMAE_CMD_ENDIANITY_NO_SWAP	(0 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_B_SWAP	(1 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_DW_SWAP	(2 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_B_DW_SWAP	(3 << DMAE_COMMAND_ENDIANITY_SHIFT)
+
+#define DMAE_CMD_PORT_0			0
+#define DMAE_CMD_PORT_1			DMAE_COMMAND_PORT
+
+#define DMAE_CMD_SRC_RESET		DMAE_COMMAND_SRC_RESET
+#define DMAE_CMD_DST_RESET		DMAE_COMMAND_DST_RESET
+
+/* MISC_REGISTERS_RESET_REG - this is here for the HSI to work, don't touch */
+
+#endif /* bnx2x.h */


