[<prev] [next>] [day] [month] [year] [list]
Message-ID: <201002121400.o1CE0vBa031882@blc-10-10.brocade.com>
Date: Fri, 12 Feb 2010 06:00:57 -0800
From: Rasesh Mody <rmody@...cade.com>
To: <netdev@...r.kernel.org>
CC: <adapter_linux_open_src_team@...cade.com>
Subject: [PATCH 2/6] bna: Brocade 10Gb Ethernet device driver
From: Rasesh Mody <rmody@...cade.com>
This is patch 2/6 which contains linux driver source for
Brocade's BR1010/BR1020 10Gb CEE capable ethernet adapter.
Source is based against net-next-2.6.
We wish this patch to be considered for inclusion in net-next-2.6
Signed-off-by: Rasesh Mody <rmody@...cade.com>
---
bfad_fwimg.c | 94 ++
bna_fn.c | 1797 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
bna_queue.c | 394 ++++++++++++
bnad_ethtool.c | 1100 ++++++++++++++++++++++++++++++++++
4 files changed, 3385 insertions(+)
diff -ruP net-next-2.6.33-rc5-orig/drivers/net/bna/bfad_fwimg.c net-next-2.6.33-rc5-mod/drivers/net/bna/bfad_fwimg.c
--- net-next-2.6.33-rc5-orig/drivers/net/bna/bfad_fwimg.c 1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6.33-rc5-mod/drivers/net/bna/bfad_fwimg.c 2010-02-12 01:39:41.012908000 -0800
@@ -0,0 +1,94 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ * bfad_fwimg.c Linux driver PCI interface module.
+ */
+#include "cna.h"
+#include "defs/bfa_defs_pci.h"
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+#include <asm/uaccess.h>
+#include <asm/fcntl.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include "bfa_fwimg_priv.h"
+
+u32 bfi_image_ct_size;
+u32 bfi_image_cb_size;
+u32 *bfi_image_ct;
+u32 *bfi_image_cb;
+
+#define BFAD_FW_FILE_CT "ctfw.bin"
+#define BFAD_FW_FILE_CB "cbfw.bin"
+MODULE_FIRMWARE(BFAD_FW_FILE_CT);
+MODULE_FIRMWARE(BFAD_FW_FILE_CB);
+
+/**
+ * bfad_read_firmware - load the firmware file "fw_name" into a buffer.
+ *
+ * On success, *bfi_image points to a vmalloc'ed copy of the image (owned
+ * by the caller) and *bfi_image_size holds its length in 32-bit words.
+ *
+ * @return the image buffer, or NULL on failure.
+ */
+u32 *
+bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
+			u32 *bfi_image_size, char *fw_name)
+{
+	const struct firmware *fw;
+
+	if (request_firmware(&fw, fw_name, &pdev->dev)) {
+		printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
+		goto error;
+	}
+
+	*bfi_image = vmalloc(fw->size);
+	if (NULL == *bfi_image) {
+		printk(KERN_ALERT "Fail to allocate buffer for fw image "
+			"size=%x!\n", (u32) fw->size);
+		/* fix: release the firmware blob, else it leaks on this path */
+		release_firmware(fw);
+		goto error;
+	}
+
+	memcpy(*bfi_image, fw->data, fw->size);
+	*bfi_image_size = fw->size/sizeof(u32);
+	release_firmware(fw);
+
+	return *bfi_image;
+
+error:
+	return NULL;
+}
+
+/**
+ * bfad_get_firmware_buf - return the cached firmware image for this device.
+ *
+ * Selects ctfw.bin or cbfw.bin by PCI device id, loading it on first use
+ * into the bfi_image_ct/bfi_image_cb globals.
+ *
+ * NOTE(review): if bfad_read_firmware() fails, the cached pointer remains
+ * NULL and NULL is returned -- callers must check the result.
+ */
+u32 *
+bfad_get_firmware_buf(struct pci_dev *pdev)
+{
+	if (pdev->device == BFA_PCI_DEVICE_ID_CT) {
+		if (bfi_image_ct_size == 0)
+			bfad_read_firmware(pdev, &bfi_image_ct,
+				&bfi_image_ct_size, BFAD_FW_FILE_CT);
+		return bfi_image_ct;
+	} else {
+		if (bfi_image_cb_size == 0)
+			bfad_read_firmware(pdev, &bfi_image_cb,
+				&bfi_image_cb_size, BFAD_FW_FILE_CB);
+		return bfi_image_cb;
+	}
+}
+
+/*
+ * Return a pointer into the CT firmware image; "off" is in 32-bit words
+ * (pointer arithmetic on u32 *), not bytes.  No bounds checking.
+ */
+u32 *
+bfi_image_ct_get_chunk(u32 off)
+{ return (u32 *)(bfi_image_ct + off); }
+
+/*
+ * Return a pointer into the CB firmware image; "off" is in 32-bit words
+ * (pointer arithmetic on u32 *), not bytes.  No bounds checking.
+ */
+u32 *
+bfi_image_cb_get_chunk(u32 off)
+{ return (u32 *)(bfi_image_cb + off); }
diff -ruP net-next-2.6.33-rc5-orig/drivers/net/bna/bna_fn.c net-next-2.6.33-rc5-mod/drivers/net/bna/bna_fn.c
--- net-next-2.6.33-rc5-orig/drivers/net/bna/bna_fn.c 1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6.33-rc5-mod/drivers/net/bna/bna_fn.c 2010-02-12 01:39:41.070909000 -0800
@@ -0,0 +1,1797 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * @file bna_fn.c BNA Rx and Tx Function Management
+ */
+
+#include "cna.h"
+#include "bna.h"
+#include "bna_hwreg.h"
+#include "bna_priv.h"
+#include "bfi/bfi_ll.h"
+#include "bfi/bfi_cee.h"
+
+/*
+ * 12-bit mask for the maximum VLAN id: VLAN ids that overflow the
+ * 4095 maximum are wrapped around with this mask.
+ */
+#define BNA_MAX_VLAN_ID_MASK 0x00000fff
+
+const struct bna_chip_regs_offset reg_offset[] =
+ { {HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS,
+ HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0},
+{HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS,
+ HOSTFN1_INT_MASK, HOST_MSIX_ERR_INDEX_FN1},
+{HOST_PAGE_NUM_FN2, HOSTFN2_INT_STATUS,
+ HOSTFN2_INT_MASK, HOST_MSIX_ERR_INDEX_FN2},
+{HOST_PAGE_NUM_FN3, HOSTFN3_INT_STATUS,
+ HOSTFN3_INT_MASK, HOST_MSIX_ERR_INDEX_FN3},
+};
+const struct mac bna_bcast_addr = { {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
+const struct mac bna_zero_addr = { {0x0, 0x0, 0x0, 0x0, 0x0, 0x0} };
+
+/**
+ * bna_init()
+ *
+ * Called by the driver during initialization. The driver is
+ * expected to allocate struct bna_dev structure for the BNA layer.
+ *
+ * @return void
+ */
+void bna_init(struct bna_dev *dev, void *bar0, void *stats,
+		struct bna_dma_addr stats_dma, struct bfa_trc_mod *trcmod,
+		struct bfa_log_mod *logmod)
+{
+	u32 pcifn;
+
+	memset(dev, 0, sizeof(struct bna_dev));
+
+	dev->trcmod = trcmod;
+	dev->logmod = logmod;
+
+	dev->bar0 = (u8 *) bar0;
+	dev->hw_stats = (struct bfi_ll_stats *)stats;
+	dev->hw_stats_dma.msb = stats_dma.msb;
+	dev->hw_stats_dma.lsb = stats_dma.lsb;
+
+	/* no RxF owns promiscuous/default mode yet */
+	dev->rxf_promiscuous_id = BNA_RXF_ID_NONE;
+	dev->rxf_default_id = BNA_RXF_ID_NONE;
+
+	/*
+	 * NOTE(review): FNC_ID_REG is read twice and only the second value
+	 * is kept -- presumably a required hardware dummy read; confirm,
+	 * otherwise one readl() should be dropped.
+	 */
+	pcifn = readl(dev->bar0 + FNC_ID_REG);
+	pcifn = readl(dev->bar0 + FNC_ID_REG);
+
+	/* per-PCI-function register offsets (see reg_offset[] table) */
+	dev->regs.page_addr = dev->bar0 + reg_offset[pcifn].page_addr;
+	dev->regs.fn_int_status = dev->bar0 + reg_offset[pcifn].fn_int_status;
+	dev->regs.fn_int_mask = dev->bar0 + reg_offset[pcifn].fn_int_mask;
+
+	/*
+	 * NOTE(review): this maps PCI fns 0-2 to port 0 and only fn 3 to
+	 * port 1; with four functions and two ports an even split
+	 * (pcifn < 2) might be intended -- confirm against the chip spec.
+	 */
+	if (pcifn < 3)
+		dev->port = 0;
+	else
+		dev->port = 1;
+
+	dev->pci_fn = pcifn;
+
+	dev->ioc_disable_pending = 0;
+}
+
+/**
+ * bna_rit_config_set()
+ *
+ * Loads RIT entries "rit" into RIT starting from RIT index "rit_id".
+ * Care must be taken not to overlap regions within the RIT.
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] rit_offset - offset into the RIT
+ * @param[in] rit - RIT entry
+ * @param[in] rit_size - size of RIT entry
+ *
+ * @return void
+ */
+void bna_rit_config_set(struct bna_dev *dev, unsigned int rit_offset,
+	const struct bna_rit_entry rit[], unsigned int rit_size)
+{
+	int i;
+
+	struct bna_rit_mem *rit_mem;
+
+	rit_mem = (struct bna_rit_mem *)
+		BNA_GET_MEM_BASE_ADDR(dev->bar0, FUNCTION_TO_RXQ_TRANSLATE);
+
+	/* remember the size programmed at this offset */
+	dev->rit_size[rit_offset] = rit_size;
+
+	/* select the RIT memory page for this port before writing */
+	writel(BNA_GET_PAGE_NUM(RXA0_MEM_BLK_BASE_PG_NUM + dev->port,
+		FUNCTION_TO_RXQ_TRANSLATE), dev->regs.page_addr);
+
+	/* each entry packs large rxq id (bits 6+) and small rxq id (bits 0-5) */
+	for (i = 0; i < rit_size; i++)
+		writel(rit[i].large_rxq_id << 6 | rit[i].small_rxq_id,
+			&rit_mem[i + rit_offset]);
+}
+
+/**
+ * bna_rxf_config_set()
+ *
+ * For RxF "rxf_id", it configures RxF based on "cfg_ptr", and indicates
+ * to the statistics collector to collect statistics for this Rx-Function.
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] rxf_id - rx-function ID.
+ * @param[in] cfg_ptr - pointer to rx-function configuration.
+ *
+ * @return BNA_OK - successful
+ * @return BNA_FAIL - failed on sanity checks.
+ */
+enum bna_status bna_rxf_config_set(struct bna_dev *dev,
+	unsigned int rxf_id, const struct bna_rxf_config *cfg_ptr)
+{
+	u32 i;
+
+	struct bna_rss_mem *rss_mem;
+	struct bna_rx_fndb_ram *rx_fndb_ram;
+
+	rss_mem = (struct bna_rss_mem *)
+		BNA_GET_MEM_BASE_ADDR(dev->bar0, RSS_TABLE_BASE_OFFSET);
+	rx_fndb_ram = (struct bna_rx_fndb_ram *)
+		BNA_GET_MEM_BASE_ADDR(dev->bar0, RX_FNDB_RAM_BASE_OFFSET);
+
+	/*
+	 * NOTE(review): the literal 1 compared against hds.type looks like
+	 * it should be a named BNA_HDS_* constant -- confirm and replace.
+	 */
+	if (((cfg_ptr->flags & BNA_RXF_CF_SM_LG_RXQ)) &&
+		(cfg_ptr->hds.type == 1)) {
+		/* HDS and small-large RxQs are mutually exclusive */
+		return BNA_FAIL;
+	}
+
+	if (cfg_ptr->flags & BNA_RXF_CF_RSS_ENABLE) {
+
+		/* configure RSS Table */
+		writel(BNA_GET_PAGE_NUM(RAD0_MEM_BLK_BASE_PG_NUM +
+			dev->port,
+			RSS_TABLE_BASE_OFFSET), dev->regs.page_addr);
+
+		/* temporarily disable RSS, while hash value is being written */
+		writel(0, &rss_mem[0].type_n_hash);
+
+		/* hash key words are written in reverse order */
+		for (i = 0; i < BNA_RSS_HASH_KEY_LEN; i++) {
+			writel(
+				htonl(cfg_ptr->rss.
+					toeplitz_hash_key[i]), &rss_mem[0].
+					hash_key[(BNA_RSS_HASH_KEY_LEN - 1) - i]);
+		}
+
+		/* re-enable RSS with the requested type and mask */
+		writel(cfg_ptr->rss.type | cfg_ptr->rss.hash_mask,
+			&rss_mem[0].type_n_hash);
+
+	}
+	/* configure RxF based on "cfg_ptr" */
+	writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
+		(dev->port * 2),
+		RX_FNDB_RAM_BASE_OFFSET), dev->regs.page_addr);
+
+	/* we always use RSS table 0 */
+	writel(cfg_ptr->flags & BNA_RXF_CF_RSS_ENABLE,
+		&rx_fndb_ram[rxf_id].rss_prop);
+
+	/* small large buffer enable/disable */
+	writel((cfg_ptr->flags & BNA_RXF_CF_SM_LG_RXQ) | 0x80,
+		&rx_fndb_ram[rxf_id].size_routing_props);
+
+	/* RIT offset, HDS forced offset, multicast RxQ Id */
+	writel(
+		(cfg_ptr->rit_offset << 16) | (cfg_ptr->hds.
+			forced_offset << 8) |
+		(cfg_ptr->hds.type & BNA_HDS_FORCED) | cfg_ptr->
+			mcast_rxq_id, &rx_fndb_ram[rxf_id].rit_hds_mcastq);
+
+	/*
+	 * Default vlan tag, default function enable, strip vlan bytes,
+	 * HDS type, header size
+	 */
+	writel(
+		(cfg_ptr->default_vlan << 16) |
+		(cfg_ptr->flags &
+		(BNA_RXF_CF_DEFAULT_VLAN |
+		BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE |
+		BNA_RXF_CF_VLAN_STRIP)) |
+		(cfg_ptr->hds.type & ~BNA_HDS_FORCED) | cfg_ptr->hds.
+		header_size, &rx_fndb_ram[rxf_id].control_flags);
+
+	/* turn on statistics collection for this RxF */
+	dev->rxf_active |= ((u64) 1 << rxf_id);
+	return BNA_OK;
+}
+
+/**
+ * bna_rxf_config_clear()
+ *
+ * For RxF "rxf_id", it clears its configuration and indicates to the
+ * statistics collector to stop collecting statistics for this
+ * Rx-Function.
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] rxf_id - rx-function ID.
+ *
+ * @return void
+ */
+void
+bna_rxf_config_clear(struct bna_dev *dev, unsigned int rxf_id)
+{
+	struct bna_rx_fndb_ram *rx_fndb_ram;
+
+	rx_fndb_ram = (struct bna_rx_fndb_ram *)
+		BNA_GET_MEM_BASE_ADDR(dev->bar0, RX_FNDB_RAM_BASE_OFFSET);
+
+	/* clear configuration of RxF base */
+	writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
+		(dev->port * 2),
+		RX_FNDB_RAM_BASE_OFFSET), dev->regs.page_addr);
+
+	/* we always use RSS table 0 */
+	writel(0, &rx_fndb_ram[rxf_id].rss_prop);
+
+	/* small large buffer enable/disable; 0x80 is kept set, mirroring
+	 * bna_rxf_config_set() */
+	writel(0x80, &rx_fndb_ram[rxf_id].size_routing_props);
+
+	/* RIT offset, HDS forced offset, multicast RxQ Id */
+	writel(0, &rx_fndb_ram[rxf_id].rit_hds_mcastq);
+
+	/*
+	 * default vlan tag, default function enable, strip vlan bytes,
+	 * HDS type, header size
+	 */
+	writel(0, &rx_fndb_ram[rxf_id].control_flags);
+
+	/* turn off statistics collection for this RxF */
+	dev->rxf_active &= ~((u64) 1 << rxf_id);
+}
+
+/**
+ * bna_rxf_disable()
+ *
+ * Disables the Rx Function without clearing the configuration
+ * Also disables collection of statistics.
+ *
+ * @param[in] dev - Pointer to BNA device handle
+ * @param[in] rxf_id - Id of the Rx Function to be disabled
+ *
+ * @return BNA_OK if mbox command succeeded, else BNA_FAIL
+ */
+enum bna_status
+bna_rxf_disable(struct bna_dev *dev, unsigned int rxf_id)
+{
+	struct bfi_ll_rxf_multi_req ll_req;
+	/*
+	 * fix: "1 << rxf_id" shifts an int, which is undefined for
+	 * rxf_id >= 32; widen before shifting (the mask is 64-bit and is
+	 * split into two 32-bit words below).
+	 */
+	u64 bit_mask = (u64) 1 << rxf_id;
+	enum bna_status status;
+
+	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
+	ll_req.rxf_id_mask[0] = htonl(lower_32_bits(bit_mask));
+	ll_req.rxf_id_mask[1] = htonl(upper_32_bits(bit_mask));
+	ll_req.enable = 0;
+
+	status = bna_mbox_send(dev, &ll_req, sizeof(ll_req), dev->cbarg);
+	/* stop statistics collection only if the mailbox command was queued */
+	if (!status)
+		dev->rxf_active &= ~bit_mask;
+
+	return status;
+}
+
+/**
+ * bna_rxf_enable()
+ *
+ * Enables the Rx Function
+ *
+ * @param[in] dev - Pointer to BNA device handle
+ * @param[in] rxf_id - Id of the Rx Function to be disabled
+ *
+ * @return BNA_OK if mbox command succeeded, else BNA_FAIL
+ */
+enum bna_status
+bna_rxf_enable(struct bna_dev *dev, unsigned int rxf_id)
+{
+	struct bfi_ll_rxf_multi_req ll_req;
+	/*
+	 * fix: "1 << rxf_id" shifts an int, which is undefined for
+	 * rxf_id >= 32; widen before shifting (the mask is 64-bit and is
+	 * split into two 32-bit words below).
+	 */
+	u64 bit_mask = (u64) 1 << rxf_id;
+	enum bna_status status;
+
+	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
+	ll_req.rxf_id_mask[0] = htonl(lower_32_bits(bit_mask));
+	ll_req.rxf_id_mask[1] = htonl(upper_32_bits(bit_mask));
+	ll_req.enable = 1;
+
+	status = bna_mbox_send(dev, &ll_req, sizeof(ll_req), dev->cbarg);
+	/* start statistics collection only if the mailbox command was queued */
+	if (!status)
+		dev->rxf_active |= bit_mask;
+
+	return status;
+}
+
+/**
+ * bna_multi_rxf_active()
+ *
+ * Enables or disables several Rx Functions in one mailbox command and
+ * updates the statistics-collection mask accordingly.
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] rxf_id_mask - bitmask of rx-function IDs to act on
+ * @param[in] enable - nonzero to enable, zero to disable
+ *
+ * @return BNA_OK if mbox command succeeded, else BNA_FAIL
+ */
+enum bna_status bna_multi_rxf_active(struct bna_dev *dev,
+	u64 rxf_id_mask, u8 enable)
+{
+	struct bfi_ll_rxf_multi_req ll_req;
+	enum bna_status status;
+
+	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
+	ll_req.rxf_id_mask[0] = htonl(lower_32_bits(rxf_id_mask));
+	ll_req.rxf_id_mask[1] = htonl(upper_32_bits(rxf_id_mask));
+	ll_req.enable = enable;
+
+	status = bna_mbox_send(dev, &ll_req, sizeof(ll_req), dev->cbarg);
+	/* track active RxFs only when the command was queued successfully */
+	if (!status) {
+		if (enable)
+			dev->rxf_active |= rxf_id_mask;
+		else
+			dev->rxf_active &= ~rxf_id_mask;
+
+	}
+	return status;
+}
+
+/**
+ * bna_rxf_ucast_mac_get()
+ *
+ * Reads the unicast MAC stored at UCAM entry "entry" and the id of the
+ * rx-function that owns that entry.
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[out] rxf_id - rx-function ID owning the entry
+ * @param[in] entry - offset into UCAM to read
+ * @param[out] mac_addr_ptr - buffer receiving the mac address
+ *
+ * @return void
+ */
+void bna_rxf_ucast_mac_get(struct bna_dev *dev, unsigned int *rxf_id,
+	unsigned int entry, const struct mac *mac_addr_ptr)
+{
+	u32 mac_47_32, mac_31_0;
+	/*
+	 * NOTE(review): the const qualifier is cast away and the MAC is
+	 * written through mac_ptr below -- the parameter should be
+	 * non-const (writing through a pointer derived from a const
+	 * object is undefined behavior).
+	 */
+	u8 *mac_ptr = (u8 *) mac_addr_ptr;
+	struct bna_cam *ucam;
+	struct bna_ucast_mem *ucam_ram;
+
+	ucam = (struct bna_cam *)
+		BNA_GET_MEM_BASE_ADDR(dev->bar0, UCAST_CAM_BASE_OFFSET);
+	ucam_ram = (struct bna_ucast_mem *)
+		BNA_GET_MEM_BASE_ADDR(dev->bar0, UCAST_RAM_BASE_OFFSET);
+
+	writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
+		(dev->port * 2), UCAST_RAM_BASE_OFFSET), dev->regs.page_addr);
+
+	/* read the rx-function id owning this entry (low 6 bits) */
+	*rxf_id = (readl(&ucam_ram[entry]) & 0x3f);
+
+	writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
+		(dev->port * 2), UCAST_CAM_BASE_OFFSET), dev->regs.page_addr);
+
+	/* read the unicast MAC from the CAM entry, split across two words */
+	mac_47_32 = (readl(&ucam[entry].cam_mac_addr_47_32) & 0xffff);
+	mac_31_0 = readl(&ucam[entry].cam_mac_addr_31_0);
+
+	/* unpack big-endian: byte 0 is the most significant */
+	mac_ptr[0] = mac_47_32 >> 8;
+	mac_ptr[1] = mac_47_32 & 0xff;
+
+	mac_ptr[2] = mac_31_0 >> 24;
+	mac_ptr[3] = (mac_31_0 >> 16) & 0xff;
+	mac_ptr[4] = (mac_31_0 >> 8) & 0xff;
+	mac_ptr[5] = mac_31_0 & 0xff;
+}
+
+/*
+ * Helper: build a MAC-address mailbox request of type "cmd" for RxF
+ * "rxf_id" and hand it to the firmware via bna_mbox_send().
+ */
+static enum bna_status bna_rxf_mac_mbox_cmd(struct bna_dev *dev,
+	unsigned int rxf_id, u8 cmd, const struct mac *mac_addr)
+{
+	struct bfi_ll_mac_addr_req req;
+
+	bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
+
+	req.rxf_id = rxf_id;
+	req.mac_addr = *mac_addr;
+
+	/* send command to firmware */
+	return bna_mbox_send(dev, &req, sizeof(req), dev->cbarg);
+}
+
+/**
+ * bna_rxf_ucast_mac_set()
+ *
+ * For RxF "rxf_id", it overwrites the burnt-in unicast MAC with
+ * the one specified by "mac_ptr".
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] rxf_id - rx-function ID.
+ * @param[in] mac_addr_ptr - pointer to mac address to set
+ *
+ * @return BNA_OK - successful
+ * @return BNA_FAIL - failed on sanity checks.
+ */
+enum bna_status bna_rxf_ucast_mac_set(struct bna_dev *dev,
+	unsigned int rxf_id, const struct mac *mac_addr_ptr)
+{
+
+	/* we are supposed to set MAC addresses for default RxF only */
+	if (dev->rxf_default_id == BNA_RXF_ID_NONE) {
+		if (rxf_id != BNA_DEFAULT_RXF_ID)
+			return BNA_FAIL;
+	} else {
+		if (rxf_id != dev->rxf_default_id)
+			return BNA_FAIL;
+	}
+
+	/* overwrite the burnt-in MAC via mailbox command */
+	return bna_rxf_mac_mbox_cmd(dev, rxf_id, BFI_LL_H2I_MAC_UCAST_SET_REQ,
+		mac_addr_ptr);
+}
+
+/**
+ * bna_rxf_ucast_mac_add()
+ *
+ * For RxF "rxf_id", it adds the unicast MAC specified by "mac_ptr".
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] rxf_id - rx-function ID.
+ * @param[in] mac_addr_ptr - pointer to mac address to add
+ *
+ * @return BNA_OK - successful
+ * @return BNA_FAIL - failed on sanity checks.
+ */
+enum bna_status bna_rxf_ucast_mac_add(struct bna_dev *dev,
+	unsigned int rxf_id, const struct mac *mac_addr_ptr)
+{
+	/* we are not supposed to add MAC addresses to default RxF */
+	if (rxf_id == dev->rxf_default_id)
+		return BNA_FAIL;
+
+
+	/* ask firmware to add the unicast MAC for this RxF */
+	return bna_rxf_mac_mbox_cmd(dev, rxf_id, BFI_LL_H2I_MAC_UCAST_ADD_REQ,
+		mac_addr_ptr);
+}
+
+/**
+ * bna_rxf_ucast_mac_del()
+ *
+ * For RxF "rxf_id", it deletes the unicast MAC specified by "mac_ptr".
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] rxf_id - rx-function ID.
+ * @param[in] mac_addr_ptr - pointer to mac address to delete
+ *
+ * @return BNA_OK - successful
+ * @return BNA_FAIL - failed on sanity checks.
+ */
+enum bna_status bna_rxf_ucast_mac_del(struct bna_dev *dev,
+	unsigned int rxf_id, const struct mac *mac_addr_ptr)
+{
+
+	/* we are not supposed to delete MAC addresses from default RxF */
+	if (rxf_id == dev->rxf_default_id)
+		return BNA_FAIL;
+
+
+	/* ask firmware to delete the unicast MAC for this RxF */
+	return bna_rxf_mac_mbox_cmd(dev, rxf_id, BFI_LL_H2I_MAC_UCAST_DEL_REQ,
+		mac_addr_ptr);
+}
+
+/**
+ * bna_rxf_mcast_mac_add()
+ *
+ * For RxF "rxf_id", it adds the multicast MAC specified by
+ * "mac_ptr".
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] rxf_id - rx-function ID.
+ * @param[in] mac_addr_ptr - pointer to mac address to add
+ *
+ * @return BNA_OK - successful
+ * @return BNA_FAIL - failed on sanity checks.
+ */
+enum bna_status bna_rxf_mcast_mac_add(struct bna_dev *dev,
+	unsigned int rxf_id, const struct mac *mac_addr_ptr)
+{
+	u32 i;
+
+	/* look for the address in the driver's shadow multicast table */
+	for (i = 0; i < BNA_MCAST_TABLE_SIZE; i++) {
+		if (BNA_MAC_IS_EQUAL(&dev->mcast_addr[i], mac_addr_ptr))
+			break;
+	}
+
+	if (i == BNA_MCAST_TABLE_SIZE) {
+		/*
+		 * no existing entry found we need to find the
+		 * first unused entry
+		 */
+		for (i = 0; i < BNA_MCAST_TABLE_SIZE; i++) {
+			/* unused entry found, stop and use it */
+			if (BNA_MAC_IS_EQUAL
+				(&dev->mcast_addr[i], &bna_zero_addr))
+				break;
+		}
+	}
+
+	if (i == BNA_MCAST_TABLE_SIZE) {
+		/* no entry available, table full */
+		return BNA_FAIL;
+	}
+
+	/*
+	 * NOTE(review): the slot is claimed before the mailbox command is
+	 * sent; if bna_mbox_send() fails the shadow table and firmware
+	 * state diverge -- confirm whether callers recover from this.
+	 */
+	dev->mcast_addr[i] = *mac_addr_ptr;
+
+	return bna_rxf_mac_mbox_cmd(dev, rxf_id, BFI_LL_H2I_MAC_MCAST_ADD_REQ,
+		mac_addr_ptr);
+}
+
+/**
+ * bna_rxf_mcast_mac_del()
+ *
+ * For RxF "rxf_id", it deletes the multicast MAC specified by
+ * "mac_ptr".
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] rxf_id - rx-function ID.
+ * @param[in] mac_addr_ptr - pointer to mac address to delete
+ *
+ * @return BNA_OK - successful
+ * @return BNA_FAIL - failed on sanity checks.
+ */
+enum bna_status bna_rxf_mcast_mac_del(struct bna_dev *dev,
+	unsigned int rxf_id, const struct mac *mac_addr_ptr)
+{
+	u32 i;
+
+	/* locate the address in the driver's shadow multicast table */
+	for (i = 0; i < BNA_MCAST_TABLE_SIZE; i++) {
+		if (BNA_MAC_IS_EQUAL(&dev->mcast_addr[i], mac_addr_ptr))
+			break;
+	}
+
+	if (i == BNA_MCAST_TABLE_SIZE) {
+		/* no existing entry found */
+		return BNA_FAIL;
+	}
+	/* NOTE(review): the slot is zeroed before the mailbox send; on
+	 * mbox failure the shadow table and firmware state diverge */
+	dev->mcast_addr[i] = bna_zero_addr;
+
+	return bna_rxf_mac_mbox_cmd(dev, rxf_id, BFI_LL_H2I_MAC_MCAST_DEL_REQ,
+		mac_addr_ptr);
+}
+
+/**
+ * bna_rxf_mcast_mac_set_list()
+ *
+ * For RxF "rxf_id", it sets the multicast MAC addresses
+ * specified by "mac_addr_ptr". The function first deletes the MAC addresses in
+ * the existing list that is not found in the new list. It then adds the new
+ * addresses that are in the new list but not in the old list. It then replaces
+ * the old list with the new list in the bna_dev structure.
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] rxf_id - rx-function ID.
+ * @param[in] mac_addr_ptr - pointer to the list of mac
+ * addresses to set
+ * @param[in] mac_addr_num - number of mac addresses in the
+ * list
+ *
+ * @return BNA_OK - successful
+ * @return BNA_FAIL - failed on sanity checks.
+ */
+enum bna_status bna_rxf_mcast_mac_set_list(struct bna_dev *dev,
+	unsigned int rxf_id, const struct mac *mac_addr_ptr,
+	unsigned int mac_addr_num)
+{
+	u32 i, j;
+	int found;
+
+	if (mac_addr_num > BNA_MCAST_TABLE_SIZE) {
+		pr_info("Too many Multicast Addresses [%d]",
+			mac_addr_num);
+		return BNA_FAIL;
+	}
+
+	/* find MAC addresses to delete */
+	for (i = 0; i < BNA_MCAST_TABLE_SIZE; i++) {
+		if (BNA_MAC_IS_EQUAL(&dev->mcast_addr[i], &bna_zero_addr))
+			continue;
+		found = 0;
+		for (j = 0; j < mac_addr_num; j++) {
+			if (BNA_MAC_IS_EQUAL
+				(&mac_addr_ptr[j], &dev->mcast_addr[i])) {
+				found = 1;
+				break;
+			}
+		}
+		/*
+		 * NOTE(review): an early BNA_FAIL return here or below
+		 * leaves the shadow table unchanged while some firmware
+		 * add/del commands have already been issued -- the table
+		 * and firmware state can diverge on partial failure.
+		 */
+		if (!found) {
+			if (BNA_FAIL ==
+				bna_rxf_mac_mbox_cmd(dev, rxf_id,
+					BFI_LL_H2I_MAC_MCAST_DEL_REQ,
+					&dev->mcast_addr[i])) {
+				return BNA_FAIL;
+			}
+		}
+	}
+
+	/* find MAC addresses to add */
+	for (i = 0; i < mac_addr_num; i++) {
+		found = 0;
+		for (j = 0; j < BNA_MCAST_TABLE_SIZE; j++) {
+			if (BNA_MAC_IS_EQUAL
+				(&mac_addr_ptr[i], &dev->mcast_addr[j])) {
+				found = 1;
+				break;
+			}
+		}
+		if (!found) {
+			if (BNA_FAIL ==
+				bna_rxf_mac_mbox_cmd(dev, rxf_id,
+					BFI_LL_H2I_MAC_MCAST_ADD_REQ,
+					&mac_addr_ptr[i])) {
+				return BNA_FAIL;
+			}
+		}
+	}
+
+	/* replace the old shadow table with the new list */
+	memset(&dev->mcast_addr[0], 0, sizeof(dev->mcast_addr));
+	memcpy(&dev->mcast_addr[0], mac_addr_ptr,
+		mac_addr_num * sizeof(struct mac));
+
+	return BNA_OK;
+}
+
+/**
+ * bna_mcast_mac_reset_list()
+ *
+ * Resets the multicast MAC address list kept by driver.
+ * Called when the hw gets reset.
+ *
+ * @param[in] dev - pointer to BNA device structure
+ *
+ * @return void
+ */
+void
+bna_mcast_mac_reset_list(struct bna_dev *dev)
+{
+	/* forget every cached multicast address (hw was reset) */
+	memset(&dev->mcast_addr[0], 0, sizeof(dev->mcast_addr));
+}
+
+/**
+ * bna_rxf_broadcast()
+ *
+ * For RxF "rxf_id", it enables/disables the broadcast address.
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] rxf_id - rx-function ID.
+ * @param[in] enable - enable/disable broadcast address
+ *
+ * @return BNA_OK - successful
+ * @return BNA_FAIL - failed on sanity checks.
+ */
+enum bna_status bna_rxf_broadcast(struct bna_dev *dev,
+	unsigned int rxf_id, enum bna_enable enable)
+{
+	/* broadcast is handled as the all-ones multicast address */
+	if (enable)
+		return bna_rxf_mcast_mac_add(dev, rxf_id, &bna_bcast_addr);
+
+	return bna_rxf_mcast_mac_del(dev, rxf_id, &bna_bcast_addr);
+}
+
+/**
+ * bna_rxf_vlan_add()
+ *
+ * For RxF "rxf_id", it adds this function as a member of the
+ * specified "vlan_id".
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] rxf_id - rx-function ID.
+ * @param[in] vlan_id - VLAN id to be added
+ *
+ * @return void
+ */
+void bna_rxf_vlan_add(struct bna_dev *dev, unsigned int rxf_id,
+	unsigned int vlan_id)
+{
+
+	u32 new_vlan_id;
+
+	/*
+	 * wrap the vlan_id around in case it
+	 * overflows the max limit
+	 * NOTE(review): BNA_MAX_VLAN_ID_MASK defined at the top of this
+	 * file is never used; the wrap uses BNA_VLAN_ID_MAX -- unify.
+	 */
+	new_vlan_id = vlan_id & BNA_VLAN_ID_MAX;
+	BNA_BIT_TABLE_SET(dev->vlan_table[rxf_id], new_vlan_id);
+
+	/* write through to hw only if filtering is on and RxF is active */
+	if (dev->vlan_filter_enable[rxf_id] &&
+		(dev->rxf_active & ((u64) 1 << rxf_id))) {
+		/* add VLAN ID on this function */
+		writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
+			(dev->port * 2),
+			VLAN_RAM_BASE_OFFSET), dev->regs.page_addr);
+		writel(
+			dev->vlan_table[rxf_id][new_vlan_id / 32],
+			BNA_GET_VLAN_MEM_ENTRY_ADDR
+			(dev->bar0, rxf_id, new_vlan_id));
+	}
+}
+
+/**
+ * bna_rxf_vlan_del()
+ *
+ * For RxF "rxf_id", it removes this function as a member of the
+ * specified "vlan_id".
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] rxf_id - rx-function ID.
+ * @param[in] vlan_id - VLAN id to be removed
+ *
+ * @return void
+ */
+void bna_rxf_vlan_del(struct bna_dev *dev, unsigned int rxf_id,
+	unsigned int vlan_id)
+{
+
+	u32 new_vlan_id;
+	/* wrap overflowing vlan ids, mirroring bna_rxf_vlan_add() */
+	new_vlan_id = vlan_id & BNA_VLAN_ID_MAX;
+	BNA_BIT_TABLE_CLEAR(dev->vlan_table[rxf_id], new_vlan_id);
+
+	/* write through to hw only if filtering is on and RxF is active */
+	if (dev->vlan_filter_enable[rxf_id] &&
+		(dev->rxf_active & ((u64) 1 << rxf_id))) {
+		writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
+			(dev->port * 2),
+			VLAN_RAM_BASE_OFFSET), dev->regs.page_addr);
+		writel(
+			dev->vlan_table[rxf_id][new_vlan_id / 32],
+			BNA_GET_VLAN_MEM_ENTRY_ADDR
+			(dev->bar0, rxf_id, new_vlan_id));
+	}
+}
+
+/**
+ * bna_rxf_vlan_filter()
+ *
+ * For RxF "rxf_id", it enables/disables the VLAN filter.
+ * Disabling the VLAN Filter allows reception of any VLAN-tagged frame.
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] rxf_id - rx-function ID.
+ * @param[in] enable - enable/disable VLAN Filtering.
+ *
+ * @return void
+ */
+void bna_rxf_vlan_filter(struct bna_dev *dev, unsigned int rxf_id,
+	enum bna_enable enable)
+{
+	u32 i;
+
+	/* remember the setting so vlan_add/del know whether to write hw */
+	dev->vlan_filter_enable[rxf_id] = enable;
+
+	writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
+		(dev->port * 2), VLAN_RAM_BASE_OFFSET), dev->regs.page_addr);
+
+	if (enable) {
+		/* enable VLAN filtering: load the shadow bit table */
+		for (i = 0; i <= BNA_VLAN_ID_MAX / 32; i++) {
+			writel(
+				dev->vlan_table[rxf_id][i],
+				BNA_GET_VLAN_MEM_ENTRY_ADDR
+				(dev->bar0, rxf_id, i * 32));
+		}
+	} else {
+		/* disable filtering: accept every VLAN (all bits set) */
+		for (i = 0; i <= BNA_VLAN_ID_MAX / 32; i++) {
+			writel(0xffffffff, BNA_GET_VLAN_MEM_ENTRY_ADDR
+				(dev->bar0, rxf_id, i * 32));
+		}
+	}
+}
+
+/**
+ * bna_rxf_vlan_del_all()
+ *
+ * For RxF "rxf_id", it clears all the VLANs.
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] rxf_id - rx-function ID.
+ *
+ * @return void
+ */
+void
+bna_rxf_vlan_del_all(struct bna_dev *dev, unsigned int rxf_id)
+{
+	u32 i;
+
+	writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
+		(dev->port * 2), VLAN_RAM_BASE_OFFSET), dev->regs.page_addr);
+
+	/* clear all VLANs for this function (hw only; the shadow
+	 * vlan_table is not touched here) */
+	for (i = 0; i <= BNA_VLAN_ID_MAX / 32; i++) {
+		writel(0, BNA_GET_VLAN_MEM_ENTRY_ADDR
+			(dev->bar0, rxf_id, i * 32));
+	}
+}
+
+/**
+ * bna_rxf_mcast_filter()
+ *
+ * For RxF "rxf_id", it enables/disables the multicast filter.
+ * Disabling the multicast filter allows reception of any
+ * multicast frame.
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] rxf_id - rx-function ID.
+ * @param[in] enable - enable/disable multicast Filtering.
+ *
+ * @return BNA_OK - successful
+ * @return BNA_FAIL - failed on sanity checks.
+ */
+enum bna_status bna_rxf_mcast_filter(struct bna_dev *dev,
+	unsigned int rxf_id, enum bna_enable enable)
+{
+
+	struct bfi_ll_mcast_filter_req cmd;
+
+	bfi_h2i_set(cmd.mh, BFI_MC_LL, BFI_LL_H2I_MAC_MCAST_FILTER_REQ, 0);
+
+	cmd.rxf_id = rxf_id;
+	cmd.enable = enable;
+
+	/* send command to firmware */
+	return bna_mbox_send(dev, &cmd, sizeof(cmd), dev->cbarg);
+}
+
+/**
+ * bna_rxf_mcast_del_all()
+ *
+ * For RxF "rxf_id", it clears the MCAST cam and MVT.
+ * This functionality is required by some of the drivers.
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] rxf_id - rx-function ID.
+ *
+ * @return BNA_OK - successful
+ * @return BNA_FAIL - failed on sanity checks.
+ */
+enum bna_status bna_rxf_mcast_del_all(struct bna_dev *dev,
+	unsigned int rxf_id)
+{
+	struct bfi_ll_mcast_del_all_req cmd;
+
+	bfi_h2i_set(cmd.mh, BFI_MC_LL, BFI_LL_H2I_MAC_MCAST_DEL_ALL_REQ, 0);
+
+	cmd.rxf_id = rxf_id;
+
+	/* send command to firmware; the shadow mcast_addr table is not
+	 * cleared here -- see bna_mcast_mac_reset_list() */
+	return bna_mbox_send(dev, &cmd, sizeof(cmd), dev->cbarg);
+}
+
+/**
+ * bna_rxf_promiscuous()
+ *
+ * For RxF "rxf_id", it enables/disables promiscuous mode.
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] rxf_id - rx-function ID.
+ * @param[in] enable - enable/disable promiscuous mode
+ *
+ * @return BNA_OK - successful
+ * @return BNA_FAIL - failed on sanity checks.
+ */
+enum bna_status bna_rxf_promiscuous(struct bna_dev *dev,
+	unsigned int rxf_id, enum bna_enable enable)
+{
+	struct bfi_ll_rxf_req cmd;
+
+	bfi_h2i_set(cmd.mh, BFI_MC_LL, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ, 0);
+
+	cmd.rxf_id = rxf_id;
+	cmd.enable = enable;
+
+	/* only one RxF may own promiscuous mode at a time; enabling is
+	 * allowed when no owner exists or this RxF already owns it */
+	if (enable &&
+		((dev->rxf_promiscuous_id == BNA_RXF_ID_NONE) ||
+		(dev->rxf_promiscuous_id == rxf_id))) {
+		dev->rxf_promiscuous_id = rxf_id;
+
+		/* allow all VLANs */
+		bna_rxf_vlan_filter(dev, rxf_id, BNA_DISABLE);
+
+		return bna_mbox_send(dev, &cmd, sizeof(cmd), dev->cbarg);
+	} else if (!enable && (dev->rxf_promiscuous_id == rxf_id)) {
+		dev->rxf_promiscuous_id = BNA_RXF_ID_NONE;
+
+		/* Revert VLAN filtering */
+		bna_rxf_vlan_filter(dev, rxf_id, BNA_ENABLE);
+
+		return bna_mbox_send(dev, &cmd, sizeof(cmd), dev->cbarg);
+	}
+
+	/* another RxF owns promiscuous mode, or disable for a non-owner */
+	return BNA_FAIL;
+}
+
+/**
+ * bna_rxf_default_mode()
+ *
+ * For RxF "rxf_id", it enables/disables default mode.
+ * Must be called after the RxF has been configured.
+ * Must remove all unicast MAC associated to this RxF.
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] rxf_id - rx-function ID.
+ * @param[in] enable - enable/disable default mode
+ *
+ * @return BNA_OK - successful
+ * @return BNA_FAIL - failed on sanity checks.
+ */
+enum bna_status bna_rxf_default_mode(struct bna_dev *dev,
+	unsigned int rxf_id, enum bna_enable enable)
+{
+	struct bna_rx_fndb_ram *rx_fndb_ram;
+	u32 i, ctl_flags;
+	struct bfi_ll_rxf_req cmd;
+
+	rx_fndb_ram = (struct bna_rx_fndb_ram *)
+		BNA_GET_MEM_BASE_ADDR(dev->bar0, RX_FNDB_RAM_BASE_OFFSET);
+
+	bfi_h2i_set(cmd.mh, BFI_MC_LL, BFI_LL_H2I_RXF_DEFAULT_SET_REQ, 0);
+
+	cmd.rxf_id = rxf_id;
+	cmd.enable = enable;
+
+	/* only one RxF may be the default function at a time */
+	if (enable &&
+		((dev->rxf_default_id == BNA_RXF_ID_NONE) ||
+		(dev->rxf_default_id == rxf_id))) {
+		dev->rxf_default_id = rxf_id;
+
+		/* allow all VLANs */
+		bna_rxf_vlan_filter(dev, rxf_id, BNA_DISABLE);
+
+		writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
+			(dev->port * 2),
+			RX_FNDB_RAM_BASE_OFFSET), dev->regs.page_addr);
+
+		/* mark every other RxF so their misses route to this one */
+		for (i = 0; i < BNA_RXF_ID_MAX; i++) {
+			if (i == rxf_id)
+				continue;
+
+			ctl_flags =
+				readl(&rx_fndb_ram[i].control_flags);
+			ctl_flags |= BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
+			writel(ctl_flags, &rx_fndb_ram[i].control_flags);
+		}
+		return bna_mbox_send(dev, &cmd, sizeof(cmd), dev->cbarg);
+	} else if (!enable && (dev->rxf_default_id == rxf_id)) {
+		dev->rxf_default_id = BNA_RXF_ID_NONE;
+
+		/* Revert VLAN filtering */
+		bna_rxf_vlan_filter(dev, rxf_id,
+			dev->vlan_filter_enable[rxf_id]);
+
+		writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
+			(dev->port * 2),
+			RX_FNDB_RAM_BASE_OFFSET), dev->regs.page_addr);
+
+		/* clear the default-function bit on all RxFs */
+		for (i = 0; i < BNA_RXF_ID_MAX; i++) {
+			ctl_flags =
+				readl(&rx_fndb_ram[i].control_flags);
+			ctl_flags &= ~BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
+			writel(ctl_flags, &rx_fndb_ram[i].control_flags);
+		}
+		return bna_mbox_send(dev, &cmd, sizeof(cmd), dev->cbarg);
+	}
+	/* another RxF owns default mode, or disable for a non-owner */
+	return BNA_FAIL;
+}
+
+/**
+ * bna_rxf_frame_stats_get()
+ *
+ * For RxF "rxf_id", it loads frame statistics into "stats_ptr".
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] rxf_id - rx-function ID.
+ * @param[out] stats_ptr - pointer to stats structure to fill
+ *
+ * @return void
+ */
+void bna_rxf_frame_stats_get(struct bna_dev *dev, unsigned int rxf_id,
+	struct bna_stats_rxf **stats_ptr)
+{
+	/* hand out a pointer into dev->stats; no copy is made */
+	*stats_ptr = &dev->stats.rxf_stats[rxf_id];
+}
+
+/**
+ * bna_txf_frame_stats_get()
+ *
+ * For TxF "txf_id", it loads frame statistics into "stats_ptr".
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] txf_id - tx-function ID.
+ * @param[out] stats_ptr - pointer to tx-function statistics.
+ *
+ * @return void
+ */
+void bna_txf_frame_stats_get(struct bna_dev *dev, unsigned int txf_id,
+	struct bna_stats_txf **stats_ptr)
+{
+
+	/* hand out a pointer into dev->stats; no copy is made */
+	*stats_ptr = &dev->stats.txf_stats[txf_id];
+}
+
+/**
+ * bna_mac_rx_stats_get()
+ *
+ * Loads MAC Rx statistics into "stats_ptr".
+ *
+ * @param[in] dev - pointer to BNA device structure
+
+ * @param[out] stats_ptr - pointer to stats structure to fill
+ *
+ * @return void
+ */
+void bna_mac_rx_stats_get(struct bna_dev *dev,
+	struct cna_stats_mac_rx **stats_ptr)
+{
+	/* hand out a pointer into dev->stats; no copy is made */
+	*stats_ptr = &dev->stats.mac_rx_stats;
+}
+
+/**
+ * bna_mac_tx_stats_get()
+ *
+ * Hands back a pointer to the MAC Tx statistics kept in the
+ * device's stats block.
+ *
+ * @param[in]  dev       - pointer to BNA device structure
+ * @param[out] stats_ptr - receives the address of the MAC Tx stats
+ *
+ * @return void
+ */
+void bna_mac_tx_stats_get(struct bna_dev *dev,
+			struct cna_stats_mac_tx **stats_ptr)
+{
+	struct bna_stats *all_stats = &dev->stats;
+
+	*stats_ptr = &all_stats->mac_tx_stats;
+}
+
+/**
+ * bna_all_stats_get()
+ *
+ * Hands back a pointer to the complete statistics block of the
+ * device.
+ *
+ * @param[in]  dev       - pointer to BNA device structure
+ * @param[out] stats_ptr - receives the address of the stats block
+ *
+ * @return void
+ */
+void
+bna_all_stats_get(struct bna_dev *dev, struct bna_stats **stats_ptr)
+{
+	struct bna_stats *all_stats = &dev->stats;
+
+	*stats_ptr = all_stats;
+}
+
+/**
+ * bna_stats_get()
+ *
+ * Get the statistics from the device. This function needs to
+ * be scheduled every second to get periodic update of the
+ * statistics data from hardware. The firmware DMAs the data to
+ * the buffer described by dev->hw_stats_dma; bna_stats_process()
+ * unpacks it on completion.
+ *
+ * @param[in] dev - pointer to BNA device structure.
+ *
+ * @return BNA_OK - successful
+ * @return BNA_FAIL - failed on sanity checks.
+ */
+enum bna_status
+bna_stats_get(struct bna_dev *dev)
+{
+	struct bfi_ll_stats_req cmd;
+
+	bfi_h2i_set(cmd.mh, BFI_MC_LL, BFI_LL_H2I_STATS_GET_REQ, 0);
+
+	cmd.stats_mask = htons(BFI_LL_STATS_ALL);
+
+	/*
+	 * Split the 64-bit active masks with lower_32_bits()/
+	 * upper_32_bits(), matching bna_stats_clear(), instead of
+	 * open-coded masks and shifts.
+	 */
+	cmd.rxf_id_mask[0] = htonl(lower_32_bits(dev->rxf_active));
+	cmd.rxf_id_mask[1] = htonl(upper_32_bits(dev->rxf_active));
+	cmd.txf_id_mask[0] = htonl(lower_32_bits(dev->txf_active));
+	cmd.txf_id_mask[1] = htonl(upper_32_bits(dev->txf_active));
+
+	cmd.host_buffer.a32.addr_hi = dev->hw_stats_dma.msb;
+	cmd.host_buffer.a32.addr_lo = dev->hw_stats_dma.lsb;
+
+	/*
+	 * Snapshot which functions this request covers; needed by
+	 * bna_stats_process() to unpack the compacted DMA buffer.
+	 */
+	dev->rxf_active_last = dev->rxf_active;
+	dev->txf_active_last = dev->txf_active;
+
+	/* send command to firmware */
+	return bna_mbox_send(dev, &cmd, sizeof(cmd), dev->cbarg);
+}
+
+/**
+ * bna_stats_clear()
+ *
+ * Clear the statistics in the device for the functions selected
+ * by the given tx/rx function masks.
+ *
+ * @param[in] dev         - pointer to BNA device structure.
+ * @param[in] txf_id_mask - bitmask of tx-functions to clear.
+ * @param[in] rxf_id_mask - bitmask of rx-functions to clear.
+ *
+ * @return BNA_OK - successful
+ * @return BNA_FAIL - failed on sanity checks.
+ */
+enum bna_status bna_stats_clear(struct bna_dev *dev,
+			u64 txf_id_mask, u64 rxf_id_mask)
+{
+	struct bfi_ll_stats_req req;
+
+	bfi_h2i_set(req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
+	req.stats_mask = htons(BFI_LL_STATS_ALL);
+
+	req.txf_id_mask[0] = htonl(lower_32_bits(txf_id_mask));
+	req.txf_id_mask[1] = htonl(upper_32_bits(txf_id_mask));
+	req.rxf_id_mask[0] = htonl(lower_32_bits(rxf_id_mask));
+	req.rxf_id_mask[1] = htonl(upper_32_bits(rxf_id_mask));
+
+	/* hand the request to the firmware mailbox */
+	return bna_mbox_send(dev, &req, sizeof(req), dev->cbarg);
+}
+
+/**
+ * bna_rxf_stats_clear()
+ *
+ * Clear the statistics for the specified RxF.
+ *
+ * @param[in] dev - pointer to BNA device structure.
+ * @param[in] rxf_id - rx-function ID.
+ *
+ * @return BNA_OK - successful
+ * @return BNA_FAIL - failed on sanity checks.
+ */
+enum bna_status bna_rxf_stats_clear(struct bna_dev *dev,
+			unsigned int rxf_id)
+{
+	struct bfi_ll_stats_req cmd;
+	/*
+	 * Build the selector with a 64-bit shift; the previous
+	 * open-coded "1 << rxf_id" was a 32-bit int shift, which is
+	 * undefined behavior for rxf_id == 31.
+	 */
+	u64 rxf_mask = (u64) 1 << rxf_id;
+
+	bfi_h2i_set(cmd.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
+
+	/* NOTE(review): stats_mask is 0 here while bna_stats_clear()
+	 * uses BFI_LL_STATS_ALL -- confirm 0 means "per-function
+	 * stats only" in the BFI protocol. */
+	cmd.stats_mask = 0;
+
+	cmd.rxf_id_mask[0] = htonl(lower_32_bits(rxf_mask));
+	cmd.rxf_id_mask[1] = htonl(upper_32_bits(rxf_mask));
+
+	cmd.txf_id_mask[0] = 0;
+	cmd.txf_id_mask[1] = 0;
+
+	/* send command to firmware */
+	return bna_mbox_send(dev, &cmd, sizeof(cmd), dev->cbarg);
+}
+
+/**
+ * bna_lldp_stats_clear()
+ *
+ * Ask the firmware to reset its DCBCX-LLDP statistics.
+ *
+ * @param[in] dev - pointer to BNA device structure.
+ *
+ * @return BNA_OK - successful
+ * @return BNA_FAIL - failed on sanity checks.
+ */
+enum bna_status
+bna_lldp_stats_clear(struct bna_dev *dev)
+{
+	struct bfi_lldp_reset_stats req;
+
+	bfi_h2i_set(req.mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS, 0);
+
+	/* hand the request to the firmware mailbox */
+	return bna_mbox_send(dev, &req, sizeof(req), dev->cbarg);
+}
+
+/**
+ * bna_get_cfg_req()
+ *
+ * Request the LLDP-DCBCX configuration; the firmware DMAs the
+ * reply to the supplied address.
+ *
+ * @param[in] dev      - pointer to BNA device structure.
+ * @param[in] dma_addr - dma address in "bna_dma_addr_t" format.
+ *
+ * @return BNA_OK - successful
+ * @return BNA_FAIL - failed on sanity checks.
+ */
+enum bna_status bna_get_cfg_req(struct bna_dev *dev,
+			struct bna_dma_addr *dma_addr)
+{
+	struct bfi_cee_get_req req;
+
+	bfi_h2i_set(req.mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ, 0);
+	req.dma_addr.a32.addr_hi = dma_addr->msb;
+	req.dma_addr.a32.addr_lo = dma_addr->lsb;
+
+	/* hand the request to the firmware mailbox */
+	return bna_mbox_send(dev, &req, sizeof(req), dev->cbarg);
+}
+
+/**
+ * bna_get_cee_stats_req()
+ *
+ * Request the LLDP-DCBCX statistics; the firmware DMAs the
+ * reply to the supplied address.
+ *
+ * @param[in] dev      - pointer to BNA device structure.
+ * @param[in] dma_addr - dma address in "bna_dma_addr_t" format.
+ *
+ * @return BNA_OK - successful
+ * @return BNA_FAIL - failed on sanity checks.
+ */
+enum bna_status bna_get_cee_stats_req(struct bna_dev *dev,
+			struct bna_dma_addr *dma_addr)
+{
+	struct bfi_cee_get_req req;
+
+	bfi_h2i_set(req.mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ, 0);
+	req.dma_addr.a32.addr_hi = dma_addr->msb;
+	req.dma_addr.a32.addr_lo = dma_addr->lsb;
+
+	/* hand the request to the firmware mailbox */
+	return bna_mbox_send(dev, &req, sizeof(req), dev->cbarg);
+}
+
+/**
+ * bna_stats_process()
+ *
+ * Process the statistics data DMAed from the device. This
+ * function needs to be scheduled upon getting an asynchronous
+ * notification from the firmware.
+ *
+ * The per-function sections of the DMA buffer are packed: only
+ * the RxFs/TxFs that were active when bna_stats_get() issued the
+ * request (rxf_active_last/txf_active_last) appear, in ascending
+ * id order.
+ *
+ * @param[in] dev - pointer to BNA device structure.
+ *
+ * @return void
+ */
+void
+bna_stats_process(struct bna_dev *dev)
+{
+	u32 i, j;
+	struct bna_stats_rxf *rxf_hw_stats;
+	struct bna_stats_txf *txf_hw_stats;
+
+	/* FC Tx function statistics */
+	dev->stats.fc_tx_stats.txf_ucast_octets =
+		bna_hw_stats_to_stats(dev->hw_stats->fc_tx_stats.txf_ucast_octets);
+	dev->stats.fc_tx_stats.txf_ucast =
+		bna_hw_stats_to_stats(dev->hw_stats->fc_tx_stats.txf_ucast);
+	dev->stats.fc_tx_stats.txf_ucast_vlan =
+		bna_hw_stats_to_stats(dev->hw_stats->fc_tx_stats.txf_ucast_vlan);
+
+	dev->stats.fc_tx_stats.txf_mcast_octets =
+		bna_hw_stats_to_stats(dev->hw_stats->fc_tx_stats.txf_mcast_octets);
+	dev->stats.fc_tx_stats.txf_mcast =
+		bna_hw_stats_to_stats(dev->hw_stats->fc_tx_stats.txf_mcast);
+	dev->stats.fc_tx_stats.txf_mcast_vlan =
+		bna_hw_stats_to_stats(dev->hw_stats->fc_tx_stats.txf_mcast_vlan);
+
+	dev->stats.fc_tx_stats.txf_bcast_octets =
+		bna_hw_stats_to_stats(dev->hw_stats->fc_tx_stats.txf_bcast_octets);
+	dev->stats.fc_tx_stats.txf_bcast =
+		bna_hw_stats_to_stats(dev->hw_stats->fc_tx_stats.txf_bcast);
+	dev->stats.fc_tx_stats.txf_bcast_vlan =
+		bna_hw_stats_to_stats(dev->hw_stats->fc_tx_stats.txf_bcast_vlan);
+
+	dev->stats.fc_tx_stats.txf_parity_errors =
+		bna_hw_stats_to_stats(dev->hw_stats->fc_tx_stats.txf_parity_errors);
+	dev->stats.fc_tx_stats.txf_timeout =
+		bna_hw_stats_to_stats(dev->hw_stats->fc_tx_stats.txf_timeout);
+	dev->stats.fc_tx_stats.txf_fid_parity_errors =
+		bna_hw_stats_to_stats(dev->hw_stats->fc_tx_stats.txf_fid_parity_errors);
+
+	/* per-priority Tx pause counters */
+	for (i = 0; i < 8; i++) {
+		dev->stats.bpc_tx_stats.tx_pause[i] =
+			bna_hw_stats_to_stats(dev->hw_stats->bpc_stats.tx_pause[i]);
+		dev->stats.bpc_tx_stats.tx_zero_pause[i] =
+			bna_hw_stats_to_stats(dev->hw_stats->bpc_stats.tx_zero_pause[i]);
+		dev->stats.bpc_tx_stats.tx_first_pause[i] =
+			bna_hw_stats_to_stats(dev->hw_stats->bpc_stats.tx_first_pause[i]);
+	}
+
+	/* MAC Tx statistics */
+	dev->stats.mac_tx_stats.tx_bytes =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.tx_bytes);
+	dev->stats.mac_tx_stats.tx_packets =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.tx_packets);
+	dev->stats.mac_tx_stats.tx_multicast =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.tx_multicast);
+	dev->stats.mac_tx_stats.tx_broadcast =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.tx_broadcast);
+	dev->stats.mac_tx_stats.tx_pause =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.tx_pause);
+	dev->stats.mac_tx_stats.tx_deferral =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.tx_deferral);
+	dev->stats.mac_tx_stats.tx_excessive_deferral =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.tx_excessive_deferral);
+	dev->stats.mac_tx_stats.tx_single_collision =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.tx_single_collision);
+	dev->stats.mac_tx_stats.tx_muliple_collision =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.tx_muliple_collision);
+	dev->stats.mac_tx_stats.tx_late_collision =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.tx_late_collision);
+	dev->stats.mac_tx_stats.tx_excessive_collision =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.tx_excessive_collision);
+	dev->stats.mac_tx_stats.tx_total_collision =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.tx_total_collision);
+	dev->stats.mac_tx_stats.tx_pause_honored =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.tx_pause_honored);
+	dev->stats.mac_tx_stats.tx_drop =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.tx_drop);
+	dev->stats.mac_tx_stats.tx_jabber =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.tx_jabber);
+	dev->stats.mac_tx_stats.tx_fcs_error =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.tx_fcs_error);
+	dev->stats.mac_tx_stats.tx_control_frame =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.tx_control_frame);
+	dev->stats.mac_tx_stats.tx_oversize =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.tx_oversize);
+	dev->stats.mac_tx_stats.tx_undersize =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.tx_undersize);
+	dev->stats.mac_tx_stats.tx_fragments =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.tx_fragments);
+
+	/* FC Rx function statistics */
+	dev->stats.fc_rx_stats.rxf_ucast_octets =
+		bna_hw_stats_to_stats(dev->hw_stats->fc_rx_stats.rxf_ucast_octets);
+	dev->stats.fc_rx_stats.rxf_ucast =
+		bna_hw_stats_to_stats(dev->hw_stats->fc_rx_stats.rxf_ucast);
+	dev->stats.fc_rx_stats.rxf_ucast_vlan =
+		bna_hw_stats_to_stats(dev->hw_stats->fc_rx_stats.rxf_ucast_vlan);
+
+	dev->stats.fc_rx_stats.rxf_mcast_octets =
+		bna_hw_stats_to_stats(dev->hw_stats->fc_rx_stats.rxf_mcast_octets);
+	dev->stats.fc_rx_stats.rxf_mcast =
+		bna_hw_stats_to_stats(dev->hw_stats->fc_rx_stats.rxf_mcast);
+	dev->stats.fc_rx_stats.rxf_mcast_vlan =
+		bna_hw_stats_to_stats(dev->hw_stats->fc_rx_stats.rxf_mcast_vlan);
+
+	dev->stats.fc_rx_stats.rxf_bcast_octets =
+		bna_hw_stats_to_stats(dev->hw_stats->fc_rx_stats.rxf_bcast_octets);
+	dev->stats.fc_rx_stats.rxf_bcast =
+		bna_hw_stats_to_stats(dev->hw_stats->fc_rx_stats.rxf_bcast);
+	dev->stats.fc_rx_stats.rxf_bcast_vlan =
+		bna_hw_stats_to_stats(dev->hw_stats->fc_rx_stats.rxf_bcast_vlan);
+
+	/* per-priority Rx pause counters */
+	for (i = 0; i < 8; i++) {
+		dev->stats.bpc_rx_stats.rx_pause[i] =
+			bna_hw_stats_to_stats(dev->hw_stats->bpc_stats.rx_pause[i]);
+		dev->stats.bpc_rx_stats.rx_zero_pause[i] =
+			bna_hw_stats_to_stats(dev->hw_stats->bpc_stats.rx_zero_pause[i]);
+		dev->stats.bpc_rx_stats.rx_first_pause[i] =
+			bna_hw_stats_to_stats(dev->hw_stats->bpc_stats.rx_first_pause[i]);
+	}
+
+	/* RAD statistics */
+	dev->stats.rad_stats.rx_frames =
+		bna_hw_stats_to_stats(dev->hw_stats->rad_stats.rx_frames);
+	dev->stats.rad_stats.rx_octets =
+		bna_hw_stats_to_stats(dev->hw_stats->rad_stats.rx_octets);
+	dev->stats.rad_stats.rx_vlan_frames =
+		bna_hw_stats_to_stats(dev->hw_stats->rad_stats.rx_vlan_frames);
+
+	dev->stats.rad_stats.rx_ucast =
+		bna_hw_stats_to_stats(dev->hw_stats->rad_stats.rx_ucast);
+	dev->stats.rad_stats.rx_ucast_octets =
+		bna_hw_stats_to_stats(dev->hw_stats->rad_stats.rx_ucast_octets);
+	dev->stats.rad_stats.rx_ucast_vlan =
+		bna_hw_stats_to_stats(dev->hw_stats->rad_stats.rx_ucast_vlan);
+
+	dev->stats.rad_stats.rx_mcast =
+		bna_hw_stats_to_stats(dev->hw_stats->rad_stats.rx_mcast);
+	dev->stats.rad_stats.rx_mcast_octets =
+		bna_hw_stats_to_stats(dev->hw_stats->rad_stats.rx_mcast_octets);
+	dev->stats.rad_stats.rx_mcast_vlan =
+		bna_hw_stats_to_stats(dev->hw_stats->rad_stats.rx_mcast_vlan);
+
+	dev->stats.rad_stats.rx_bcast =
+		bna_hw_stats_to_stats(dev->hw_stats->rad_stats.rx_bcast);
+	dev->stats.rad_stats.rx_bcast_octets =
+		bna_hw_stats_to_stats(dev->hw_stats->rad_stats.rx_bcast_octets);
+	dev->stats.rad_stats.rx_bcast_vlan =
+		bna_hw_stats_to_stats(dev->hw_stats->rad_stats.rx_bcast_vlan);
+
+	dev->stats.rad_stats.rx_drops =
+		bna_hw_stats_to_stats(dev->hw_stats->rad_stats.rx_drops);
+
+	/* MAC Rx statistics */
+	dev->stats.mac_rx_stats.frame_64 =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.frame_64);
+	dev->stats.mac_rx_stats.frame_65_127 =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.frame_65_127);
+	dev->stats.mac_rx_stats.frame_128_255 =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.frame_128_255);
+	dev->stats.mac_rx_stats.frame_256_511 =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.frame_256_511);
+	dev->stats.mac_rx_stats.frame_512_1023 =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.frame_512_1023);
+	dev->stats.mac_rx_stats.frame_1024_1518 =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.frame_1024_1518);
+	dev->stats.mac_rx_stats.frame_1518_1522 =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.frame_1519_1522);
+	dev->stats.mac_rx_stats.rx_bytes =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.rx_bytes);
+	dev->stats.mac_rx_stats.rx_packets =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.rx_packets);
+	dev->stats.mac_rx_stats.rx_fcs_error =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.rx_fcs_error);
+	dev->stats.mac_rx_stats.rx_multicast =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.rx_multicast);
+	dev->stats.mac_rx_stats.rx_broadcast =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.rx_broadcast);
+	dev->stats.mac_rx_stats.rx_control_frames =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.rx_control_frames);
+	dev->stats.mac_rx_stats.rx_pause =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.rx_pause);
+	dev->stats.mac_rx_stats.rx_unknown_opcode =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.rx_unknown_opcode);
+	dev->stats.mac_rx_stats.rx_alignment_error =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.rx_alignment_error);
+	dev->stats.mac_rx_stats.rx_frame_length_error =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.rx_frame_length_error);
+	dev->stats.mac_rx_stats.rx_code_error =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.rx_code_error);
+	dev->stats.mac_rx_stats.rx_carrier_sense_error =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.rx_carrier_sense_error);
+	dev->stats.mac_rx_stats.rx_undersize =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.rx_undersize);
+	dev->stats.mac_rx_stats.rx_oversize =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.rx_oversize);
+	/* fix: rx_fragments was mistakenly copied from rx_jabber */
+	dev->stats.mac_rx_stats.rx_fragments =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.rx_fragments);
+	dev->stats.mac_rx_stats.rx_jabber =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.rx_jabber);
+	dev->stats.mac_rx_stats.rx_drop =
+		bna_hw_stats_to_stats(dev->hw_stats->mac_stats.rx_drop);
+
+	/*
+	 * Per-RxF statistics: the buffer holds entries only for RxFs
+	 * active in the snapshot, so j advances through the packed
+	 * entries while i walks all possible ids.
+	 */
+	rxf_hw_stats = (struct bna_stats_rxf *)&dev->hw_stats->rxf_stats[0];
+	j = 0;
+
+	for (i = 0; i < BNA_RXF_ID_MAX; i++) {
+		if (dev->rxf_active_last & ((u64) 1 << i)) {
+			dev->stats.rxf_stats[i].ucast_octets =
+				bna_hw_stats_to_stats(rxf_hw_stats[j].ucast_octets);
+			dev->stats.rxf_stats[i].ucast =
+				bna_hw_stats_to_stats(rxf_hw_stats[j].ucast);
+			dev->stats.rxf_stats[i].ucast_vlan =
+				bna_hw_stats_to_stats(rxf_hw_stats[j].ucast_vlan);
+
+			dev->stats.rxf_stats[i].mcast_octets =
+				bna_hw_stats_to_stats(rxf_hw_stats[j].mcast_octets);
+			dev->stats.rxf_stats[i].mcast =
+				bna_hw_stats_to_stats(rxf_hw_stats[j].mcast);
+			dev->stats.rxf_stats[i].mcast_vlan =
+				bna_hw_stats_to_stats(rxf_hw_stats[j].mcast_vlan);
+
+			dev->stats.rxf_stats[i].bcast_octets =
+				bna_hw_stats_to_stats(rxf_hw_stats[j].bcast_octets);
+			dev->stats.rxf_stats[i].bcast =
+				bna_hw_stats_to_stats(rxf_hw_stats[j].bcast);
+			dev->stats.rxf_stats[i].bcast_vlan =
+				bna_hw_stats_to_stats(rxf_hw_stats[j].bcast_vlan);
+
+			dev->stats.rxf_stats[i].frame_drops =
+				bna_hw_stats_to_stats(rxf_hw_stats[j].frame_drops);
+
+			j++;
+		}
+	}
+
+	/* TxF entries follow immediately after the packed RxF entries */
+	txf_hw_stats = (struct bna_stats_txf *)&rxf_hw_stats[j];
+	j = 0;
+
+	for (i = 0; i < BNA_TXF_ID_MAX; i++) {
+		if (dev->txf_active_last & ((u64) 1 << i)) {
+			dev->stats.txf_stats[i].ucast_octets =
+				bna_hw_stats_to_stats(txf_hw_stats[j].ucast_octets);
+			dev->stats.txf_stats[i].ucast =
+				bna_hw_stats_to_stats(txf_hw_stats[j].ucast);
+			dev->stats.txf_stats[i].ucast_vlan =
+				bna_hw_stats_to_stats(txf_hw_stats[j].ucast_vlan);
+
+			dev->stats.txf_stats[i].mcast_octets =
+				bna_hw_stats_to_stats(txf_hw_stats[j].mcast_octets);
+			dev->stats.txf_stats[i].mcast =
+				bna_hw_stats_to_stats(txf_hw_stats[j].mcast);
+			dev->stats.txf_stats[i].mcast_vlan =
+				bna_hw_stats_to_stats(txf_hw_stats[j].mcast_vlan);
+
+			dev->stats.txf_stats[i].bcast_octets =
+				bna_hw_stats_to_stats(txf_hw_stats[j].bcast_octets);
+			dev->stats.txf_stats[i].bcast =
+				bna_hw_stats_to_stats(txf_hw_stats[j].bcast);
+			dev->stats.txf_stats[i].bcast_vlan =
+				bna_hw_stats_to_stats(txf_hw_stats[j].bcast_vlan);
+
+			dev->stats.txf_stats[i].errors =
+				bna_hw_stats_to_stats(txf_hw_stats[j].errors);
+			dev->stats.txf_stats[i].filter_vlan =
+				bna_hw_stats_to_stats(txf_hw_stats[j].filter_vlan);
+			dev->stats.txf_stats[i].filter_mac_sa =
+				bna_hw_stats_to_stats(txf_hw_stats[j].filter_mac_sa);
+
+			j++;
+		}
+	}
+}
+
+/**
+ * bna_txf_config_set()
+ *
+ * For TxF "txf_id", it configures the TxF specified by "cfg_ptr" and
+ * indicates to the statistics collector to collect statistics for this
+ * Tx-Function.
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] txf_id - tx-function ID.
+ * @param[in] cfg_ptr - pointer to tx-function configuration.
+ *
+ * @return void
+ */
+void bna_txf_config_set(struct bna_dev *dev, unsigned int txf_id,
+ const struct bna_txf_config *cfg_ptr)
+{
+
+ struct bna_tx_fndb_ram *tx_fndb;
+
+ tx_fndb = (struct bna_tx_fndb_ram *)
+ BNA_GET_MEM_BASE_ADDR(dev->bar0, TX_FNDB_RAM_BASE_OFFSET);
+
+ /* Select the TX FNDB RAM page; this write must precede the
+ * entry write below, since the RAM window is paged. */
+ writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
+ (dev->port * 2),
+ TX_FNDB_RAM_BASE_OFFSET), dev->regs.page_addr);
+
+ /* Program vlan (upper 16 bits) and control flags (lower 16).
+ * NOTE(review): this writes to &tx_fndb[txf_id] (entry base)
+ * while bna_txf_enable()/disable() use the vlan_n_ctrl_flags
+ * member -- confirm both address the same word. */
+ writel((cfg_ptr->vlan << 16) | cfg_ptr->flags, &tx_fndb[txf_id]);
+
+ /* turn on statistics collection */
+ dev->txf_active |= ((u64) 1 << txf_id);
+}
+
+/**
+ * bna_txf_config_clear()
+ *
+ * For TxF "txf_id", it clears its configuration and indicates to the
+ * statistics collector to stop collecting statistics for this
+ * Tx-Function.
+ *
+ * @param[in] dev - pointer to BNA device structure
+ * @param[in] txf_id - tx-function ID.
+ *
+ * @return void
+ */
+void
+bna_txf_config_clear(struct bna_dev *dev, unsigned int txf_id)
+{
+
+ struct bna_tx_fndb_ram *tx_fndb;
+
+ tx_fndb = (struct bna_tx_fndb_ram *)
+ BNA_GET_MEM_BASE_ADDR(dev->bar0, TX_FNDB_RAM_BASE_OFFSET);
+
+ /* Select the TX FNDB RAM page; must precede the entry write
+ * below, since the RAM window is paged. */
+ writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
+ (dev->port * 2),
+ TX_FNDB_RAM_BASE_OFFSET), dev->regs.page_addr);
+
+ /* Zero the entry: clears vlan and all control flags */
+ writel(0, &tx_fndb[txf_id]);
+
+ /* turn off statistics collection */
+ dev->txf_active &= ~((u64) 1 << txf_id);
+}
+
+/**
+ * bna_txf_disable()
+ *
+ * Disables the Tx Function without clearing the configuration
+ * Also disables collection of statistics.
+ *
+ * @param[in] bna_dev - Pointer to BNA device handle
+ * @param[in] txf_id - Id of the Tx Function to be disabled
+ *
+ * @return void
+ */
+void
+bna_txf_disable(struct bna_dev *dev, unsigned int txf_id)
+{
+ struct bna_tx_fndb_ram *tx_fndb;
+ u32 page_num, ctl_flags;
+
+ tx_fndb = (struct bna_tx_fndb_ram *)
+ BNA_GET_MEM_BASE_ADDR(dev->bar0, TX_FNDB_RAM_BASE_OFFSET);
+
+ /* Write the page number register; the read-modify-write below
+ * depends on the page selection staying in effect */
+ page_num =
+ BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM + (dev->port * 2),
+ TX_FNDB_RAM_BASE_OFFSET);
+ writel(page_num, dev->regs.page_addr);
+
+ /* Read-modify-write: clear only the enable bit, preserving the
+ * vlan and remaining control flags */
+ ctl_flags = readl(&tx_fndb[txf_id].vlan_n_ctrl_flags);
+
+ ctl_flags &= ~BNA_TXF_CF_ENABLE;
+
+ writel(ctl_flags, &tx_fndb[txf_id].vlan_n_ctrl_flags);
+
+ /* turn off statistics collection */
+ dev->txf_active &= ~((u64) 1 << txf_id);
+}
+
+/**
+ * bna_txf_enable()
+ *
+ * Enables the Tx Function without reconfiguring.
+ * Also enables collection of statistics.
+ *
+ * @param[in] bna_dev - Pointer to BNA device handle
+ * @param[in] txf_id - Id of the Tx Function to be enabled
+ *
+ * @return void
+ */
+void
+bna_txf_enable(struct bna_dev *dev, unsigned int txf_id)
+{
+ struct bna_tx_fndb_ram *tx_fndb;
+ u32 page_num, ctl_flags;
+
+ tx_fndb = (struct bna_tx_fndb_ram *)
+ BNA_GET_MEM_BASE_ADDR(dev->bar0, TX_FNDB_RAM_BASE_OFFSET);
+
+ /* Write the page number register; the read-modify-write below
+ * depends on the page selection staying in effect */
+ page_num =
+ BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM + (dev->port * 2),
+ TX_FNDB_RAM_BASE_OFFSET);
+ writel(page_num, dev->regs.page_addr);
+
+ /* Read-modify-write: set only the enable bit, preserving the
+ * vlan and remaining control flags */
+ ctl_flags = readl(&tx_fndb[txf_id].vlan_n_ctrl_flags);
+
+ ctl_flags |= BNA_TXF_CF_ENABLE;
+
+ writel(ctl_flags, &tx_fndb[txf_id].vlan_n_ctrl_flags);
+
+ /* turn on statistics collection */
+ dev->txf_active |= ((u64) 1 << txf_id);
+}
+
+/**
+ * bna_set_pause_config()
+ *
+ * Enable/disable Tx/Rx pause through the firmware.
+ *
+ * @param[in] dev   - pointer to BNA device structure
+ * @param[in] pause - pointer to struct bna_pause_config
+ * @param[in] cbarg - argument for the completion callback
+ *
+ * @return BNA_OK in case of success BNA_FAIL otherwise.
+ */
+enum bna_status bna_set_pause_config(struct bna_dev *dev,
+			struct bna_pause_config *pause, void *cbarg)
+{
+	struct bfi_ll_set_pause_req req;
+
+	bfi_h2i_set(req.mh, BFI_MC_LL, BFI_LL_H2I_SET_PAUSE_REQ, 0);
+	req.rx_pause = pause->rx_pause;
+	req.tx_pause = pause->tx_pause;
+
+	/* hand the request to the firmware mailbox */
+	return bna_mbox_send(dev, &req, sizeof(req), cbarg);
+}
+
+/**
+ * bna_mtu_info()
+ *
+ * Send MTU information to the firmware.
+ * This is required to do PAUSE efficiently.
+ *
+ * @param[in] dev   - pointer to BNA device structure
+ * @param[in] mtu   - current mtu size
+ * @param[in] cbarg - argument for the completion callback
+ *
+ * @return BNA_OK in case of success BNA_FAIL otherwise.
+ */
+enum bna_status bna_mtu_info(struct bna_dev *dev, u16 mtu,
+			void *cbarg)
+{
+	struct bfi_ll_mtu_info_req req;
+
+	bfi_h2i_set(req.mh, BFI_MC_LL, BFI_LL_H2I_MTU_INFO_REQ, 0);
+	req.mtu = htons(mtu);
+
+	/* hand the request to the firmware mailbox */
+	return bna_mbox_send(dev, &req, sizeof(req), cbarg);
+}
+
+/* Currently we assume just 2 columns, col 0 = small, col 1 = large */
+/*
+ * Coalescing-timer lookup table, indexed as [load][bias] by
+ * bna_calc_coalescing_timer().  NOTE(review): row order presumably
+ * follows the numeric values of BNA_LOW_LOAD_4..BNA_HIGH_LOAD_4
+ * (lighter load -> larger timer) -- confirm against the enum
+ * definitions.
+ */
+static const u32 intr_mod_vector[BNA_LOAD_TYPES + 1][BNA_BIAS_TYPES] = {
+ {12, 12},
+ {6, 10},
+ {5, 10},
+ {4, 8},
+ {3, 6},
+ {3, 6},
+ {2, 4},
+ {1, 2},
+};
+
+/**
+ * bna_calc_coalescing_timer()
+ *
+ * Maps the packet counts accumulated since the previous call to an
+ * interrupt coalescing timer value via intr_mod_vector[], then
+ * resets the counters for the next sampling interval.
+ *
+ * @param[in] dev - pointer to BNA device structure (unused)
+ * @param[in] pkt - small/large packet counters for the interval
+ *
+ * @return the coalescing timer value
+ */
+u8 bna_calc_coalescing_timer(struct bna_dev *dev,
+			struct bna_pkt_rate *pkt)
+{
+	static const u32 rate_limit[] = {
+		BNA_10K_PKT_RATE, BNA_20K_PKT_RATE, BNA_30K_PKT_RATE,
+		BNA_40K_PKT_RATE, BNA_50K_PKT_RATE, BNA_60K_PKT_RATE,
+		BNA_80K_PKT_RATE,
+	};
+	static const u32 load_level[] = {
+		BNA_LOW_LOAD_4, BNA_LOW_LOAD_3, BNA_LOW_LOAD_2,
+		BNA_LOW_LOAD_1, BNA_HIGH_LOAD_1, BNA_HIGH_LOAD_2,
+		BNA_HIGH_LOAD_3, BNA_HIGH_LOAD_4,
+	};
+	u32 small_cnt = pkt->small_pkt_cnt;
+	u32 large_cnt = pkt->large_pkt_cnt;
+	u32 total = small_cnt + large_cnt;
+	u32 nr_limits = sizeof(rate_limit) / sizeof(rate_limit[0]);
+	u32 load, bias, k;
+
+	/* pick the first rate bracket the total rate falls under;
+	 * anything at or above the top bracket is the highest load */
+	load = load_level[nr_limits];
+	for (k = 0; k < nr_limits; k++) {
+		if (total < rate_limit[k]) {
+			load = load_level[k];
+			break;
+		}
+	}
+
+	/* bias towards the large-packet column unless small packets
+	 * outnumber large ones by more than 2:1 */
+	bias = (small_cnt > 2 * large_cnt) ? 0 : 1;
+
+	/* restart the sampling window */
+	pkt->small_pkt_cnt = 0;
+	pkt->large_pkt_cnt = 0;
+
+	return intr_mod_vector[load][bias];
+}
diff -ruP net-next-2.6.33-rc5-orig/drivers/net/bna/bna_queue.c net-next-2.6.33-rc5-mod/drivers/net/bna/bna_queue.c
--- net-next-2.6.33-rc5-orig/drivers/net/bna/bna_queue.c 1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6.33-rc5-mod/drivers/net/bna/bna_queue.c 2010-02-12 01:39:41.364909000 -0800
@@ -0,0 +1,394 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * @file bna_queue.c BNA Queues
+ */
+
+#include "cna.h"
+#include "bna.h"
+#include "bna_hwreg.h"
+#include "bna_priv.h"
+#include "bfi/bfi_ll.h"
+
+#define BNA_Q_IDLE_STATE 0x00008001
+/*
+ *-----------------------------------------------------------------------------
+ * bna_txq_config()
+ *
+ * For TxQ "txq_id", it configures the Tx-Queue as specified by "cfg_ptr".
+ * Builds the queue descriptor in a local struct, selects the HQM RxTx
+ * queue RAM page, copies the descriptor into the hardware entry for
+ * txq_id, and initializes the software queue state (doorbell pointer,
+ * producer/consumer indices).
+ *-----------------------------------------------------------------------------
+ */
+void bna_txq_config(struct bna_dev *dev, struct bna_txq *q_ptr,
+ unsigned int txq_id, const struct bna_txq_config *cfg_ptr)
+{
+ struct bna_rxtx_q_mem *q_mem;
+ struct bna_txq_mem txq_cfg, *txq_mem;
+ const struct bna_qpt *qpt = &cfg_ptr->qpt;
+ struct bna_dma_addr cur_q_addr;
+ struct bna_doorbell_qset *qset;
+ u32 pg_num;
+
+ /* first entry of the queue page table = current queue page */
+ cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
+
+ /*
+ * Fill out structure, to be subsequently written
+ * to hardware
+ */
+ txq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
+ txq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
+
+ txq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
+ txq_cfg.cur_q_entry_hi = cur_q_addr.msb;
+
+ txq_cfg.pg_cnt_n_prd_ptr = (qpt->page_count << 16) | 0x0;
+
+ /* entry size and page size are both stored in words (>> 2) */
+ txq_cfg.entry_n_pg_size =
+ ((BNA_TXQ_ENTRY_SIZE >> 2) << 16) | (qpt->page_size >> 2);
+ txq_cfg.int_blk_n_cns_ptr =
+ ((((u8) cfg_ptr->
+ ib_seg_index) << 24) | (((u8) cfg_ptr->
+ ib_id) << 16) | 0x0);
+ txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
+ txq_cfg.nxt_qid_n_fid_n_pri =
+ (((cfg_ptr->txf_id & 0x3f) << 3) | (cfg_ptr->priority & 0x3));
+ /* NOTE(review): wrr_quota is used for both the cquota and the
+ * rquota halves of this word -- confirm that is intended */
+ txq_cfg.wvc_n_cquota_n_rquota =
+ (((cfg_ptr->wrr_quota & 0xfff) << 12) | (cfg_ptr->
+ wrr_quota & 0xfff));
+
+ /* select the HQM RxTx queue RAM page; must precede the entry
+ * writes below since the RAM window is paged */
+ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + dev->port,
+ HQM_RXTX_Q_RAM_BASE_OFFSET);
+
+ writel(pg_num, dev->regs.page_addr);
+ /* Write to h/w */
+ q_mem = (struct bna_rxtx_q_mem *)
+ BNA_GET_MEM_BASE_ADDR(dev->bar0, HQM_RXTX_Q_RAM_BASE_OFFSET);
+
+ txq_mem = &q_mem[txq_id].txq;
+
+ /* NOTE(review): only the DMA address words are byte-swapped
+ * with htonl() -- presumably the hardware expects big-endian
+ * addresses; confirm against the HQM register spec */
+ writel(htonl(txq_cfg.pg_tbl_addr_lo), &txq_mem->pg_tbl_addr_lo);
+ writel(htonl(txq_cfg.pg_tbl_addr_hi), &txq_mem->pg_tbl_addr_hi);
+ writel(htonl(txq_cfg.cur_q_entry_lo), &txq_mem->cur_q_entry_lo);
+ writel(htonl(txq_cfg.cur_q_entry_hi), &txq_mem->cur_q_entry_hi);
+
+ writel(txq_cfg.pg_cnt_n_prd_ptr, &txq_mem->pg_cnt_n_prd_ptr);
+ writel(txq_cfg.entry_n_pg_size, &txq_mem->entry_n_pg_size);
+ writel(txq_cfg.int_blk_n_cns_ptr, &txq_mem->int_blk_n_cns_ptr);
+ writel(txq_cfg.cns_ptr2_n_q_state, &txq_mem->cns_ptr2_n_q_state);
+ writel(txq_cfg.nxt_qid_n_fid_n_pri, &txq_mem->nxt_qid_n_fid_n_pri);
+ writel(txq_cfg.wvc_n_cquota_n_rquota, &txq_mem->wvc_n_cquota_n_rquota);
+
+ /* set up the software-side queue state */
+ qset = (struct bna_doorbell_qset *)
+ BNA_GET_DOORBELL_BASE_ADDR(dev->bar0);
+ q_ptr->doorbell = &qset[txq_id].txq[0];
+
+ q_ptr->q.producer_index = 0;
+ q_ptr->q.consumer_index = 0;
+}
+
+/**
+ * bna_txq_stop()
+ *
+ * Stops the TxQ identified by the TxQ Id.
+ * Should be called with a lock held.
+ * The driver should wait for the response to
+ * conclude if the Q stop is successful or not.
+ *
+ * @param[in] dev    - pointer to BNA device structure
+ * @param[in] txq_id - Id of the TxQ
+ *
+ * @return BNA_OK in case of success, else BNA_FAIL
+ */
+enum bna_status
+bna_txq_stop(struct bna_dev *dev, u32 txq_id)
+{
+	struct bfi_ll_q_stop_req ll_req;
+	/*
+	 * Use a 64-bit shift: "1 << txq_id" is a 32-bit int shift,
+	 * which is undefined for txq_id == 31 and silently drops
+	 * queue ids 32..63.
+	 */
+	u64 bit_mask = (u64) 1 << txq_id;
+
+	/* NOTE(review): other mailbox builders use bfi_h2i_set();
+	 * kept as explicit field assignments to preserve behavior */
+	ll_req.mh.msg_class = BFI_MC_LL;
+	ll_req.mh.msg_id = BFI_LL_H2I_TXQ_STOP_REQ;
+	ll_req.mh.mtag.i2htok = 0;
+
+	ll_req.q_id_mask[0] = htonl(lower_32_bits(bit_mask));
+	ll_req.q_id_mask[1] = htonl(upper_32_bits(bit_mask));
+
+	/* send to f/w */
+	return bna_mbox_send(dev, &ll_req, sizeof(ll_req), dev->cbarg);
+}
+
+/*
+ *-----------------------------------------------------------------------------
+ * bna_rxq_config()
+ *
+ * For RxQ "rxq_id", it configures the Rx-Queue as specified by "cfg_ptr".
+ * Builds the queue descriptor in a local struct, selects the HQM RxTx
+ * queue RAM page, copies the descriptor into the hardware entry for
+ * rxq_id, and initializes the software queue state (doorbell pointer,
+ * producer/consumer indices).
+ *-----------------------------------------------------------------------------
+ */
+void bna_rxq_config(struct bna_dev *dev, struct bna_rxq *q_ptr,
+ unsigned int rxq_id, const struct bna_rxq_config *cfg_ptr)
+{
+ struct bna_rxtx_q_mem *q_mem;
+ struct bna_rxq_mem rxq_cfg, *rxq_mem;
+ const struct bna_qpt *qpt = &cfg_ptr->qpt;
+ struct bna_dma_addr cur_q_addr;
+ struct bna_doorbell_qset *qset;
+ u32 pg_num;
+
+ /* first entry of the queue page table = current queue page */
+ cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
+ /*
+ * Fill out structure, to be subsequently written
+ * to hardware
+ */
+ rxq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
+ rxq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
+ rxq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
+ rxq_cfg.cur_q_entry_hi = cur_q_addr.msb;
+
+ rxq_cfg.pg_cnt_n_prd_ptr = (qpt->page_count << 16) | 0x0;
+ /* entry size and page size are both stored in words (>> 2) */
+ rxq_cfg.entry_n_pg_size =
+ ((BNA_RXQ_ENTRY_SIZE >> 2) << 16) | (qpt->page_size >> 2);
+ rxq_cfg.sg_n_cq_n_cns_ptr = (((u8) cfg_ptr->cq_id) << 16) | 0x0;
+ rxq_cfg.buf_sz_n_q_state =
+ (cfg_ptr->buffer_size << 16) | BNA_Q_IDLE_STATE;
+ /* NOTE(review): magic (0x3 << 8) in next_qid -- meaning not
+ * derivable here; confirm against the HQM register spec */
+ rxq_cfg.next_qid = 0x0 | (0x3 << 8);
+
+ /* Write the page number register; must precede the entry
+ * writes below since the RAM window is paged */
+ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + dev->port,
+ HQM_RXTX_Q_RAM_BASE_OFFSET);
+ writel(pg_num, dev->regs.page_addr);
+
+ /* Write to h/w */
+ q_mem = (struct bna_rxtx_q_mem *)
+ BNA_GET_MEM_BASE_ADDR(dev->bar0, HQM_RXTX_Q_RAM_BASE_OFFSET);
+ rxq_mem = &q_mem[rxq_id].rxq;
+
+ /* DMA address words are byte-swapped; remaining words are
+ * written as-is (same convention as bna_txq_config()) */
+ writel(htonl(rxq_cfg.pg_tbl_addr_lo), &rxq_mem->pg_tbl_addr_lo);
+ writel(htonl(rxq_cfg.pg_tbl_addr_hi), &rxq_mem->pg_tbl_addr_hi);
+ writel(htonl(rxq_cfg.cur_q_entry_lo), &rxq_mem->cur_q_entry_lo);
+ writel(htonl(rxq_cfg.cur_q_entry_hi), &rxq_mem->cur_q_entry_hi);
+
+ writel(rxq_cfg.pg_cnt_n_prd_ptr, &rxq_mem->pg_cnt_n_prd_ptr);
+ writel(rxq_cfg.entry_n_pg_size, &rxq_mem->entry_n_pg_size);
+ writel(rxq_cfg.sg_n_cq_n_cns_ptr, &rxq_mem->sg_n_cq_n_cns_ptr);
+ writel(rxq_cfg.buf_sz_n_q_state, &rxq_mem->buf_sz_n_q_state);
+ writel(rxq_cfg.next_qid, &rxq_mem->next_qid);
+
+ /* set up the software-side queue state */
+ qset = (struct bna_doorbell_qset *)
+ BNA_GET_DOORBELL_BASE_ADDR(dev->bar0);
+ q_ptr->doorbell = &qset[rxq_id].rxq[0];
+
+ q_ptr->q.producer_index = 0;
+ q_ptr->q.consumer_index = 0;
+}
+
+/**
+ * bna_rxq_stop()
+ *
+ * Stops the RxQ identified by the RxQ Id.
+ * Should be called with a lock held.
+ * The driver should wait for the response to
+ * conclude if the Q stop is successful or not.
+ *
+ * @param[in] dev    - pointer to BNA device structure
+ * @param[in] rxq_id - Id of the RxQ
+ *
+ * @return BNA_OK in case of success, else BNA_FAIL
+ */
+enum bna_status
+bna_rxq_stop(struct bna_dev *dev, u32 rxq_id)
+{
+	struct bfi_ll_q_stop_req ll_req;
+	/*
+	 * Use a 64-bit shift: "1 << rxq_id" is a 32-bit int shift,
+	 * which is undefined for rxq_id == 31 and silently drops
+	 * queue ids 32..63.
+	 */
+	u64 bit_mask = (u64) 1 << rxq_id;
+
+	/* NOTE(review): other mailbox builders use bfi_h2i_set();
+	 * kept as explicit field assignments to preserve behavior */
+	ll_req.mh.msg_class = BFI_MC_LL;
+	ll_req.mh.msg_id = BFI_LL_H2I_RXQ_STOP_REQ;
+	ll_req.mh.mtag.i2htok = 0;
+
+	ll_req.q_id_mask[0] = htonl(lower_32_bits(bit_mask));
+	ll_req.q_id_mask[1] = htonl(upper_32_bits(bit_mask));
+
+	/* send to f/w */
+	return bna_mbox_send(dev, &ll_req, sizeof(ll_req), dev->cbarg);
+}
+
+/**
+ * bna_multi_rxq_stop()
+ *
+ * Stops every RxQ whose bit is set in "rxq_id_mask" with a single
+ * firmware request.
+ *
+ * @param[in] dev         - pointer to BNA device structure
+ * @param[in] rxq_id_mask - bitmask of RxQ ids to stop
+ *
+ * @return BNA_OK in case of success, else BNA_FAIL
+ */
+enum bna_status bna_multi_rxq_stop(struct bna_dev *dev,
+			u64 rxq_id_mask)
+{
+	struct bfi_ll_q_stop_req req;
+
+	bfi_h2i_set(req.mh, BFI_MC_LL, BFI_LL_H2I_RXQ_STOP_REQ, 0);
+	req.q_id_mask[0] = htonl(lower_32_bits(rxq_id_mask));
+	req.q_id_mask[1] = htonl(upper_32_bits(rxq_id_mask));
+
+	/* hand the request to the firmware mailbox */
+	return bna_mbox_send(dev, &req, sizeof(req), dev->cbarg);
+}
+
+/*
+ *-----------------------------------------------------------------------------
+ * bna_cq_config()
+ *
+ * For CQ "cq_id", it configures the Rx-Completion Queue as specified by
+ * "cfg_ptr".  Builds the descriptor in a local struct, selects the HQM
+ * CQ RAM page, copies the descriptor into the hardware entry for cq_id,
+ * and resets the software producer/consumer indices.
+ *-----------------------------------------------------------------------------
+ */
+void bna_cq_config(struct bna_dev *dev, struct bna_cq *q_ptr,
+ unsigned int cq_id, const struct bna_cq_config *cfg_ptr)
+{
+ struct bna_cq_mem cq_cfg, *cq_mem;
+ const struct bna_qpt *qpt = &cfg_ptr->qpt;
+ struct bna_dma_addr cur_q_addr;
+ u32 pg_num;
+
+ /* first entry of the queue page table = current queue page */
+ cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
+
+ /*
+ * Fill out structure, to be subsequently written
+ * to hardware
+ */
+ cq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
+ cq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
+ cq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
+ cq_cfg.cur_q_entry_hi = cur_q_addr.msb;
+
+ cq_cfg.pg_cnt_n_prd_ptr = (qpt->page_count << 16) | 0x0;
+ /* entry size and page size are both stored in words (>> 2) */
+ cq_cfg.entry_n_pg_size =
+ ((BNA_CQ_ENTRY_SIZE >> 2) << 16) | (qpt->page_size >> 2);
+ cq_cfg.int_blk_n_cns_ptr =
+ ((((u8) cfg_ptr->
+ ib_seg_index) << 24) | (((u8) cfg_ptr->
+ ib_id) << 16) | 0x0);
+ cq_cfg.q_state = BNA_Q_IDLE_STATE;
+
+ /* Write the page number register; must precede the entry
+ * writes below since the RAM window is paged */
+ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + dev->port,
+ HQM_CQ_RAM_BASE_OFFSET);
+
+ writel(pg_num, dev->regs.page_addr);
+ /* H/W write: DMA address words are byte-swapped, remaining
+ * words written as-is (same convention as bna_txq_config()) */
+ cq_mem = (struct bna_cq_mem *)
+ BNA_GET_MEM_BASE_ADDR(dev->bar0, HQM_CQ_RAM_BASE_OFFSET);
+ writel(htonl(cq_cfg.pg_tbl_addr_lo), &cq_mem[cq_id].pg_tbl_addr_lo);
+ writel(htonl(cq_cfg.pg_tbl_addr_hi), &cq_mem[cq_id].pg_tbl_addr_hi);
+ writel(htonl(cq_cfg.cur_q_entry_lo), &cq_mem[cq_id].cur_q_entry_lo);
+ writel(htonl(cq_cfg.cur_q_entry_hi), &cq_mem[cq_id].cur_q_entry_hi);
+
+ writel(cq_cfg.pg_cnt_n_prd_ptr, &cq_mem[cq_id].pg_cnt_n_prd_ptr);
+ writel(cq_cfg.entry_n_pg_size, &cq_mem[cq_id].entry_n_pg_size);
+ writel(cq_cfg.int_blk_n_cns_ptr, &cq_mem[cq_id].int_blk_n_cns_ptr);
+ writel(cq_cfg.q_state, &cq_mem[cq_id].q_state);
+
+ /* reset the software-side queue indices */
+ q_ptr->q.producer_index = 0;
+ q_ptr->q.consumer_index = 0;
+
+}
+
+/*
+ * bna_ib_idx_reset()
+ *
+ * For the specified IB, it clears the IB index
+ *
+ * @param[in] dev - pointer to the BNA device structure
+ * @param[in] cfg_ptr - pointer to IB Configuration Structure.
+ *
+ * @return void
+ */
+void
+bna_ib_idx_reset(struct bna_dev *dev,
+ const struct bna_ib_config *cfg_ptr)
+{
+ u32 i, pg_num, *ib_idx;
+
+ /* Select the per-port HQM index-table RAM page */
+ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + dev->port,
+ HQM_INDX_TBL_RAM_BASE_OFFSET);
+ writel(pg_num, dev->regs.page_addr);
+
+ /* Zero seg_size words starting at this IB's index-table offset */
+ ib_idx = (u32 *)
+ BNA_GET_MEM_BASE_ADDR(dev->bar0, HQM_INDX_TBL_RAM_BASE_OFFSET);
+ ib_idx += cfg_ptr->index_table_offset;
+ /*
+ * NOTE(review): this clears what appears to be ioremapped device
+ * memory with plain CPU stores rather than writel(); confirm this
+ * is intentional and safe on all supported platforms.
+ */
+ for (i = 0; i < cfg_ptr->seg_size; i++)
+ *ib_idx++ = 0;
+}
+
+/*
+ * bna_ib_config_set()
+ *
+ * For IB "ib_id", it configures the Interrupt Block specified by "cfg_ptr".
+ *
+ * @param[in] dev - pointer to the BNA device structure
+ * @param[in] ib_ptr - pointer to IB Data Structure.
+ * @param[in] ib_id - interrupt-block ID
+ * @param[in] cfg_ptr - pointer to IB Configuration Structure.
+ *
+ * @return void
+ */
+void bna_ib_config_set(struct bna_dev *dev, struct bna_ib *ib_ptr,
+ unsigned int ib_id, const struct bna_ib_config *cfg_ptr)
+{
+ struct bna_ib_blk_mem ib_cfg, *ib_mem;
+ u32 pg_num;
+ struct bna_doorbell_qset *qset;
+
+ /* DMA address of the host-side IB segment, split into lo/hi words */
+ ib_cfg.host_addr_lo = (u32) (cfg_ptr->ib_seg_addr.lsb);
+ ib_cfg.host_addr_hi = (u32) (cfg_ptr->ib_seg_addr.msb);
+
+ /* coalescing timer [31:16] | control flags [15:8] | MSI-X vector [7:0] */
+ ib_cfg.clsc_n_ctrl_n_msix =
+ ((cfg_ptr->coalescing_timer << 16) | (cfg_ptr->
+ control_flags << 8) |
+ (cfg_ptr->msix_vector));
+ /* inter-pkt timer nibble [19:16] | segment size [15:8] | index offset */
+ ib_cfg.ipkt_n_ent_n_idxof =
+ ((cfg_ptr->interpkt_timer & 0xf) << 16) | (cfg_ptr->
+ seg_size << 8) |
+ (cfg_ptr->index_table_offset);
+ /* inter-packet count in the top byte; unacked count starts at 0 */
+ ib_cfg.ipkt_cnt_cfg_n_unacked = (cfg_ptr->interpkt_count << 24);
+
+ /* Write the page number register */
+ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + dev->port,
+ HQM_IB_RAM_BASE_OFFSET);
+ writel(pg_num, dev->regs.page_addr);
+
+ ib_mem = (struct bna_ib_blk_mem *)
+ BNA_GET_MEM_BASE_ADDR(dev->bar0, HQM_IB_RAM_BASE_OFFSET);
+
+ /* Host address words are written byte-swapped; control words as-is */
+ writel(htonl(ib_cfg.host_addr_lo), &ib_mem[ib_id].host_addr_lo);
+ writel(htonl(ib_cfg.host_addr_hi), &ib_mem[ib_id].host_addr_hi);
+
+ writel(ib_cfg.clsc_n_ctrl_n_msix, &ib_mem[ib_id].clsc_n_ctrl_n_msix);
+ writel(ib_cfg.ipkt_n_ent_n_idxof, &ib_mem[ib_id].ipkt_n_ent_n_idxof);
+ writel(ib_cfg.ipkt_cnt_cfg_n_unacked,
+ &ib_mem[ib_id].ipkt_cnt_cfg_n_unacked);
+
+ /* Two IBs share each doorbell qset entry: 0x20-byte halves of ib0[] */
+ qset = (struct bna_doorbell_qset *)
+ BNA_GET_DOORBELL_BASE_ADDR(dev->bar0);
+ ib_ptr->doorbell_addr =
+ (&qset[ib_id >> 1].ib0[(ib_id & 0x1) * (0x20 >> 2)]);
+
+ /* Value later written to the doorbell to ack/re-arm this IB */
+ ib_ptr->doorbell_ack =
+ BNA_DOORBELL_IB_INT_ACK(cfg_ptr->coalescing_timer, 0);
+
+ /* Start with a clean index segment for this IB */
+ bna_ib_idx_reset(dev, cfg_ptr);
+}
+
+/*
+ * bna_ib_disable()
+ *
+ * Disables the Interrupt Block "ib_id".
+ *
+ * @param[in] bna_dev - pointer to the BNA device (currently unused)
+ * @param[in] ib_ptr - pointer to IB Data Structure.
+ *
+ * @return void
+ */
+void
+bna_ib_disable(struct bna_dev *bna_dev, const struct bna_ib *ib_ptr)
+{
+ /* Writing the disable code to the IB doorbell turns the IB off */
+ writel(BNA_DOORBELL_IB_INT_DISABLE, ib_ptr->doorbell_addr);
+}
diff -ruP net-next-2.6.33-rc5-orig/drivers/net/bna/bnad_ethtool.c net-next-2.6.33-rc5-mod/drivers/net/bna/bnad_ethtool.c
--- net-next-2.6.33-rc5-orig/drivers/net/bna/bnad_ethtool.c 1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6.33-rc5-mod/drivers/net/bna/bnad_ethtool.c 2010-02-12 01:39:41.386909000 -0800
@@ -0,0 +1,1100 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ * bna_ethtool.c Brocade 10G PCIe Ethernet driver.
+ */
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+
+#include "bnad.h"
+#include "cna.h"
+#include "bna_hwreg.h"
+#include "bna_iocll.h"
+#include "bnad_defs.h"
+#include "phyport_defs.h"
+
+/*
+ * Number of fixed stat names exported via ethtool: the generic
+ * net_device_stats counters, the driver-private counters, and the
+ * hardware counters up to the first RxF block plus one TxF block.
+ * Per-RxF and per-queue names are generated at runtime by
+ * bnad_get_strings() and added in bnad_get_stats_count_locked().
+ */
+#define BNAD_ETHTOOL_STATS_NUM \
+ (sizeof(struct net_device_stats) / sizeof(unsigned long) + \
+ sizeof(struct bnad_drv_stats) / sizeof(u64) + \
+ (offsetof(struct bna_stats, rxf_stats[0]) + \
+ sizeof(struct bna_stats_txf)) / sizeof(u64))
+
+/*
+ * Names for the fixed part of the ethtool statistics, in the exact
+ * order in which bnad_get_ethtool_stats() fills the values.
+ *
+ * Declared as a two-dimensional [ETH_GSTRING_LEN] array (not an array
+ * of pointers to string literals) so that bnad_get_strings() can
+ * safely memcpy() ETH_GSTRING_LEN bytes per entry without reading
+ * past the end of a short literal.
+ */
+static char bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM]
+				  [ETH_GSTRING_LEN] = {
+	"rx_packets",
+	"tx_packets",
+	"rx_bytes",
+	"tx_bytes",
+	"rx_errors",
+	"tx_errors",
+	"rx_dropped",
+	"tx_dropped",
+	"multicast",
+	"collisions",
+
+	"rx_length_errors",
+	"rx_over_errors",
+	"rx_crc_errors",
+	"rx_frame_errors",
+	"rx_fifo_errors",
+	"rx_missed_errors",
+
+	"tx_aborted_errors",
+	"tx_carrier_errors",
+	"tx_fifo_errors",
+	"tx_heartbeat_errors",
+	"tx_window_errors",
+
+	"rx_compressed",
+	"tx_compressed",
+
+	"netif_queue_stop",
+	"netif_queue_wakeup",
+	"tso4",
+	"tso6",
+	"tso_err",
+	"tcpcsum_offload",
+	"udpcsum_offload",
+	"csum_help",
+	"csum_help_err",
+	"hw_stats_updates",
+	"napi_complete",
+
+	"mac_frame_64",
+	"mac_frame_65_127",
+	"mac_frame_128_255",
+	"mac_frame_256_511",
+	"mac_frame_512_1023",
+	"mac_frame_1024_1518",
+	"mac_frame_1518_1522",
+	"mac_rx_bytes",
+	"mac_rx_packets",
+	"mac_rx_fcs_error",
+	"mac_rx_multicast",
+	"mac_rx_broadcast",
+	"mac_rx_control_frames",
+	"mac_rx_pause",
+	"mac_rx_unknown_opcode",
+	"mac_rx_alignment_error",
+	"mac_rx_frame_length_error",
+	"mac_rx_code_error",
+	"mac_rx_carrier_sense_error",
+	"mac_rx_undersize",
+	"mac_rx_oversize",
+	"mac_rx_fragments",
+	"mac_rx_jabber",
+	"mac_rx_drop",
+
+	"bpc_rx_pause_0",
+	"bpc_rx_pause_1",
+	"bpc_rx_pause_2",
+	"bpc_rx_pause_3",
+	"bpc_rx_pause_4",
+	"bpc_rx_pause_5",
+	"bpc_rx_pause_6",
+	"bpc_rx_pause_7",
+	"bpc_rx_zero_pause_0",
+	"bpc_rx_zero_pause_1",
+	"bpc_rx_zero_pause_2",
+	"bpc_rx_zero_pause_3",
+	"bpc_rx_zero_pause_4",
+	"bpc_rx_zero_pause_5",
+	"bpc_rx_zero_pause_6",
+	"bpc_rx_zero_pause_7",
+	"bpc_rx_first_pause_0",
+	"bpc_rx_first_pause_1",
+	"bpc_rx_first_pause_2",
+	"bpc_rx_first_pause_3",
+	"bpc_rx_first_pause_4",
+	"bpc_rx_first_pause_5",
+	"bpc_rx_first_pause_6",
+	"bpc_rx_first_pause_7",
+
+	"rad_rx_frames",
+	"rad_rx_octets",
+	"rad_rx_vlan_frames",
+	"rad_rx_ucast",
+	"rad_rx_ucast_octets",
+	"rad_rx_ucast_vlan",
+	"rad_rx_mcast",
+	"rad_rx_mcast_octets",
+	"rad_rx_mcast_vlan",
+	"rad_rx_bcast",
+	"rad_rx_bcast_octets",
+	"rad_rx_bcast_vlan",
+	"rad_rx_drops",
+
+	"fc_rx_ucast_octets",
+	"fc_rx_ucast",
+	"fc_rx_ucast_vlan",
+	"fc_rx_mcast_octets",
+	"fc_rx_mcast",
+	"fc_rx_mcast_vlan",
+	"fc_rx_bcast_octets",
+	"fc_rx_bcast",
+	"fc_rx_bcast_vlan",
+
+	"mac_tx_bytes",
+	"mac_tx_packets",
+	"mac_tx_multicast",
+	"mac_tx_broadcast",
+	"mac_tx_pause",
+	"mac_tx_deferral",
+	"mac_tx_excessive_deferral",
+	"mac_tx_single_collision",
+	/* typo fix: was "mac_tx_muliple_collision" */
+	"mac_tx_multiple_collision",
+	"mac_tx_late_collision",
+	"mac_tx_excessive_collision",
+	"mac_tx_total_collision",
+	"mac_tx_pause_honored",
+	"mac_tx_drop",
+	"mac_tx_jabber",
+	"mac_tx_fcs_error",
+	"mac_tx_control_frame",
+	"mac_tx_oversize",
+	"mac_tx_undersize",
+	"mac_tx_fragments",
+
+	"bpc_tx_pause_0",
+	"bpc_tx_pause_1",
+	"bpc_tx_pause_2",
+	"bpc_tx_pause_3",
+	"bpc_tx_pause_4",
+	"bpc_tx_pause_5",
+	"bpc_tx_pause_6",
+	"bpc_tx_pause_7",
+	"bpc_tx_zero_pause_0",
+	"bpc_tx_zero_pause_1",
+	"bpc_tx_zero_pause_2",
+	"bpc_tx_zero_pause_3",
+	"bpc_tx_zero_pause_4",
+	"bpc_tx_zero_pause_5",
+	"bpc_tx_zero_pause_6",
+	"bpc_tx_zero_pause_7",
+	"bpc_tx_first_pause_0",
+	"bpc_tx_first_pause_1",
+	"bpc_tx_first_pause_2",
+	"bpc_tx_first_pause_3",
+	"bpc_tx_first_pause_4",
+	"bpc_tx_first_pause_5",
+	"bpc_tx_first_pause_6",
+	"bpc_tx_first_pause_7",
+
+	"fc_tx_ucast_octets",
+	"fc_tx_ucast",
+	"fc_tx_ucast_vlan",
+	"fc_tx_mcast_octets",
+	"fc_tx_mcast",
+	"fc_tx_mcast_vlan",
+	"fc_tx_bcast_octets",
+	"fc_tx_bcast",
+	"fc_tx_bcast_vlan",
+	"fc_tx_parity_errors",
+	"fc_tx_timeout",
+	"fc_tx_fid_parity_errors",
+
+	"txf0_ucast_octets",
+	"txf0_ucast",
+	"txf0_ucast_vlan",
+	"txf0_mcast_octets",
+	"txf0_mcast",
+	"txf0_mcast_vlan",
+	"txf0_bcast_octets",
+	"txf0_bcast",
+	"txf0_bcast_vlan",
+	"txf0_errors",
+	"txf0_filter_vlan",
+	"txf0_filter_mac_sa"
+};
+
+/* Forward declarations for handlers referenced before their definitions */
+static int bnad_get_regs_len(struct net_device *netdev);
+static int bnad_get_stats_count_locked(struct net_device *netdev);
+
+/*
+ * bnad_get_settings - ethtool get_settings handler.
+ * Reports a 10G full-duplex fibre port; speed/duplex are only valid
+ * while the carrier is up.
+ */
+static int bnad_get_settings(struct net_device *netdev,
+			     struct ethtool_cmd *cmd)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bna_port_param pp;
+
+	/* Snapshot the port parameters under the device lock */
+	spin_lock_irq(&bnad->priv_lock);
+	bna_port_param_get(bnad->priv, &pp);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (pp.speed == BNA_LINK_SPEED_10Gbps) {
+		cmd->supported = SUPPORTED_10000baseT_Full;
+		cmd->advertising = ADVERTISED_10000baseT_Full;
+	}
+
+	if (!pp.autoneg) {
+		cmd->autoneg = AUTONEG_DISABLE;
+	} else {
+		cmd->supported |= SUPPORTED_Autoneg;
+		cmd->advertising |= ADVERTISED_Autoneg;
+		cmd->autoneg = AUTONEG_ENABLE;
+	}
+
+	/* Always a fibre port */
+	cmd->supported |= SUPPORTED_FIBRE;
+	cmd->advertising |= ADVERTISED_FIBRE;
+	cmd->port = PORT_FIBRE;
+	cmd->phy_address = 0;
+
+	if (!netif_carrier_ok(netdev)) {
+		/* Link is down: speed and duplex are unknown */
+		cmd->speed = -1;
+		cmd->duplex = -1;
+	} else {
+		cmd->speed = SPEED_10000;
+		cmd->duplex = DUPLEX_FULL;
+	}
+
+	cmd->transceiver = XCVR_EXTERNAL;
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+
+	return 0;
+}
+
+/*
+ * bnad_set_settings - ethtool set_settings handler.
+ * The adapter runs at a fixed 10G full duplex, so the only accepted
+ * request is a forced 10G/full setting; everything else fails.
+ */
+static int bnad_set_settings(struct net_device *netdev,
+			     struct ethtool_cmd *cmd)
+{
+	if (cmd->autoneg != AUTONEG_ENABLE &&
+	    cmd->speed == SPEED_10000 && cmd->duplex == DUPLEX_FULL)
+		return 0;
+
+	return -EOPNOTSUPP;
+}
+
+/*
+ * bnad_get_drvinfo - ethtool get_drvinfo handler.
+ * Fills in driver name/version, firmware version (queried from the
+ * IOC under the device lock) and the PCI bus address.
+ */
+static void bnad_get_drvinfo(struct net_device *netdev,
+			     struct ethtool_drvinfo *drvinfo)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bfa_ioc_attr *ioc_attr;
+
+	strcpy(drvinfo->driver, BNAD_NAME);
+	strcpy(drvinfo->version, BNAD_VERSION);
+
+	/* kzalloc() already returns zeroed memory - the old memset()
+	 * that followed it was redundant and has been dropped. */
+	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
+	if (ioc_attr) {
+		spin_lock_irq(&bnad->priv_lock);
+		bna_iocll_getattr(bnad->priv, ioc_attr);
+		spin_unlock_irq(&bnad->priv_lock);
+
+		strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
+			sizeof(drvinfo->fw_version) - 1);
+		kfree(ioc_attr);
+	}
+
+	/* Leave room for the terminating NUL - a full-length strncpy()
+	 * could leave bus_info unterminated. */
+	strncpy(drvinfo->bus_info, pci_name(bnad->pcidev),
+		ETHTOOL_BUSINFO_LEN - 1);
+}
+
+/*
+ * get_regs() - snapshot (or count) the adapter register set.
+ *
+ * When @regs is non-NULL each register value is read into it; when
+ * @regs is NULL only the number of registers is counted so callers
+ * can size the buffer (see bnad_get_regs_len()).  Returns the number
+ * of registers visited.
+ *
+ * NOTE(review): the whole dump runs under spin_lock_irq(priv_lock);
+ * with several hundred readl()s this keeps interrupts off for a
+ * noticeable time.
+ */
+static int get_regs(struct bnad *bnad, u32 * regs)
+{
+ int num = 0, i;
+ u32 reg_addr;
+
+/* Read one register into the buffer, or just count it if regs == NULL */
+#define BNAD_GET_REG(addr) \
+do { \
+ if (regs) \
+ regs[num++] = readl(bnad->bar0 + (addr)); \
+ else \
+ num++; \
+} while (0)
+
+ spin_lock_irq(&bnad->priv_lock);
+
+ /* DMA Block Internal Registers */
+ BNAD_GET_REG(DMA_CTRL_REG0);
+ BNAD_GET_REG(DMA_CTRL_REG1);
+ BNAD_GET_REG(DMA_ERR_INT_STATUS);
+ BNAD_GET_REG(DMA_ERR_INT_ENABLE);
+ BNAD_GET_REG(DMA_ERR_INT_STATUS_SET);
+
+ /* APP Block Register Address Offset from BAR0 */
+ BNAD_GET_REG(HOSTFN0_INT_STATUS);
+ BNAD_GET_REG(HOSTFN0_INT_MASK);
+ BNAD_GET_REG(HOST_PAGE_NUM_FN0);
+ BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN0);
+ BNAD_GET_REG(FN0_PCIE_ERR_REG);
+ BNAD_GET_REG(FN0_ERR_TYPE_STATUS_REG);
+ BNAD_GET_REG(FN0_ERR_TYPE_MSK_STATUS_REG);
+
+ BNAD_GET_REG(HOSTFN1_INT_STATUS);
+ BNAD_GET_REG(HOSTFN1_INT_MASK);
+ BNAD_GET_REG(HOST_PAGE_NUM_FN1);
+ BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN1);
+ BNAD_GET_REG(FN1_PCIE_ERR_REG);
+ BNAD_GET_REG(FN1_ERR_TYPE_STATUS_REG);
+ BNAD_GET_REG(FN1_ERR_TYPE_MSK_STATUS_REG);
+
+ BNAD_GET_REG(PCIE_MISC_REG);
+
+ BNAD_GET_REG(HOST_SEM0_REG);
+ BNAD_GET_REG(HOST_SEM1_REG);
+ BNAD_GET_REG(HOST_SEM2_REG);
+ BNAD_GET_REG(HOST_SEM3_REG);
+ BNAD_GET_REG(HOST_SEM0_INFO_REG);
+ BNAD_GET_REG(HOST_SEM1_INFO_REG);
+ BNAD_GET_REG(HOST_SEM2_INFO_REG);
+ BNAD_GET_REG(HOST_SEM3_INFO_REG);
+
+ BNAD_GET_REG(TEMPSENSE_CNTL_REG);
+ BNAD_GET_REG(TEMPSENSE_STAT_REG);
+
+ BNAD_GET_REG(APP_LOCAL_ERR_STAT);
+ BNAD_GET_REG(APP_LOCAL_ERR_MSK);
+
+ BNAD_GET_REG(PCIE_LNK_ERR_STAT);
+ BNAD_GET_REG(PCIE_LNK_ERR_MSK);
+
+ BNAD_GET_REG(FCOE_FIP_ETH_TYPE);
+ BNAD_GET_REG(RESV_ETH_TYPE);
+
+ BNAD_GET_REG(HOSTFN2_INT_STATUS);
+ BNAD_GET_REG(HOSTFN2_INT_MASK);
+ BNAD_GET_REG(HOST_PAGE_NUM_FN2);
+ BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN2);
+ BNAD_GET_REG(FN2_PCIE_ERR_REG);
+ BNAD_GET_REG(FN2_ERR_TYPE_STATUS_REG);
+ BNAD_GET_REG(FN2_ERR_TYPE_MSK_STATUS_REG);
+
+ BNAD_GET_REG(HOSTFN3_INT_STATUS);
+ BNAD_GET_REG(HOSTFN3_INT_MASK);
+ BNAD_GET_REG(HOST_PAGE_NUM_FN3);
+ BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN3);
+ BNAD_GET_REG(FN3_PCIE_ERR_REG);
+ BNAD_GET_REG(FN3_ERR_TYPE_STATUS_REG);
+ BNAD_GET_REG(FN3_ERR_TYPE_MSK_STATUS_REG);
+
+ /* Host Command Status Registers */
+ /* 16 entries, 3 words each, 0x10 bytes apart */
+ reg_addr = HOST_CMDSTS0_CLR_REG;
+ for (i = 0; i < 16; i++) {
+ BNAD_GET_REG(reg_addr);
+ BNAD_GET_REG(reg_addr + 4);
+ BNAD_GET_REG(reg_addr + 8);
+ reg_addr += 0x10;
+ }
+
+ /* Function ID register */
+ BNAD_GET_REG(FNC_ID_REG);
+
+ /* Function personality register */
+ BNAD_GET_REG(FNC_PERS_REG);
+
+ /* Operation mode register */
+ BNAD_GET_REG(OP_MODE);
+
+ /* LPU0 Registers */
+ BNAD_GET_REG(LPU0_MBOX_CTL_REG);
+ BNAD_GET_REG(LPU0_MBOX_CMD_REG);
+ BNAD_GET_REG(LPU0_MBOX_LINK_0REG);
+ BNAD_GET_REG(LPU1_MBOX_LINK_0REG);
+ BNAD_GET_REG(LPU0_MBOX_STATUS_0REG);
+ BNAD_GET_REG(LPU1_MBOX_STATUS_0REG);
+ BNAD_GET_REG(LPU0_ERR_STATUS_REG);
+ BNAD_GET_REG(LPU0_ERR_SET_REG);
+
+ /* LPU1 Registers */
+ BNAD_GET_REG(LPU1_MBOX_CTL_REG);
+ BNAD_GET_REG(LPU1_MBOX_CMD_REG);
+ BNAD_GET_REG(LPU0_MBOX_LINK_1REG);
+ BNAD_GET_REG(LPU1_MBOX_LINK_1REG);
+ BNAD_GET_REG(LPU0_MBOX_STATUS_1REG);
+ BNAD_GET_REG(LPU1_MBOX_STATUS_1REG);
+ BNAD_GET_REG(LPU1_ERR_STATUS_REG);
+ BNAD_GET_REG(LPU1_ERR_SET_REG);
+
+ /* PSS Registers */
+ BNAD_GET_REG(PSS_CTL_REG);
+ BNAD_GET_REG(PSS_ERR_STATUS_REG);
+ BNAD_GET_REG(ERR_STATUS_SET);
+ BNAD_GET_REG(PSS_RAM_ERR_STATUS_REG);
+
+ /* Catapult CPQ Registers */
+ BNAD_GET_REG(HOSTFN0_LPU0_MBOX0_CMD_STAT);
+ BNAD_GET_REG(HOSTFN0_LPU1_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN0_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN0_MBOX0_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN0_LPU0_MBOX1_CMD_STAT);
+ BNAD_GET_REG(HOSTFN0_LPU1_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN0_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN0_MBOX1_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN1_LPU0_MBOX0_CMD_STAT);
+ BNAD_GET_REG(HOSTFN1_LPU1_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN1_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN1_MBOX0_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN1_LPU0_MBOX1_CMD_STAT);
+ BNAD_GET_REG(HOSTFN1_LPU1_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN1_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN1_MBOX1_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN2_LPU0_MBOX0_CMD_STAT);
+ BNAD_GET_REG(HOSTFN2_LPU1_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN2_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN2_MBOX0_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN2_LPU0_MBOX1_CMD_STAT);
+ BNAD_GET_REG(HOSTFN2_LPU1_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN2_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN2_MBOX1_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN3_LPU0_MBOX0_CMD_STAT);
+ BNAD_GET_REG(HOSTFN3_LPU1_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN3_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN3_MBOX0_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN3_LPU0_MBOX1_CMD_STAT);
+ BNAD_GET_REG(HOSTFN3_LPU1_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN3_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN3_MBOX1_CMD_STAT);
+
+ /* Host Function Force Parity Error Registers */
+ BNAD_GET_REG(HOSTFN0_LPU_FORCE_PERR);
+ BNAD_GET_REG(HOSTFN1_LPU_FORCE_PERR);
+ BNAD_GET_REG(HOSTFN2_LPU_FORCE_PERR);
+ BNAD_GET_REG(HOSTFN3_LPU_FORCE_PERR);
+
+ /* LL Port[0|1] Halt Mask Registers */
+ BNAD_GET_REG(LL_HALT_MSK_P0);
+ BNAD_GET_REG(LL_HALT_MSK_P1);
+
+ /* LL Port[0|1] Error Mask Registers */
+ BNAD_GET_REG(LL_ERR_MSK_P0);
+ BNAD_GET_REG(LL_ERR_MSK_P1);
+
+ /* EMC FLI Registers */
+ BNAD_GET_REG(FLI_CMD_REG);
+ BNAD_GET_REG(FLI_ADDR_REG);
+ BNAD_GET_REG(FLI_CTL_REG);
+ BNAD_GET_REG(FLI_WRDATA_REG);
+ BNAD_GET_REG(FLI_RDDATA_REG);
+ BNAD_GET_REG(FLI_DEV_STATUS_REG);
+ BNAD_GET_REG(FLI_SIG_WD_REG);
+
+ BNAD_GET_REG(FLI_DEV_VENDOR_REG);
+ BNAD_GET_REG(FLI_ERR_STATUS_REG);
+
+ /* RxAdm 0 Registers */
+ BNAD_GET_REG(RAD0_CTL_REG);
+ BNAD_GET_REG(RAD0_PE_PARM_REG);
+ BNAD_GET_REG(RAD0_BCN_REG);
+ BNAD_GET_REG(RAD0_DEFAULT_REG);
+ BNAD_GET_REG(RAD0_PROMISC_REG);
+ BNAD_GET_REG(RAD0_BCNQ_REG);
+ BNAD_GET_REG(RAD0_DEFAULTQ_REG);
+
+ BNAD_GET_REG(RAD0_ERR_STS);
+ BNAD_GET_REG(RAD0_SET_ERR_STS);
+ BNAD_GET_REG(RAD0_ERR_INT_EN);
+ BNAD_GET_REG(RAD0_FIRST_ERR);
+ BNAD_GET_REG(RAD0_FORCE_ERR);
+
+ BNAD_GET_REG(RAD0_MAC_MAN_1H);
+ BNAD_GET_REG(RAD0_MAC_MAN_1L);
+ BNAD_GET_REG(RAD0_MAC_MAN_2H);
+ BNAD_GET_REG(RAD0_MAC_MAN_2L);
+ BNAD_GET_REG(RAD0_MAC_MAN_3H);
+ BNAD_GET_REG(RAD0_MAC_MAN_3L);
+ BNAD_GET_REG(RAD0_MAC_MAN_4H);
+ BNAD_GET_REG(RAD0_MAC_MAN_4L);
+
+ BNAD_GET_REG(RAD0_LAST4_IP);
+
+ /* RxAdm 1 Registers */
+ BNAD_GET_REG(RAD1_CTL_REG);
+ BNAD_GET_REG(RAD1_PE_PARM_REG);
+ BNAD_GET_REG(RAD1_BCN_REG);
+ BNAD_GET_REG(RAD1_DEFAULT_REG);
+ BNAD_GET_REG(RAD1_PROMISC_REG);
+ BNAD_GET_REG(RAD1_BCNQ_REG);
+ BNAD_GET_REG(RAD1_DEFAULTQ_REG);
+
+ BNAD_GET_REG(RAD1_ERR_STS);
+ BNAD_GET_REG(RAD1_SET_ERR_STS);
+ BNAD_GET_REG(RAD1_ERR_INT_EN);
+
+ /* TxA0 Registers */
+ BNAD_GET_REG(TXA0_CTRL_REG);
+ /* TxA0 TSO Sequence # Registers (RO) */
+ for (i = 0; i < 8; i++) {
+ BNAD_GET_REG(TXA0_TSO_TCP_SEQ_REG(i));
+ BNAD_GET_REG(TXA0_TSO_IP_INFO_REG(i));
+ }
+
+ /* TxA1 Registers */
+ BNAD_GET_REG(TXA1_CTRL_REG);
+ /* TxA1 TSO Sequence # Registers (RO) */
+ for (i = 0; i < 8; i++) {
+ BNAD_GET_REG(TXA1_TSO_TCP_SEQ_REG(i));
+ BNAD_GET_REG(TXA1_TSO_IP_INFO_REG(i));
+ }
+
+ /* RxA Registers */
+ BNAD_GET_REG(RXA0_CTL_REG);
+ BNAD_GET_REG(RXA1_CTL_REG);
+
+ /* PLB0 Registers */
+ BNAD_GET_REG(PLB0_ECM_TIMER_REG);
+ BNAD_GET_REG(PLB0_RL_CTL);
+ for (i = 0; i < 8; i++)
+ BNAD_GET_REG(PLB0_RL_MAX_BC(i));
+ BNAD_GET_REG(PLB0_RL_TU_PRIO);
+ for (i = 0; i < 8; i++)
+ BNAD_GET_REG(PLB0_RL_BYTE_CNT(i));
+ BNAD_GET_REG(PLB0_RL_MIN_REG);
+ BNAD_GET_REG(PLB0_RL_MAX_REG);
+ BNAD_GET_REG(PLB0_EMS_ADD_REG);
+
+ /* PLB1 Registers */
+ BNAD_GET_REG(PLB1_ECM_TIMER_REG);
+ BNAD_GET_REG(PLB1_RL_CTL);
+ for (i = 0; i < 8; i++)
+ BNAD_GET_REG(PLB1_RL_MAX_BC(i));
+ BNAD_GET_REG(PLB1_RL_TU_PRIO);
+ for (i = 0; i < 8; i++)
+ BNAD_GET_REG(PLB1_RL_BYTE_CNT(i));
+ BNAD_GET_REG(PLB1_RL_MIN_REG);
+ BNAD_GET_REG(PLB1_RL_MAX_REG);
+ BNAD_GET_REG(PLB1_EMS_ADD_REG);
+
+ /* HQM Control Register */
+ BNAD_GET_REG(HQM0_CTL_REG);
+ BNAD_GET_REG(HQM0_RXQ_STOP_SEM);
+ BNAD_GET_REG(HQM0_TXQ_STOP_SEM);
+ BNAD_GET_REG(HQM1_CTL_REG);
+ BNAD_GET_REG(HQM1_RXQ_STOP_SEM);
+ BNAD_GET_REG(HQM1_TXQ_STOP_SEM);
+
+ /* LUT Registers */
+ BNAD_GET_REG(LUT0_ERR_STS);
+ BNAD_GET_REG(LUT0_SET_ERR_STS);
+ BNAD_GET_REG(LUT1_ERR_STS);
+ BNAD_GET_REG(LUT1_SET_ERR_STS);
+
+ /* TRC Registers */
+ BNAD_GET_REG(TRC_CTL_REG);
+ BNAD_GET_REG(TRC_MODS_REG);
+ BNAD_GET_REG(TRC_TRGC_REG);
+ BNAD_GET_REG(TRC_CNT1_REG);
+ BNAD_GET_REG(TRC_CNT2_REG);
+ BNAD_GET_REG(TRC_NXTS_REG);
+ BNAD_GET_REG(TRC_DIRR_REG);
+ for (i = 0; i < 10; i++)
+ BNAD_GET_REG(TRC_TRGM_REG(i));
+ for (i = 0; i < 10; i++)
+ BNAD_GET_REG(TRC_NXTM_REG(i));
+ for (i = 0; i < 10; i++)
+ BNAD_GET_REG(TRC_STRM_REG(i));
+
+ spin_unlock_irq(&bnad->priv_lock);
+#undef BNAD_GET_REG
+ return num;
+}
+
+/* ethtool get_regs_len: register count times the word size. */
+static int bnad_get_regs_len(struct net_device *netdev)
+{
+	/* A NULL buffer makes get_regs() only count the registers. */
+	return get_regs(netdev_priv(netdev), NULL) * sizeof(u32);
+}
+
+/* ethtool get_regs: clear the snapshot area, then fill it. */
+static void bnad_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+			  void *buf)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	memset(buf, 0, bnad_get_regs_len(netdev));
+	get_regs(bnad, buf);
+}
+
+/* ethtool get_wol: this adapter has no Wake-on-LAN capability. */
+static void bnad_get_wol(struct net_device *netdev,
+			 struct ethtool_wolinfo *wolinfo)
+{
+	wolinfo->supported = 0;
+	wolinfo->wolopts = 0;
+}
+
+/*
+ * bnad_get_coalesce - ethtool get_coalesce handler.
+ * Timers are stored in BNAD_COALESCING_TIMER_UNIT ticks and reported
+ * to userspace in microseconds.
+ */
+static int bnad_get_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *coalesce)
+{
+	const struct bnad *bnad = netdev_priv(netdev);
+
+	coalesce->use_adaptive_rx_coalesce = bnad->rx_dyn_coalesce_on;
+
+	coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
+				      BNAD_COALESCING_TIMER_UNIT;
+	coalesce->rx_max_coalesced_frames = bnad->rx_interpkt_count;
+	coalesce->rx_coalesce_usecs_irq = bnad->rx_interpkt_timeo;
+
+	coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
+				      BNAD_COALESCING_TIMER_UNIT;
+	coalesce->tx_max_coalesced_frames = bnad->tx_interpkt_count;
+
+	return 0;
+}
+
+/*
+ * bnad_set_coalesce - ethtool set_coalesce handler.
+ *
+ * Validates and applies interrupt-coalescing parameters.  Coalescing
+ * timers are stored in units of BNAD_COALESCING_TIMER_UNIT usecs;
+ * inter-packet count/timeout changes require a soft reset to take
+ * effect.  Returns 0 or a negative errno.
+ */
+static int bnad_set_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *coalesce)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int i, err = 0, reset = 0;
+	u16 ib_id;
+
+	if (coalesce->rx_coalesce_usecs == 0 ||
+	    coalesce->rx_coalesce_usecs >
+	    BNAD_MAX_COALESCING_TIMEO * BNAD_COALESCING_TIMER_UNIT)
+		return -EINVAL;
+	if (coalesce->rx_max_coalesced_frames > BNAD_MAX_INTERPKT_COUNT)
+		return -EINVAL;
+	if (coalesce->rx_coalesce_usecs_irq == 0 ||
+	    coalesce->rx_coalesce_usecs_irq > BNAD_MAX_INTERPKT_TIMEO)
+		return -EINVAL;
+
+	if (coalesce->tx_coalesce_usecs == 0 ||
+	    coalesce->tx_coalesce_usecs >
+	    BNAD_MAX_COALESCING_TIMEO * BNAD_COALESCING_TIMER_UNIT)
+		return -EINVAL;
+	if (coalesce->tx_max_coalesced_frames > BNAD_MAX_INTERPKT_COUNT)
+		return -EINVAL;
+
+	mutex_lock(&bnad->conf_mutex);
+	spin_lock_irq(&bnad->priv_lock);
+
+	bnad->rx_dyn_coalesce_on = coalesce->use_adaptive_rx_coalesce;
+	/*
+	 * BUG FIX: the requested RX coalescing timeout was validated
+	 * above but never stored, so it could never take effect
+	 * (compare the TX path below).
+	 */
+	bnad->rx_coalescing_timeo =
+		coalesce->rx_coalesce_usecs / BNAD_COALESCING_TIMER_UNIT;
+	if (bnad->rx_coalescing_timeo == 0)
+		bnad->rx_coalescing_timeo = 1;
+	if (bnad->state == BNAD_S_OPEN && !BNAD_NOT_READY(bnad)) {
+		/* Propagate the new timer to every RX interrupt block */
+		for (i = 0; i < bnad->cq_num; i++) {
+			ib_id = bnad->cq_table[i].cq_config.ib_id;
+			bnad->ib_table[ib_id].ib_config.coalescing_timer =
+				bnad->rx_coalescing_timeo;
+			if (!bnad->rx_dyn_coalesce_on) {
+				bnad->cq_table[i].rx_coalescing_timeo =
+					bnad->rx_coalescing_timeo;
+			}
+		}
+	}
+	if (coalesce->rx_max_coalesced_frames != bnad->rx_interpkt_count) {
+		bnad->rx_interpkt_count = coalesce->rx_max_coalesced_frames;
+		reset++;
+	}
+	if (coalesce->rx_coalesce_usecs_irq != bnad->rx_interpkt_timeo) {
+		bnad->rx_interpkt_timeo = coalesce->rx_coalesce_usecs_irq;
+		reset++;
+	}
+
+	bnad->tx_coalescing_timeo =
+		coalesce->tx_coalesce_usecs / BNAD_COALESCING_TIMER_UNIT;
+	if (bnad->tx_coalescing_timeo == 0)
+		bnad->tx_coalescing_timeo = 1;
+	if (bnad->state == BNAD_S_OPEN && !BNAD_NOT_READY(bnad)) {
+		/* Propagate the new timer to every TX interrupt block */
+		for (i = 0; i < bnad->txq_num; i++) {
+			ib_id = bnad->txq_table[i].txq_config.ib_id;
+			bnad->ib_table[ib_id].ib_config.coalescing_timer =
+				bnad->tx_coalescing_timeo;
+		}
+	}
+	if (coalesce->tx_max_coalesced_frames != bnad->tx_interpkt_count) {
+		bnad->tx_interpkt_count = coalesce->tx_max_coalesced_frames;
+		reset++;
+	}
+
+	spin_unlock_irq(&bnad->priv_lock);
+
+	/* Inter-packet parameters only take effect after a soft reset */
+	if (reset)
+		err = bnad_sw_reset_locked(netdev);
+
+	mutex_unlock(&bnad->conf_mutex);
+
+	return err;
+}
+
+/*
+ * bnad_get_ringparam - ethtool get_ringparam handler.
+ * Reports the supported and currently configured RxQ/TxQ depths.
+ * Mini and jumbo rings are not supported.
+ */
+static void bnad_get_ringparam(struct net_device *netdev,
+			       struct ethtool_ringparam *ringparam)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	ringparam->rx_max_pending = BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq;
+	ringparam->rx_mini_max_pending = 0;
+	ringparam->rx_jumbo_max_pending = 0;
+	ringparam->tx_max_pending = BNAD_MAX_Q_DEPTH;
+
+	ringparam->rx_pending = bnad->rxq_depth;
+	/*
+	 * BUG FIX: the original code re-assigned rx_mini_max_pending and
+	 * rx_jumbo_max_pending here; the current-value fields were
+	 * clearly intended.
+	 */
+	ringparam->rx_mini_pending = 0;
+	ringparam->rx_jumbo_pending = 0;
+	ringparam->tx_pending = bnad->txq_depth;
+}
+
+/*
+ * bnad_set_ringparam - ethtool set_ringparam handler.
+ * Depths must be powers of two within the supported range; applying a
+ * new depth triggers a soft reset.
+ */
+static int bnad_set_ringparam(struct net_device *netdev,
+			      struct ethtool_ringparam *ringparam)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+
+	mutex_lock(&bnad->conf_mutex);
+
+	/* Nothing to do if neither depth changes */
+	if (ringparam->rx_pending == bnad->rxq_depth &&
+	    ringparam->tx_pending == bnad->txq_depth)
+		goto unlock;
+
+	err = -EINVAL;
+	if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
+	    ringparam->rx_pending > BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq ||
+	    !BNA_POWER_OF_2(ringparam->rx_pending))
+		goto unlock;
+	if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
+	    ringparam->tx_pending > BNAD_MAX_Q_DEPTH ||
+	    !BNA_POWER_OF_2(ringparam->tx_pending))
+		goto unlock;
+
+	if (ringparam->rx_pending != bnad->rxq_depth) {
+		bnad->rxq_depth = ringparam->rx_pending;
+		bnad->config |= BNAD_CF_RXQ_DEPTH;
+	}
+	if (ringparam->tx_pending != bnad->txq_depth) {
+		bnad->txq_depth = ringparam->tx_pending;
+		bnad->config |= BNAD_CF_TXQ_DEPTH;
+	}
+
+	/* Re-initialize with the new depths */
+	err = bnad_sw_reset_locked(netdev);
+
+unlock:
+	mutex_unlock(&bnad->conf_mutex);
+	return err;
+}
+
+/* ethtool get_pauseparam: report the cached pause configuration. */
+static void bnad_get_pauseparam(struct net_device *netdev,
+				struct ethtool_pauseparam *pauseparam)
+{
+	const struct bnad *bnad = netdev_priv(netdev);
+
+	/* Pause autonegotiation is never enabled on this adapter */
+	pauseparam->autoneg = 0;
+	pauseparam->tx_pause = bnad->pause_config.tx_pause;
+	pauseparam->rx_pause = bnad->pause_config.rx_pause;
+}
+
+/*
+ * bnad_set_pauseparam - ethtool set_pauseparam handler.
+ * Pause autonegotiation is unsupported; pushes changed rx/tx pause
+ * settings down to the hardware.
+ */
+static int bnad_set_pauseparam(struct net_device *netdev,
+			       struct ethtool_pauseparam *pauseparam)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int changed;
+
+	if (pauseparam->autoneg == AUTONEG_ENABLE)
+		return -EINVAL;
+
+	mutex_lock(&bnad->conf_mutex);
+	changed = pauseparam->rx_pause != bnad->pause_config.rx_pause ||
+		  pauseparam->tx_pause != bnad->pause_config.tx_pause;
+	if (changed) {
+		bnad->pause_config.rx_pause = pauseparam->rx_pause;
+		bnad->pause_config.tx_pause = pauseparam->tx_pause;
+		/* Apply the new pause settings to the hardware */
+		spin_lock_irq(&bnad->priv_lock);
+		bna_set_pause_config(bnad->priv, &bnad->pause_config, bnad);
+		spin_unlock_irq(&bnad->priv_lock);
+	}
+	mutex_unlock(&bnad->conf_mutex);
+	return 0;
+}
+
+/* ethtool get_rx_csum: report the current RX checksum-offload flag. */
+static u32 bnad_get_rx_csum(struct net_device *netdev)
+{
+	const struct bnad *bnad = netdev_priv(netdev);
+
+	return bnad->rx_csum;
+}
+
+/* ethtool set_rx_csum: update the RX checksum-offload flag. */
+static int bnad_set_rx_csum(struct net_device *netdev, u32 rx_csum)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	/* Serialize against other configuration changes */
+	mutex_lock(&bnad->conf_mutex);
+	bnad->rx_csum = rx_csum;
+	mutex_unlock(&bnad->conf_mutex);
+
+	return 0;
+}
+
+/* ethtool set_tx_csum: toggle IPv4/IPv6 TX checksum offload together. */
+static int bnad_set_tx_csum(struct net_device *netdev, u32 tx_csum)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	const unsigned long csum_bits = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+	mutex_lock(&bnad->conf_mutex);
+	if (tx_csum)
+		netdev->features |= csum_bits;
+	else
+		netdev->features &= ~csum_bits;
+	mutex_unlock(&bnad->conf_mutex);
+
+	return 0;
+}
+
+/* ethtool set_tso: toggle TSO for IPv4 and IPv6 together. */
+static int bnad_set_tso(struct net_device *netdev, u32 tso)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	const unsigned long tso_bits = NETIF_F_TSO | NETIF_F_TSO6;
+
+	mutex_lock(&bnad->conf_mutex);
+	if (tso)
+		netdev->features |= tso_bits;
+	else
+		netdev->features &= ~tso_bits;
+	mutex_unlock(&bnad->conf_mutex);
+
+	return 0;
+}
+
+/*
+ * bnad_get_strings() - ethtool get_strings handler (ETH_SS_STATS).
+ *
+ * Emits the statistic names in exactly the order in which
+ * bnad_get_ethtool_stats() writes the values: fixed name table,
+ * RxF 0 names, two state words, then per-CQ/RxQ/TxQ counters.
+ * Must stay in sync with bnad_get_stats_count_locked().
+ */
+static void bnad_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *string)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ int i;
+ mutex_lock(&bnad->conf_mutex);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ /*
+ * NOTE(review): ETH_GSTRING_LEN bytes are copied from each
+ * table entry; the table must guarantee that many readable
+ * bytes per entry (e.g. a 2-D [ETH_GSTRING_LEN] array).
+ */
+ for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
+ memcpy(string, bnad_net_stats_strings[i],
+ ETH_GSTRING_LEN);
+ string += ETH_GSTRING_LEN;
+ }
+
+ /* Names for RxF 0 only - one RxF block is exported */
+ i = 0;
+ sprintf(string, "rxf%d_ucast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_ucast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_ucast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_mcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_mcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_mcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_bcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_bcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_bcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_frame_drops", i);
+ string += ETH_GSTRING_LEN;
+
+ /* Driver state words */
+ sprintf(string, "netif_queue_stopped");
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "bna_state");
+ string += ETH_GSTRING_LEN;
+
+ /* Per-queue index counters */
+ for (i = 0; i < bnad->cq_num; i++) {
+ sprintf(string, "cq%d_producer_index", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_consumer_index", i);
+ string += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < bnad->rxq_num; i++) {
+ sprintf(string, "rxq%d_packets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_bytes", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_packets_with_error", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_allocbuf_failed", i);
+ string += ETH_GSTRING_LEN;
+
+ sprintf(string, "rxq%d_producer_index", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_consumer_index", i);
+ string += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < bnad->txq_num; i++) {
+ sprintf(string, "txq%d_packets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_bytes", i);
+ string += ETH_GSTRING_LEN;
+
+ sprintf(string, "txq%d_producer_index", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_consumer_index", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_hw_consumer_index", i);
+ string += ETH_GSTRING_LEN;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+/*
+ * bnad_get_ethtool_stats() - ethtool get_ethtool_stats handler.
+ *
+ * Fills @buf in the exact order the names are emitted by
+ * bnad_get_strings().  Bails out (leaving the buffer zeroed) if the
+ * caller-supplied count no longer matches the current configuration.
+ */
+static void bnad_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *buf)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ int i, bi;
+ unsigned long *net_stats;
+ u64 *stats64;
+
+ mutex_lock(&bnad->conf_mutex);
+ if (bnad_get_stats_count_locked(netdev) != stats->n_stats)
+ goto mismatch;
+
+ bi = 0;
+ memset(buf, 0, stats->n_stats * sizeof(u64));
+ /* Refresh the cached counters before copying them out */
+ bnad_get_stats(netdev);
+
+ /*
+ * NOTE(review): the copies below iterate over structs as flat
+ * unsigned long / u64 arrays; this assumes the stats structs
+ * contain only fields of that width, with no padding.
+ */
+ net_stats = (unsigned long *)&bnad->net_stats;
+ for (i = 0; i < sizeof(struct net_device_stats) / sizeof(unsigned long);
+ i++)
+ buf[bi++] = net_stats[i];
+
+ stats64 = (u64 *)&bnad->stats;
+ for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
+ buf[bi++] = stats64[i];
+
+ /* Hardware counters up to the first RxF block */
+ stats64 = (u64 *) bnad->hw_stats;
+ for (i = 0;
+ i < offsetof(struct bna_stats, rxf_stats[0]) / sizeof(u64);
+ i++)
+ buf[bi++] = stats64[i];
+
+ /* TxF 0 block, then RxF 0 block */
+ stats64 = (u64 *)&bnad->hw_stats->txf_stats[0];
+ for (i = 0; i < sizeof(struct bna_stats_txf) / sizeof(u64); i++)
+ buf[bi++] = stats64[i];
+
+ stats64 = (u64 *)&bnad->hw_stats->rxf_stats[0];
+ for (i = 0; i < sizeof(struct bna_stats_rxf) / sizeof(u64); i++)
+ buf[bi++] = stats64[i];
+
+ /* Driver state words */
+ buf[bi++] = netif_queue_stopped(netdev);
+ buf[bi++] = bnad->state;
+
+ /* Per-queue counters; only valid while the queue tables exist */
+ if (bnad->cq_table && bnad->rxq_table && bnad->txq_table) {
+ for (i = 0; i < bnad->cq_num; i++) {
+ buf[bi++] = bnad->cq_table[i].cq.q.producer_index;
+ buf[bi++] = bnad->cq_table[i].cq.q.consumer_index;
+ }
+
+ for (i = 0; i < bnad->rxq_num; i++) {
+ buf[bi++] = bnad->rxq_table[i].rx_packets;
+ buf[bi++] = bnad->rxq_table[i].rx_bytes;
+ buf[bi++] = bnad->rxq_table[i].rx_packets_with_error;
+ buf[bi++] = bnad->rxq_table[i].rxbuf_alloc_failed;
+
+ buf[bi++] = bnad->rxq_table[i].rxq.q.producer_index;
+ buf[bi++] = bnad->rxq_table[i].rxq.q.consumer_index;
+ }
+ for (i = 0; i < bnad->txq_num; i++) {
+ buf[bi++] = bnad->txq_table[i].tx_packets;
+ buf[bi++] = bnad->txq_table[i].tx_bytes;
+
+ buf[bi++] = bnad->txq_table[i].txq.q.producer_index;
+ buf[bi++] = bnad->txq_table[i].txq.q.consumer_index;
+ buf[bi++] = *(bnad->txq_table[i].hw_consumer_index);
+ }
+ }
+
+mismatch:
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+static int bnad_get_stats_count_locked(struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ int count;
+
+ count = BNAD_ETHTOOL_STATS_NUM + 10 + bnad->rxq_num * 4
+ + bnad->txq_num * 2;
+
+ /* netif_queue_stopped, state */
+ count += 2;
+
+ /* CQ producer_index, consumer_index */
+ count += bnad->cq_num * 2;
+
+ /* RxQ producer_index, consumer_index */
+ count += bnad->rxq_num * 2;
+
+ /* TxQ producer_index, consumer_index, hw_consumer_index */
+ count += bnad->txq_num * 3;
+ return count;
+}
+
+static int
+bnad_get_sset_count(struct net_device *netdev, int stringset)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ int count;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ mutex_lock(&bnad->conf_mutex);
+ count = bnad_get_stats_count_locked(netdev);
+ mutex_unlock(&bnad->conf_mutex);
+ return count;
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct ethtool_ops bnad_ethtool_ops = {
+ .get_settings = bnad_get_settings,
+ .set_settings = bnad_set_settings,
+ .get_drvinfo = bnad_get_drvinfo,
+ .get_regs_len = bnad_get_regs_len,
+ .get_regs = bnad_get_regs,
+ .get_wol = bnad_get_wol,
+ .get_msglevel = bnad_get_msglevel,
+ .set_msglevel = bnad_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = bnad_get_coalesce,
+ .set_coalesce = bnad_set_coalesce,
+ .get_ringparam = bnad_get_ringparam,
+ .set_ringparam = bnad_set_ringparam,
+ .get_pauseparam = bnad_get_pauseparam,
+ .set_pauseparam = bnad_set_pauseparam,
+ .get_rx_csum = bnad_get_rx_csum,
+ .set_rx_csum = bnad_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = bnad_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = bnad_set_tso,
+ .get_strings = bnad_get_strings,
+ .get_ethtool_stats = bnad_get_ethtool_stats,
+ .get_sset_count = bnad_get_sset_count
+};
+
/*
 * bnad_set_ethtool_ops - attach this driver's ethtool operations to @netdev.
 *
 * Presumably called once from the netdev setup path before
 * register_netdev() -- confirm against the probe code (not in this chunk).
 */
void bnad_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
}
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Powered by blists - more mailing lists