[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <20080217094445.47aa9508@extreme>
Date: Sun, 17 Feb 2008 09:44:45 -0800
From: Stephen Hemminger <shemminger@...ux-foundation.org>
To: netdev@...r.kernel.org
Subject: Re: [PATHCH 1/16] ServerEngines 10Gb NIC driver
Do all vendor drivers have to come in with the same mistakes?
Where is the vendor-driver ugly school, and how can the Linux
developers teach there?
Run this through the checkpatch script, or just read some of the
things that a quick scan shows.
diff -uprN orig/linux-2.6.24.2/drivers/net/benet/be.h benet/linux-2.6.24.2/drivers/net/benet/be.h
--- orig/linux-2.6.24.2/drivers/net/benet/be.h 1970-01-01 05:30:00.000000000 +0530
+++ benet/linux-2.6.24.2/drivers/net/benet/be.h 2008-02-14 15:23:07.787208928 +0530
@@ -0,0 +1,373 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or at your option any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, 5th Floor
+ * Boston, MA 02110-1301 USA
+ *
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called GPL.
+ *
+ * Contact Information:
+ * linux-drivers@...verengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ *
+ */
+#ifndef _BE_H
+#define _BE_H
+
+#include <linux/netdevice.h>
+#include "bni.h"
+
+typedef union _PHYSICAL_ADDRESS {
+ struct {
+ u32 pa_lo;
+ u32 pa_hi;
+ };
+ u64 pa;
+} PHYSICAL_ADDRESS;
Don't introduce typedefs; this also creates a byte-order dependency.
+
+#define BE_ETHERNET_FCS_SIZE 4
+#define BE_ENET_HEADER_SIZE 14
+#define BE_VLAN_HEADER_SIZE 4
+#define BE_SNAP_HEADER_SIZE 5
+#define BE_HEADER_ETHERNET_II_802_3_SIZE 14
+#define BE_HEADER_802_2_SIZE 3
+#define BE_MIN_ETHER_FRAME_SIZE 64
+#define BE_MIN_SUPPORT_FRAME_SIZE 256
+#define BE_MAX_JUMBO_FRAME_SIZE 9000
+#define BE_MAXIMUM_ETHERNET_FRAME_SIZE 1518 /*With FCS */
Don't define your own constants; use the values in if_ether.h.
+extern unsigned int dbg_mask;
+extern unsigned int pm_resume;
These shouldn't be global; they risk namespace conflicts.
+extern char be_drvr_ver[];
+extern char be_fw_ver[];
+extern char be_driver_name[];
const?
+typedef enum {
+ BE_DEVICE_1,
+ BE_DEVICE_2,
+ MAX_BE_DEVICES
+} BE_DEVICE_NUM;
+
+typedef enum {
+ BE_DEV_STATE_NONE,
+ BE_DEV_STATE_INIT,
+ BE_DEV_STATE_OPEN
+} BE_DEV_STATE;
+
+#define BE_DEV_STATE_OPEN(adapter) (adapter->dev_state == BE_DEV_STATE_OPEN)
Don't use a wrapper macro; open-code it.
+/*
+ * BE driver statistics.
+ */
+struct _be_stat {
+ u32 bes_tx_reqs; /* number of TX requests initiated */
+ u32 bes_tx_fails; /* number of TX requests that failed */
+ u32 bes_fwd_reqs; /* number of send reqs through forwarding i/f */
+ u32 bes_tx_wrbs; /* number of tx WRBs used */
+
+ u32 bes_ints; /* number of interrupts */
+ u32 bes_polls; /* number of times NAPI called poll function */
+ u32 bes_events; /* total evet entries processed */
+ u32 bes_tx_events; /* number of tx completion events */
+ u32 bes_ucrx_events; /* number of ucast rx completion events */
+ u32 bes_bcrx_events; /* number of bcast rx completion events */
+ u32 bes_tx_compl; /* number of tx completion entries processed */
+ u32 bes_ucrx_compl; /* number of ucrx completion entries
+ processed */
+ u32 bes_bcrx_compl; /* number of bcrx completion entries
+ processed */
+ u32 bes_ethrx_post_fail; /* number of ethrx buffer alloc
+ failures */
+ /*
+ *
+ * number of non ether type II frames dropped where
+ * frame len > length field of Mac Hdr
+ */
+ u32 bes_802_3_dropped_frames;
+ /*
+ * number of non ether type II frames malformed where
+ * in frame len < length field of Mac Hdr
+ */
+ u32 bes_802_3_malformed_frames;
+ u32 bes_ips; /* interrupts / sec */
+ u32 bes_prev_ints; /* bes_ints at last IPS calculation */
+ u16 bes_eth_tx_rate; /* ETH TX rate - Mb/sec */
+ u16 bes_eth_rx_rate; /* ETH RX rate - Mb/sec */
+#ifdef RX_PKT_COALESCE
+ u32 bes_rx_coal; /* Num pkts coalasced */
+ u32 bes_rx_flush; /* Num times coalasced */
+#endif
+ u32 bes_link_change_physical; /*Num of times physical link changed */
+ u32 bes_link_change_virtual; /*Num of times virtual link changed */
+};
Unless these are per-cpu, computing all this will be a
CPU hog.
+/* Macro to update RX/TX rates */
+#define UPDATE_RATE(AP, JIF, BYTES, RATE) \
+ if ((jiffies - AP->JIF) > 2*(HZ)) { \
+ u32 r; \
+ r = AP->BYTES / ((jiffies-AP->JIF)/(HZ)); \
+ r = (r / 1000000); /* MB/Sec */ \
+ AP->be_stat.RATE = (r * 8); /* Mega Bits/Sec */ \
+ AP->JIF = jiffies; \
+ AP->BYTES = 0; \
+ }
This should be an inline function, not a macro.
+/*
+ * Every second we look at the ints/sec and adjust eq_delay
+ * between AP->min_eqd and AP->max_eqd to keep the ints/sec between
+ * IPS_HI_WM and IPS_LO_WM.
+ */
+#define IPS_HI_WM 18000
+#define IPS_LO_WM 8000
+#define UPDATE_IPS(AP, NP) \
+ if ((jiffies - AP->ips_jiffies) > 1*(HZ)) { \
+ /* One second elapsed since last update */ \
+ u32 r, new_eqd = -1; \
+ if (AP->be_stat.bes_prev_ints > \
+ AP->be_stat.bes_ints) { \
+ /* interrupt counter wrapped aroud */ \
+ r = (0xFFFFFFFF - \
+ AP->be_stat.bes_prev_ints) + \
+ AP->be_stat.bes_ints; \
+ } \
+ else \
+ r = AP->be_stat.bes_ints - \
+ AP->be_stat.bes_prev_ints; \
+ r = r / ((jiffies - AP->ips_jiffies)/(HZ)); \
+ AP->be_stat.bes_ips = r; \
+ AP->ips_jiffies = jiffies; \
+ AP->be_stat.bes_prev_ints = \
+ AP->be_stat.bes_ints; \
+ if (r > IPS_HI_WM && \
+ AP->cur_eqd < AP->max_eqd) { \
+ /* increase eqdelay by a notch */ \
+ new_eqd = (AP->cur_eqd + 8); \
+ } \
+ if (r < IPS_LO_WM && \
+ AP->cur_eqd > AP->min_eqd) { \
+ /* decrease eqdelay by a notch */ \
+ new_eqd = (AP->cur_eqd - 8); \
+ } \
+ if (AP->enable_aic && new_eqd != -1) { \
+ /* program new delay */ \
+ if (bni_change_eqd(NP, new_eqd) == \
+ BE_SUCCESS) \
+ AP->cur_eqd = new_eqd; \
+ } \
+ }
This shouldn't be an ugly macro.
+#define FAIL 1
+#define SUCCESS 0
No, don't define private true/false.
+#define MAX_EQD 120
What is this value? What does it mean?
+/*
+ * timer to prevent system shutdown hang for ever if h/w stops responding
+ */
+typedef struct {
+ atomic_t get_stat_flag;
+ struct timer_list get_stats_timer;
+ unsigned long get_stat_sem; /* semaphore to wait */
+} be_timer_ctxt_t;
+
+#ifdef RX_PKT_COALESCE
+#define MAX_COALESCE_SIZE 48*1024
+#define MAX_COALESCE_FRAGS (MAX_SKB_FRAGS - 1)
+#define MAX_COALESCE_OBJECTS 8
+/* struture to keep track of adjacent packets in a connection that
+ * can be colesced to a larger packet during RX completion processing.
+ */
+struct be_coalesce_object {
+ PBNI_NET_OBJECT pnob;
+ struct sk_buff *skb;
+ unsigned int frag_cnt;
+ unsigned int next_pkt_seq;
+ unsigned int next_ack_seq;
+ unsigned int tcp_timestamp;
+ unsigned int tcp_tsecr;
+ unsigned int tcp_tsval;
+ unsigned short sport;
+ unsigned short dport;
+ unsigned int saddr;
+ unsigned int daddr;
+ unsigned short last_seen_window;
+ unsigned short mss;
+ unsigned short vlant;
+ unsigned short rsvd;
+
+};
+#endif /* RX_PKT_COALESCE */
If this is done in software, it should be done generally, not just
buried in your specific driver.
+/* This structure is the main BladeEngine driver context. */
+typedef struct _BE_ADAPTER {
+ struct net_device *netdevp;
+ struct _be_stat be_stat;
+ struct net_device_stats benet_stats;
+ u32 num_bars;
+ SA_DEV_BAR_LOCATIONS pci_bars[3]; /* PCI BAR details */
+#ifdef CONFIG_PM
+ u32 pci_state[16];
+#endif
You shouldn't need to do this.
+ SA_DEV sa_device; /* device object owned by beclib */
+ BE_CHIP_OBJECT chip_object; /* BEClib chip object */
+
+ struct tasklet_struct sts_handler;
+ struct timer_list cq_timer;
+ spinlock_t int_lock;
+
+ PIOCTL_ETH_GET_STATISTICS eth_statsp;
+ /*
+ * This will enable the use of ethtool to enable or disable
+ * Checksum on Rx pkts to be obeyed or disobeyed.
+ * If this is TRUE = 1, then whatever is the checksum on the
+ * Received pkt as per BE, it will be given to the stack.
+ * Else the stack will re calculate it.
+ */
+ BOOLEAN rx_csum;
+#ifdef RX_PKT_COALESCE
+ /*
+ * This will enable the use of ethtool to enable or disable
+ * Coalese on Rx pkts to be obeyed or disobeyed.
+ * If this is grater than 0 and less than 16 then coalascing
+ * is enabled else it is disabled
+ */
+ u32 max_rx_coal;
+#endif
+ struct pci_dev *pdev; /* Pointer to OS's PCI dvice */
+
+ spinlock_t txq_lock;
+
+ u32 isr; /* copy of Intr status reg. */
+
+ u32 port0_link_sts; /* Port 0 link status */
+ u32 port1_link_sts; /* port 1 list status */
+ PBE_LINK_STATUS be_link_sts;
+ PBNI_NET_OBJECT net_obj;
+
+ /* Flags to indicate what to clean up */
+ BOOLEAN tasklet_started;
+ BOOLEAN isr_registered;
+ /*
+ * adaptive interrupt coalescing (AIC) related
+ */
+ u16 enable_aic; /* 1 if AIC is enabled */
+ u16 min_eqd; /* minimum EQ delay in usec */
+ u16 max_eqd; /* minimum EQ delay in usec */
+ u16 cur_eqd; /* current EQ delay in usec */
+ /*
+ * book keeping for interrupt / sec and TX/RX rate calculation
+ */
+ ulong ips_jiffies; /* jiffies at last IPS calc */
+ u32 eth_tx_bytes;
+ ulong eth_tx_jiffies;
+ u32 eth_rx_bytes;
+ ulong eth_rx_jiffies;
+
+ struct semaphore get_eth_stat_sem;
+ be_timer_ctxt_t *ctxt; /* context for get stats timer */
+
+#define BE_MAX_MSIX_VECTORS 32
+#define BE_MAX_REQ_MSIX_VECTORS 1
+ struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
+ BOOLEAN msix_enabled; /*MSI has been enabled */
+ BOOLEAN dma_64bit_cap; /* is the Device DAC capable */
+ int be_fw_ver; /* BE F/W version */
+ BOOLEAN dev_state; /* The current state of the device */
+
+} BE_ADAPTER, *PBE_ADAPTER;
+
+extern PBE_ADAPTER be_adapter[MAX_BE_DEVICES];
+
+typedef struct be_rx_page_info {
+ struct page *page;
+ dma_addr_t bus;
+ u16 page_offset;
+} BE_RX_PAGE_INFO;
+
+/*
+ * linux_net_object is an extension to BNI's NetObject structure.
+ * NetObject has a pointer to this structure
+ */
+typedef struct {
+ PVOID os_handle; /* Context info for VMM */
+ BNI_RECV_BUFFER eth_rx_bufs[256]; /* to pass Rx buffer
+ addresses */
+ PBE_ADAPTER adapter; /* Pointer to OSM adapter */
+ u32 devno; /* OSM, network dev no. */
+ u32 use_port; /* Current active port */
+ BE_RX_PAGE_INFO *rx_page_info; /* Array of Rx buf pages */
+ u32 rx_pg_info_hd; /* Head of queue */
+ int rxbuf_post_fail; /* RxBuff posting fail count */
+ BOOLEAN rx_pg_shared; /* Is an allocsted page shared as two frags ? */
+ struct vlan_group *vlan_grp;
+ u32 num_vlans; /* Number of vlans in BE's filter */
+ u16 vlan_tag[BE_NUM_VLAN_SUPPORTED]; /* vlans currently configured */
+#ifdef CONFIG_BENET_NAPI
+ struct napi_struct napi;
+ u32 work_quota; /* Max RX packets to process */
+ BOOLEAN rx_sched;
+ spinlock_t rx_lock;
+#endif
+#ifdef RX_PKT_COALESCE
+ struct be_coalesce_object rxc_obj[MAX_COALESCE_OBJECTS];
+ u32 num_coalesce_objects;
+#endif
+} linux_net_object_t;
NO NO typedef, got it.
+/* convenience macro to access members in Linux extension of NetObject */
+#define OSM_NOB(x) ((linux_net_object_t *) (x)->osm_netobj)
Gack, no.
+/* proto declarations */
+
+int benet_probe(struct net_device *netdev);
+void be_set_ethtool_ops(struct net_device *netdev);
+int be_ethtool_ioctl(struct net_device *netdev, struct ifreq *ifr);
+struct net_device_stats *benet_get_stats(struct net_device *);
+
+int be_prepare_interface(PBE_ADAPTER adapter);
+void cleanup_netobject(PBNI_NET_OBJECT);
+void osm_process_sts(unsigned long context);
+irqreturn_t be_int(int irq, PVOID dev, struct pt_regs *regs);
+
+int betx_ether_frame(PBE_ADAPTER pBeAdapter, PBNI_NET_OBJECT NetObject,
+ struct sk_buff *skb, u8 proto, u8 forward,
+ u16 lso_mss);
+
+void post_eth_rx_buffs(PBNI_NET_OBJECT NetObject);
+void get_stat_cb(PVOID context, BESTATUS status, MCC_WRB *optional_wrb);
+
+void get_stats_timer_handler(unsigned long context);
+
+void enable_eq_intr(PBNI_NET_OBJECT pnob);
+void disable_eq_intr(PBNI_NET_OBJECT pnob);
+
+void wait_nic_tx_cmpl(PBNI_NET_OBJECT pnob);
+void be_print_link_info(PBE_LINK_STATUS lnk_status);
+void be_update_link_status(PBE_ADAPTER adapter);
+
+void be_init_procfs(PBE_ADAPTER adapter);
+void be_cleanup_procfs(PBE_ADAPTER adapter);
+
+#ifdef CONFIG_BENET_NAPI
+int be_poll(struct napi_struct *napi, int budget);
+#endif
+#endif /* _BE_H */
diff -uprN orig/linux-2.6.24.2/drivers/net/benet/bni.h benet/linux-2.6.24.2/drivers/net/benet/bni.h
--- orig/linux-2.6.24.2/drivers/net/benet/bni.h 1970-01-01 05:30:00.000000000 +0530
+++ benet/linux-2.6.24.2/drivers/net/benet/bni.h 2008-02-14 15:23:07.788208776 +0530
@@ -0,0 +1,261 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or at your option any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, 5th Floor
+ * Boston, MA 02110-1301 USA
+ *
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called GPL.
+ *
+ * Contact Information:
+ * linux-drivers@...verengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ *
+ */
+/*
+
+@...e
+ bni.h
+
+@...ef
+ Definitions and macros that are required for all .c files
+ that use the BNI API and implement the BNI API functions
+*/
+#ifndef _BNI_H
+#define _BNI_H
+
+#define _SA_MODULE_NAME "net-driver"
+#include "beclib_ll.h"
+
+#define VLAN_VALID_BIT 0x8000
+#define BE_NUM_VLAN_SUPPORTED 32
+#define BE_PORT_LINK_DOWN 0000
+#define BE_PORT_LINK_UP 0001
+
+typedef unsigned char BOOLEAN;
+
+#define TOU32(_struct_) *((u32 *)(&(_struct_)))
+
+/*
+ * DLs used by the Network driver. 0x00000000 to 0x00000800 are used
+ * by SA/BECLIB
+ */
+typedef enum _NETD_DEBUG_LEVELS {
+ DL_ERROR = DL_ERR,
+ DL_EVENT = 0x00001000,
+ DL_CQ = 0x00002000,
+ DL_SEND = 0x00004000,
+ DL_RECV = 0x00008000,
+ DL_WINDOW = 0x00010000,
+ DL_OFFLOAD = 0x00020000,
+ DL_UPLOAD = 0x00040000,
+ DL_VLAN = 0x00080000,
+ DL_IPSEC = 0x00100000,
+ DL_INT = 0x00200000,
+ DL_PNP = 0x00400000,
+ DL_ETH_INFO = 0x00800000,
+ DL_TIMER = 0x01000000,
+ DL_INIT = 0x02000000,
+ DL_SHUTDOWN = 0x04000000,
+} NET_DEBUG_LEVELS;
+
+/*
+ * Structure to return Ethernet statistics counters maintained by BE.
+ * Defined in srcgen.
+ */
+typedef BE_RXF_STATS BLADE_ETH_STATS, *PBLADE_ETH_STATS;
+
+/*
+@...ef
+ This structure is used by the OSM driver to give BNI
+ physical fragments to use for DMAing data from NIC.
+*/
+typedef struct _BNI_RECV_BUFFER {
+ SA_LIST_ENTRY rxb_list; /* for maintaining a linked list */
+ PVOID rxb_va; /* buffer virtual address */
+ u32 rxb_pa_lo; /* low part of physical address */
+ u32 rxb_pa_hi; /* high part of physical address */
+ u32 rxb_len; /* length of recv buffer */
+ PVOID rxb_ctxt; /* context for OSM driver to use */
+} BNI_RECV_BUFFER, *PBNI_RECV_BUFFER;
+
+/*
+ * fragment list to describe scattered data.
+ */
+typedef struct _BNI_TX_FRAG_LIST {
+ u32 txb_len; /* Size of this fragment */
+ u32 txb_pa_lo; /* Lower 32 bits of 64 bit physical addr */
+ u32 txb_pa_hi; /* Higher 32 bits of 64 bit physical addr */
+} BNI_TX_FRAG_LIST, *PBNI_TX_FRAG_LIST;
+/*
+ * maximum fragements in a TX request
+ */
+#define BE_MAX_TX_FRAG_COUNT (30)
+
+/*
+ * Flag bits for send operation
+ */
+#define IPCS (1 << 0) /* Enable IP checksum offload */
+#define UDPCS (1 << 1) /* Enable UDP checksum offload */
+#define TCPCS (1 << 2) /* Enable TCP checksum offload */
+#define LSO (1 << 3) /* Enable Large Segment offload */
+#define ETHVLAN (1 << 4) /* Enable VLAN insert */
+#define ETHEVENT (1 << 5) /* Generate event on completion */
+#define ETHCOMPLETE (1 << 6) /* Generate completion when done */
+#define IPSEC (1 << 7) /* Enable IPSEC */
+#define FORWARD (1 << 8) /* Send the packet in forwarding path */
+#define FIN (1 << 9) /* Issue FIN segment */
+
+/* @brief
+ * This structure is the main tracking structure for a NIC interface.
+ * This data structure contains OS agnostic data members for processing
+ * intialization, sends, receives, and asynchronous events from the
+ * BladeEngine network function. The OSM driver makes
+ * calls into functions defined at this layer for initialization,
+ * eumeration and population of physical fragments with per-packet
+ * control flags for send and receive operations, population of
+ * receive buffers for NIC , and handling asynchronous
+ * events (such as link status change, packet pattern recognition etc.).
+ */
+typedef struct _BNI_NET_OBJECT {
+
+ /*
+ * MCC Ring - used to send ioctl cmds to embedded ARM processor
+ */
+ PMCC_WRB mcc_q; /* VA of the start of the ring */
+ u32 mcc_q_len; /* # of WRB entries in this ring */
+ u32 mcc_q_hd; /* MCC ring head */
+ u8 mcc_q_created; /* flag to help cleanup */
+ u8 mcc_q_pages; /* Num of pages allocacted by OSM */
+ BE_MCC_OBJECT mcc_q_obj; /* BECLIB's MCC ring Object */
+ SA_PHYSICAL_ADDRESS mcc_q_pa; /* Physical address in LE order */
+ /*
+ * MCC Completion Ring - ARM's responses to ioctls sent from MCC ring
+ */
+ PMCC_CQ_ENTRY mcc_cq; /* VA of the start of the ring */
+ u32 mcc_cq_len; /* # of compl. entries in this ring */
+ u32 mcc_cq_tl; /* compl. ring tail */
+ u8 mcc_cq_created; /* flag to help cleanup */
+ u8 mcc_cq_pages; /* Num of pages allocacted by OSM */
+ BE_CQ_OBJECT mcc_cq_obj; /* BECLIB's MCC compl. ring object */
+ u32 mcc_cq_id; /* MCC ring ID */
+ SA_PHYSICAL_ADDRESS mcc_cq_pa; /* Physical address in LE order */
+ /*
+ * BEClib uses an array of context objects to track outstanding
+ * requests to the MCC. We need allocate the same number of
+ * conext entries as the number of entries in the MCC WRB ring
+ */
+ u8 mcc_wrb_ctxt_pages; /* Num of pages allocacted by OSM */
+ PVOID mcc_wrb_ctxt; /* pointer to the context area */
+ u32 mcc_wrb_ctxtLen; /* Number of entries in the context */
+ /*
+ * NIC send request ring - used for xmitting raw ether frames.
+ */
+ PETH_WRB tx_q; /* VA of the start of the ring */
+ u32 tx_q_len; /* # if entries in the send ring */
+ u32 tx_q_hd; /* Head index. Next req. goes here */
+ u32 tx_q_tl; /* Tail indx. oldest outstanding req. */
+ u8 tx_q_created; /* flag to help cleanup */
+ u8 tx_q_pages; /* Num of pages allocacted by OSM */
+ BE_ETHSQ_OBJECT tx_q_obj; /* BECLIB's send Q handle */
+ SA_PHYSICAL_ADDRESS tx_q_pa; /* Physical address in LE order */
+ u32 tx_q_id; /* send queue ring ID */
+ u32 tx_q_port; /* 0 no binding, 1 port A, 2 port B */
+
+ u32 tx_q_used; /* # of WRBs used */
+ /* ptr to an array in which we store context info for each send req. */
+ PVOID *tx_ctxt;
+ /*
+ * NIC Send compl. ring - completion status for all NIC frames xmitted.
+ */
+ PETH_TX_COMPL tx_cq; /* VA of start of the ring */
+ u32 txcq_len; /* # of entries in the ring */
+ /*
+ * index into compl ring where the host expects next completion entry
+ */
+ u32 tx_cq_tl;
+ u32 tx_cq_id; /* completion queue id */
+ u8 tx_cq_created; /* flag to help cleanup */
+ u8 tx_cq_pages; /* Num of pages allocacted by OSM */
+ BE_CQ_OBJECT tx_cq_obj;
+ SA_PHYSICAL_ADDRESS tx_cq_pa; /* Physical address in LE order */
+ /*
+ * Event Queue - all completion entries post events here.
+ */
+ PEQ_ENTRY event_q; /* VA of start of event queue */
+ u32 event_q_len; /* # of entries */
+ u32 event_q_tl; /* Tail of the event queue */
+ u32 event_q_id; /* Event queue ID */
+ u8 event_q_created; /* flag to help cleanup */
+ u8 event_q_pages; /* Num of pages allocacted by OSM */
+ BE_EQ_OBJECT event_q_obj; /* Queue handle */
+ SA_PHYSICAL_ADDRESS event_q_pa; /* Physical address in LE order */
+ /*
+ * NIC receive queue - Data buffers to be used for receiving unicast,
+ * broadcast and multi-cast frames are posted here.
+ */
+ PETH_RX_D rx_q; /* VA of start of the queue */
+ u32 rx_q_len; /* # of entries */
+ u32 rx_q_hd; /* Head of the queue */
+ u32 rx_q_posted; /* number of posted buffers */
+ u32 rx_q_id; /* queue ID */
+ u8 rx_q_created; /* flag to help cleanup */
+ u8 rx_q_pages; /* Num of pages allocacted by OSM */
+ BE_ETHRQ_OBJECT rx_q_obj; /* NIC RX queue handle */
+ SA_PHYSICAL_ADDRESS rx_q_pa; /* Physical address */
+ /*
+ * Pointer to an array of opaque context object for use by OSM driver
+ */
+ PVOID *rx_ctxt;
+ /*
+ * NIC unicast RX completion queue - all unicast ether frame completion
+ * statuses from BE come here.
+ */
+ PETH_RX_COMPL ucrx_cq; /* VA of start of the queue */
+ u32 ucrx_cq_len; /* # of entries */
+ u32 ucrx_cq_tl; /* Tail of the queue */
+ u32 ucrx_cq_id; /* queue ID */
+ u8 ucrx_cq_created; /* flag to help cleanup */
+ u8 ucrx_cq_pages; /* Num of pages allocacted by OSM */
+ BE_CQ_OBJECT ucrx_cq_obj; /* queue handle */
+ SA_PHYSICAL_ADDRESS ucrx_cq_pa; /* Physical address in LE order */
+ /*
+ * Broadcast RX completion queue - all broadcast and multicast ether
+ * completion statues from BE come here.
+ */
+ PETH_RX_COMPL bcrx_cq; /* VA of start of queue */
+ u32 bcrx_cq_len; /* # of entries */
+ u32 bcrx_cq_tl; /* Tail of the queue */
+ u32 bcrx_cq_id; /* Queue ID */
+ u8 bcrx_cq_created; /* flag to help cleanup */
+ u8 bcrx_cq_pages; /* Num of pages allocacted by OSM */
+ BE_CQ_OBJECT bcrx_cq_obj; /* queue handle */
+ SA_PHYSICAL_ADDRESS bcrx_cq_pa; /* Physical address in LE order */
+
+ BE_FUNCTION_OBJECT fn_obj; /* function object */
+ u32 rx_buf_size; /* Size of the RX buffers */
+ u8 mac_address[6]; /* MAC address */
+ /*
+ * OSM handle. OSM drivers can use this pointer to extend NetObject.
+ */
+ PVOID osm_netobj;
+ SA_SGL mb_sgl; /* SGL for MCC_MAIL_BOX */
+ PVOID mb_ptr; /* mailbox ptr to be freed */
+} BNI_NET_OBJECT, *PBNI_NET_OBJECT;
Even better, a pointer typedef — doubly ugly.
+/*
+ * convenience macros to access some NetObject members
+ */
+#define NET_FH(np) (&(np)->fn_obj)
+
+/*
+ * Functions to advance the head and tail in various rings.
+ */
+static INLINE void bni_adv_eq_tl(PBNI_NET_OBJECT pnob)
+{
+ pnob->event_q_tl = (pnob->event_q_tl + 1) % pnob->event_q_len;
+}
You know divides are expensive.
+static INLINE void bni_adv_txq_hd(PBNI_NET_OBJECT pnob)
+{
+ pnob->tx_q_hd = (pnob->tx_q_hd + 1) % pnob->tx_q_len;
+}
+
+static INLINE void bni_adv_txq_tl(PBNI_NET_OBJECT pnob)
+{
+ pnob->tx_q_tl = (pnob->tx_q_tl + 1) % pnob->tx_q_len;
+}
+
+static INLINE void bni_adv_txcq_tl(PBNI_NET_OBJECT pnob)
+{
+ pnob->tx_cq_tl = (pnob->tx_cq_tl + 1) % pnob->txcq_len;
+}
+
+static INLINE void bni_adv_rxq_hd(PBNI_NET_OBJECT pnob)
+{
+ pnob->rx_q_hd = (pnob->rx_q_hd + 1) % pnob->rx_q_len;
+}
+
+static INLINE void bni_adv_ucrxcq_tl(PBNI_NET_OBJECT pnob)
+{
+ pnob->ucrx_cq_tl = (pnob->ucrx_cq_tl + 1) % pnob->ucrx_cq_len;
+}
+
+static INLINE void bni_adv_bcrxcq_tl(PBNI_NET_OBJECT pnob)
+{
+ pnob->bcrx_cq_tl = (pnob->bcrx_cq_tl + 1) % pnob->bcrx_cq_len;
+}
+
+static INLINE BESTATUS bni_process_mcc_cmpl(BE_MCC_OBJECT *pMccObj)
+{
+ return (be_mcc_process_cq(pMccObj, 1));
+}
+
+/* forward declarations of function prototypes */
+BESTATUS bni_init(PBE_CHIP_OBJECT);
+BESTATUS bni_create_mcc_rings(PBNI_NET_OBJECT pnob);
+extern void bni_destroy_netobj(PBNI_NET_OBJECT, SA_DEV *);
+void bni_cleanup(PBE_CHIP_OBJECT chipobj);
+
+BESTATUS bni_create_netobj(PBNI_NET_OBJECT, SA_DEV_BAR_LOCATIONS *, u32,
+ SA_DEV *, PBE_CHIP_OBJECT);
+
+BESTATUS bni_tx_pkt(PBNI_NET_OBJECT, PBNI_TX_FRAG_LIST, u32,
+ u32, u32, void *, u32);
+void bni_start_tx(PBNI_NET_OBJECT, u32);
+
+u32 bni_post_rx_buffs(PBNI_NET_OBJECT, PSA_LIST_ENTRY);
+BESTATUS bni_change_eqd(PBNI_NET_OBJECT, u32);
+
+PETH_TX_COMPL bni_get_tx_cmpl(PBNI_NET_OBJECT);
+PETH_RX_COMPL bni_get_ucrx_cmpl(PBNI_NET_OBJECT);
+PETH_RX_COMPL bni_get_bcrx_cmpl(PBNI_NET_OBJECT);
+void bni_notify_cmpl(PBNI_NET_OBJECT, int, int, int);
+
+void bni_enable_intr(PBNI_NET_OBJECT);
+void bni_enable_eq_intr(PBNI_NET_OBJECT);
+void bni_disable_intr(PBNI_NET_OBJECT);
+void bni_disable_eq_intr(PBNI_NET_OBJECT);
+
+u32 bni_get_isr(PBNI_NET_OBJECT);
+
+PEQ_ENTRY bni_get_event(PBNI_NET_OBJECT);
+void bni_notify_event(PBNI_NET_OBJECT, int, int);
+
+BESTATUS bni_get_uc_mac_adrr(PBNI_NET_OBJECT, u8, u8, u8 Pd,
+ PSA_MAC_ADDRESS macAddr,
+ MCC_WRB_CQE_CALLBACK cbf, PVOID cbc);
+
+BESTATUS bni_set_uc_mac_adr(PBNI_NET_OBJECT, u8, u8, u8 Pd,
+ PSA_MAC_ADDRESS macAddr,
+ MCC_WRB_CQE_CALLBACK cbf, PVOID cbc);
+
+BESTATUS bni_set_mc_filter(PBNI_NET_OBJECT pnob, u32 NumMac,
+ BOOLEAN Promiscuous,
+ PSA_MAC_ADDRESS macAddr,
+ MCC_WRB_CQE_CALLBACK cbf, PVOID cbc);
+
+void bni_set_promisc(PBNI_NET_OBJECT pnob);
+void bni_reset_promisc(PBNI_NET_OBJECT pnob);
+BESTATUS bni_config_vlan(PBNI_NET_OBJECT pnob, u16 *VlanId,
+ u32 numVlans, MCC_WRB_CQE_CALLBACK cbf,
+ PVOID cbc, BOOLEAN Promiscuous);
+
+BESTATUS bni_get_stats(PBNI_NET_OBJECT pnob,
+ IOCTL_ETH_GET_STATISTICS *ioctl_va,
+ u64 ioctl_pa, MCC_WRB_CQE_CALLBACK cbf, PVOID cbc);
+
+BESTATUS bni_get_link_sts(PBNI_NET_OBJECT pnob,
+ PBE_LINK_STATUS be_link_sts,
+ MCC_WRB_CQE_CALLBACK cbf, PVOID cbc);
+BESTATUS bni_set_flow_ctll(PBE_FUNCTION_OBJECT pFnObj, boolean txfc_enable,
+ boolean rxfc_enable);
+BESTATUS bni_get_flow_ctl(PBE_FUNCTION_OBJECT pFnObj,
+ boolean *txfc_enable, boolean *rxfc_enable);
+u32 bni_process_rx_flush_cmpl(PBNI_NET_OBJECT pnob);
+
+#endif /* #ifndef _BNI_H_ */
diff -uprN orig/linux-2.6.24.2/drivers/net/benet/be_init.c benet/linux-2.6.24.2/drivers/net/benet/be_init.c
--- orig/linux-2.6.24.2/drivers/net/benet/be_init.c 1970-01-01 05:30:00.000000000 +0530
+++ benet/linux-2.6.24.2/drivers/net/benet/be_init.c 2008-02-14 15:29:34.088482208 +0530
@@ -0,0 +1,1426 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or at your option any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, 5th Floor
+ * Boston, MA 02110-1301 USA
+ *
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called GPL.
+ *
+ * Contact Information:
+ * linux-drivers@...verengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+
+#include "be.h"
+
+#define DRVR_VERSION "1.0.688"
+
+/* PCI IDs of supported devices: ServerEngines (0x19a2) BladeEngine */
+static struct pci_device_id be_device_id_table[] = {
+	{ PCI_DEVICE(0x19a2, 0x0201) },
+	{ 0 }
+};
Use PCI_DEVICE() and just { 0 }
+
+MODULE_DEVICE_TABLE(pci, be_device_id_table);
+
+MODULE_VERSION(DRVR_VERSION);
+
+#define DRV_DESCRIPTION "ServerEngines BladeEngine Network Driver Version " \
+ DRVR_VERSION
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("ServerEngines");
+MODULE_LICENSE("GPL");
+
+unsigned int dbg_mask = (DL_ALWAYS | DL_ERR); /* always show error messages */
+unsigned int msix; /*By default */
+unsigned int ls_mss = (60 * 1024);
More exposed global name space
+unsigned int rxbuf_size = 2048;	/* Size of Receive buffers posted */
+
+/*
+ * The third argument of module_param() is the sysfs permission mask
+ * (e.g. 0444), not a mask of legal values; the original passed value
+ * masks such as (0 | 1) and (DL_ALWAYS | DL_ERR) here.
+ */
+module_param(msix, uint, 0444);
+module_param(dbg_mask, uint, 0644);
+module_param(rxbuf_size, uint, 0444);
+
+MODULE_PARM_DESC(msix, "Use MSI-x interrupts");
+MODULE_PARM_DESC(dbg_mask, "Debug mask");
+MODULE_PARM_DESC(rxbuf_size, "Size of buffers to hold Rx data");
+
+static int be_probe(struct pci_dev *, const struct pci_device_id *);
+static void be_remove(struct pci_dev *);
+
+#ifdef CONFIG_PM
+static void be_pm_cleanup(PBE_ADAPTER, PBNI_NET_OBJECT,
+ struct net_device *);
+static void be_up(PBE_ADAPTER);
+static int be_resume(struct pci_dev *);
+
+static int be_suspend(struct pci_dev *, pm_message_t);
+#endif
+
+int be_mcc_init(PBE_ADAPTER adapter);
+void be_update_link_status(PBE_ADAPTER adapter);
+void be_link_status_async_callback(PVOID context, u32 event_code,
+ PVOID event);
+
+char be_drvr_ver[] = DRVR_VERSION;
+char be_fw_ver[32]; /* F/W version filled in by be_probe */
+
+char be_driver_name[] = "benet";
+
+/* PCI driver hooks; uses C99 designated initializers (GNU-style
+ * "name:value" initializers are deprecated in the kernel). */
+static struct pci_driver be_driver = {
+	.name		= be_driver_name,
+	.id_table	= be_device_id_table,
+	.probe		= be_probe,
+#ifdef CONFIG_PM
+	.suspend	= be_suspend,
+	.resume		= be_resume,
+#endif
+	.remove		= be_remove,
+};
Use C99 syntax
.name = be_driver_name,
+
+/*
+ * Number of entries in each queue.
+ */
+#define EVENT_Q_LEN 1024
+#define ETH_TXQ_LEN 2048
+#define ETH_TXCQ_LEN 1024
+#define ETH_RXQ_LEN 1024 /* Does not support any other value */
+#define ETH_UC_RXCQ_LEN 1024
+#define ETH_BC_RXCQ_LEN 256
+#define MCC_Q_LEN 64 /* total size not to exceed 8 pages */
+#define MCC_CQ_LEN 256
+
+PBE_ADAPTER be_adapter[MAX_BE_DEVICES];
Having a fixed number of adapters is awkward.
+
+/*
+ * Initialize and register a network device for the NetObject.
+ */
+static int init_be_netdev(PBE_ADAPTER adapter, PBNI_NET_OBJECT pnob)
+{
+	struct net_device *netdev;
+	int ret;
+
+#ifdef CONFIG_PM
+	/* On resume the netdev still exists; only reprogram the MAC. */
+	if (pm_resume) {
+		bni_set_uc_mac_adr(pnob, 0, 0, 0,
+				   (PSA_MAC_ADDRESS) pnob->mac_address,
+				   NULL, NULL);
+		return 0;
+	}
+#endif
+
+	/*
+	 * Allocate netdev. No private data structure is allocated with
+	 * the netdev; the Net Object itself is used as private data.
+	 */
+	netdev = alloc_etherdev(0);
+	if (netdev == NULL)
+		return -ENOMEM;
+
+	/*
+	 * Get MAC address from receive table.
+	 * (The original code also kept an unused local "p" pointing at
+	 * the MAC address; it has been removed.)
+	 */
+	bni_get_uc_mac_adrr(pnob, 0, 0, OSM_NOB(pnob)->devno,
+			    (PSA_MAC_ADDRESS) pnob->mac_address, NULL, NULL);
+
+	memcpy(netdev->dev_addr, pnob->mac_address, ETH_ALEN);
+	netdev->priv = pnob;	/* We use the Net Object as private data */
+	netdev->init = &benet_probe;
+	/*
+	 * Initialize to No Link. Link will be enabled during
+	 * benet_open() or when physical Link is up
+	 */
+	netif_carrier_off(netdev);
+	netif_stop_queue(netdev);
+
+	strcpy(netdev->name, "eth%d");
+
+	SET_NETDEV_DEV(netdev, &(adapter->pdev->dev));
+	ret = register_netdev(netdev);
+	if (ret != 0) {
+		TRACE(DL_INIT,
+		      "Netdevice registration failed - Errno %d\n", ret);
+		free_netdev(netdev);
+		return ret;
+	}
+	OSM_NOB(pnob)->os_handle = netdev;
+	return 0;
+}
+
+/* Initialize the pci_info structure for this function */
+static int init_pci_be_function(PBE_ADAPTER adapter, struct pci_dev *pdev)
+{
+	adapter->num_bars = 3;
+	/* BAR 2: CSR (control/status register) space */
+	adapter->pci_bars[0].base_pa = pci_resource_start(pdev, 2);
+	adapter->pci_bars[0].base_va =
+	    ioremap_nocache(adapter->pci_bars[0].base_pa,
+			    pci_resource_len(pdev, 2));
+	if (adapter->pci_bars[0].base_va == NULL)
+		return -ENOMEM;
+	adapter->pci_bars[0].length = sizeof(BLADE_ENGINE_CSRMAP);
+	adapter->pci_bars[0].mem_or_io_mapped = SA_MEM_MAPPED;
+	adapter->pci_bars[0].type = SA_BAR_TYPE_CSR;
+
+	/* BAR 4: Door Bell space; mapped with a fixed 128K length,
+	 * not pci_resource_len() -- TODO confirm this is intentional */
+	adapter->pci_bars[1].base_pa = pci_resource_start(pdev, 4);
+	adapter->pci_bars[1].base_va =
+	    ioremap_nocache(adapter->pci_bars[1].base_pa, (128 * 1024));
+	if (adapter->pci_bars[1].base_va == NULL) {
+		/* unwind the CSR mapping made above */
+		iounmap(adapter->pci_bars[0].base_va);
+		return -ENOMEM;
+	}
+	adapter->pci_bars[1].length = sizeof(PROTECTION_DOMAIN_DBMAP);
+	adapter->pci_bars[1].mem_or_io_mapped = SA_MEM_MAPPED;
+	adapter->pci_bars[1].type = SA_BAR_TYPE_PD;
+
+	/* BAR 1: PCI config/side-band space, mapped at full length */
+	adapter->pci_bars[2].base_pa = pci_resource_start(pdev, 1);
+	adapter->pci_bars[2].length = pci_resource_len(pdev, 1);
+	adapter->pci_bars[2].base_va =
+	    ioremap_nocache(adapter->pci_bars[2].base_pa,
+			    adapter->pci_bars[2].length);
+	if (adapter->pci_bars[2].base_va == NULL) {
+		/* unwind both earlier mappings */
+		iounmap(adapter->pci_bars[0].base_va);
+		iounmap(adapter->pci_bars[1].base_va);
+		return -ENOMEM;
+	}
+	adapter->pci_bars[2].mem_or_io_mapped = SA_MEM_MAPPED;
+	adapter->pci_bars[2].type = SA_BAR_TYPE_PCI;
+
+	adapter->pdev = pdev;
+
+	return 0;
+}
+
+/*
+ * Enable MSIx and return 1 if successful. Else return 0
+ */
+int be_enable_msix(PBE_ADAPTER adapter)
+{
+	unsigned int i;
+	int ret;	/* pci_enable_msix() returns a signed status */
+
+	if (!msix)
+		return 0;
+
+	for (i = 0; i < BE_MAX_REQ_MSIX_VECTORS; i++)
+		adapter->msix_entries[i].entry = i;
+
+	ret = pci_enable_msix(adapter->pdev,
+			      adapter->msix_entries,
+			      BE_MAX_REQ_MSIX_VECTORS);
+	if (ret) {
+		/* could not get all vectors; caller falls back to INTx */
+		adapter->msix_enabled = 0;
+		return 0;
+	}
+
+	/* mark MSI-x enabled only after pci_enable_msix() succeeds */
+	adapter->msix_enabled = 1;
+	return 1;
+}
+
+/*
+ * Module init entry point. Registers our device and returns.
+ * Our probe will be called if the device is found.
+ */
+
+static int __init be_init_module(void)
+{
+	int ret;
+
+	/*
+	 * rxbuf_size is a runtime module parameter, so it must be
+	 * validated here at load time; a compile-time check cannot
+	 * apply. Only 2048, 4096 and 8192 are supported.
+	 */
+	if ((rxbuf_size != 8192) && (rxbuf_size != 4096)
+	    && (rxbuf_size != 2048)) {
+		printk(KERN_WARNING
+		       "Unsupported receive buffer size (%d) requested\n",
+		       rxbuf_size);
+		printk(KERN_WARNING
+		       "Must be 2048, 4096 or 8192. Defaulting to 2048\n");
+		rxbuf_size = 2048;
+	}
+
+	ret = pci_register_driver(&be_driver);
+	/* trace text fixed: this calls pci_register_driver, not the
+	 * obsolete pci_module_init */
+	TRACE(DL_INIT, "pci_register_driver returned %d", ret);
+
+	return ret;
+}
+
+module_init(be_init_module);
+
+/*
+ * be_exit_module - Driver Exit Cleanup Routine
+ */
+static void __exit be_exit_module(void)
+{
+	/* __func__ is the standard C99 spelling; __FUNCTION__ is a
+	 * deprecated GNU extension */
+	TRACE(DL_SHUTDOWN, "%s Entry\n", __func__);
+
+	pci_unregister_driver(&be_driver);
+}
+
+module_exit(be_exit_module);
+
+/*
+ * Registers the ISR for BE. Uses an MSI-x interrupt if configured and
+ * requested; otherwise falls back to INTx. Returns 0 for success and
+ * -1 for failure.
+ */
+/* static: only called from be_probe() in this file; keeps the generic
+ * name "register_isr" out of the kernel's global namespace */
+static int register_isr(PBE_ADAPTER adapter, PBNI_NET_OBJECT pnob)
+{
+	int msix_intr, r;
+	struct net_device *netdev = OSM_NOB(pnob)->os_handle;
+	u32 msix_ret = 0;
+
+	netdev->irq = adapter->pdev->irq;
+
+	msix_intr = 0;
+	msix_ret = be_enable_msix(adapter);
+	if (msix_ret) {
+		/* Register MSIx Interrupt handler.
+		 * NOTE(review): the (void *) cast hides any prototype
+		 * mismatch; be_int should have the irq_handler_t
+		 * signature so the cast can go away -- verify. */
+		r = request_irq(adapter->msix_entries[0].vector,
+				(void *)be_int, IRQF_SHARED,
+				netdev->name, netdev);
+		if (r) {
+			printk(KERN_WARNING
+			       "MSIX Request IRQ failed - Errno %d\n", r);
+		} else {
+			msix_intr = 1;
+			TRACE(DL_INIT, "MSIx IRQ %d for %s\n",
+			      adapter->msix_entries[0].vector,
+			      netdev->name);
+		}
+	}
+	if (msix_intr == 0) {
+		/* request legacy INTx interrupt */
+		r = request_irq(netdev->irq, (void *)be_int,
+				IRQF_SHARED, netdev->name, netdev);
+		if (r) {
+			printk(KERN_ERR
+			       "INTx Request IRQ failed - Errno %d\n", r);
+			return -1;
+		}
+		TRACE(DL_INIT, "BE: INTx IRQ %d for %s\n",
+		      netdev->irq, netdev->name);
+	}
+	return 0;
+}
+
+/*
+ * This function is called by the PCI sub-system when it finds a PCI
+ * device with dev/vendor IDs that match with one of our devices.
+ * All of the driver initialization is done in this function.
+ */
+static int be_probe(struct pci_dev *pdev,
+		    const struct pci_device_id *pdev_id)
+{
+	int status = 0;
+	PBE_ADAPTER adapter = NULL;
+	u32 r;
+	u32 adapt_num = 0;
+	IOCTL_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD ioctl_pload;
+	PBNI_NET_OBJECT pnob = NULL;
+
+	TRACE(DL_INFO, "Entering probe");
+	/* find the first free slot in the fixed-size adapter table */
+	while (adapt_num < MAX_BE_DEVICES) {
+		if (!be_adapter[adapt_num])
+			break;
+		adapt_num++;
+	}
+
+	if (adapt_num == MAX_BE_DEVICES) {
+		printk(KERN_WARNING "Cannot support more than %d BE Adapters",
+		       MAX_BE_DEVICES);
+		/* NOTE(review): -1 is not a valid errno; -ENODEV fits */
+		return -1;
+	}
+
+	status = pci_enable_device(pdev);
+	if (status) {
+		printk(KERN_ERR "pci_enable_device() for BE adapter %d failed",
+		       adapt_num);
+		return status;
+	}
+
+	status = pci_request_regions(pdev, be_driver_name);
+	if (status)
+		/* NOTE(review): device left enabled here; missing
+		 * pci_disable_device() on this error path */
+		return status;
+
+	pci_set_master(pdev);
+
+	/* NOTE(review): cast of kmalloc() is unneeded; kzalloc() would
+	 * fold in the memset() below */
+	adapter = (PBE_ADAPTER) kmalloc(sizeof(BE_ADAPTER), GFP_KERNEL);
+	if (adapter == NULL) {
+		TRACE(DL_INIT,
+		      "Failed to alloc memory for adapter structure\n");
+		pci_release_regions(pdev);
+		goto err_ret;
+	}
+
+	memset(adapter, 0, sizeof(BE_ADAPTER));
+
+	be_adapter[adapt_num] = adapter;
+	/*
+	 * Adaptive interrupt coalescing limits in usecs.
+	 * should be a multiple of 8.
+	 */
+	adapter->enable_aic = 1;
+	adapter->max_eqd = MAX_EQD;
+	adapter->min_eqd = 0;
+	adapter->cur_eqd = 0;	/* start with no EQ delay */
+	r = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+	if (!r) {
+		/* Device is DAC Capable. */
+		adapter->dma_64bit_cap = TRUE;
+	} else {
+		adapter->dma_64bit_cap = FALSE;
+		r = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		if (r) {
+			printk(KERN_ERR "Could not set PCI DMA Mask\n");
+			/* NOTE(review): returns without releasing the
+			 * regions, disabling the device, or freeing the
+			 * adapter / adapter-table slot */
+			return r;
+		}
+	}
+
+	status = init_pci_be_function(adapter, pdev);
+	if (status < 0) {
+		printk(KERN_ERR "Failed to map PCI BARS\n");
+		status = -ENOMEM;
+		goto cleanup1;
+	}
+
+	(void)sa_trace_set_level(dbg_mask);
+
+	r = bni_init(&adapter->chip_object);
+	if (r != 0) {
+		/* NOTE(review): printk() without a KERN_ level */
+		printk("bni_init() failed - Error %d\n", r);
+		goto cleanup1;
+	}
+
+	/* Allocate Memory for getting the Link status */
+	adapter->be_link_sts = (PBE_LINK_STATUS)
+	    kmalloc(sizeof(BE_LINK_STATUS), GFP_KERNEL);
+	if (adapter->be_link_sts == NULL) {
+		/* NOTE(review): printk() without a KERN_ level */
+		printk("Memory allocation for link status buffer failed\n");
+		goto cleanup1;
+	}
+	spin_lock_init(&adapter->txq_lock);
+
+	/* creates the NetObject and all Eth rings */
+	status = be_prepare_interface(adapter);
+	if (status < 0) {
+		goto cleanup1;
+	}
+	pnob = adapter->net_obj;
+
+	/* if the rx_frag size if 2K, one page is shared as two RX frags */
+	OSM_NOB(pnob)->rx_pg_shared =
+	    (pnob->rx_buf_size <= PAGE_SIZE / 2) ? TRUE : FALSE;
+	if (pnob->rx_buf_size != rxbuf_size) {
+		printk(KERN_WARNING
+		       "Could not set Rx buffer size to %d. Using %d\n",
+		       rxbuf_size, pnob->rx_buf_size);
+		rxbuf_size = pnob->rx_buf_size;
+	}
+
+	tasklet_init(&(adapter->sts_handler), osm_process_sts,
+		     (unsigned long)adapter);
+	adapter->tasklet_started = 1;	/* indication to cleanup */
+	spin_lock_init(&(adapter->int_lock));
+
+	if (register_isr(adapter, pnob) != 0)
+		goto cleanup;
+
+	adapter->isr_registered = 1;
+	adapter->rx_csum = 1;	/* enable RX checksum check */
+#ifdef RX_PKT_COALESCE
+	adapter->max_rx_coal = MAX_COALESCE_FRAGS;
+#endif
+
+	/* print the version numbers */
+	memset(&ioctl_pload, 0,
+	       sizeof(IOCTL_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD));
+	printk(KERN_INFO "BladeEngine Driver version:%s. "
+	       "Copyright ServerEngines, Corporation 2005 - 2008\n",
+	       be_drvr_ver);
+	if (be_function_get_fw_version(&pnob->fn_obj, &ioctl_pload, NULL,
+				       NULL) == BE_SUCCESS) {
+		adapter->be_fw_ver =
+		    simple_strtoul(ioctl_pload.firmware_version_string + 4,
+				   NULL, 10);
+		strncpy(be_fw_ver, ioctl_pload.firmware_version_string, 32);
+		printk(KERN_INFO "BladeEngine Firmware Version:%s\n",
+		       ioctl_pload.firmware_version_string);
+	} else {
+		printk(KERN_WARNING "Unable to get BE Firmware Version\n");
+	}
+
+	sema_init(&adapter->get_eth_stat_sem, 0);
+
+	/* NOTE(review): allocation is not checked for NULL before the
+	 * adapter->ctxt dereferences that follow */
+	adapter->ctxt = (be_timer_ctxt_t *)
+	    kmalloc(sizeof(be_timer_ctxt_t), GFP_KERNEL);
+
+	init_timer(&adapter->ctxt->get_stats_timer);
+	atomic_set(&adapter->ctxt->get_stat_flag, 0);
+	adapter->ctxt->get_stats_timer.function = &get_stats_timer_handler;
+
+	status = be_mcc_init(adapter);
+	if (status < 0) {
+		goto cleanup;
+	}
+
+	be_update_link_status(adapter);
+
+	/* Register async call back function to handle link status updates */
+	if (be_mcc_add_async_event_callback(&adapter->net_obj->mcc_q_obj,
+					    be_link_status_async_callback,
+					    (PVOID) adapter) != BE_SUCCESS) {
+		printk(KERN_WARNING "add_async_event_callback failed");
+		printk(KERN_WARNING
+		       "Link status changes may not be reflected\n");
+	}
+
+	/* Enable ChipInterrupt and EQ Interrupt */
+	bni_enable_intr(adapter->net_obj);
+	enable_eq_intr(adapter->net_obj);
+	adapter->dev_state = BE_DEV_STATE_INIT;
+	return 0;		/* successful return */
+
+cleanup1:
+	/* early failures: nothing registered yet, undo by hand */
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	be_adapter[adapt_num] = NULL;
+	kfree(adapter);
+	goto err_ret;
+
+cleanup:
+	/* late failures: be_remove() undoes everything probe did */
+	be_remove(pdev);
+
+err_ret:
+	/* NOTE(review): the real error in "status" is discarded and
+	 * -ENOMEM is always returned here */
+	printk(KERN_ERR "BladeEngine init failed\n");
+	return -ENOMEM;
+}
+
+/*
+ * Get the current link status and print the status on console
+ */
+void be_update_link_status(PBE_ADAPTER adapter)
+{
+	int status;
+	PBNI_NET_OBJECT pnob = adapter->net_obj;
+
+	/* synchronous query (no completion callback) for both MAC ports */
+	status = bni_get_link_sts(pnob, adapter->be_link_sts, NULL, NULL);
+
+	if (status == BE_SUCCESS) {
+		/* a port counts as "up" only when the firmware reports
+		 * both a non-zero speed and a non-zero duplex */
+		if (adapter->be_link_sts->mac0_speed &&
+		    adapter->be_link_sts->mac0_duplex)
+			adapter->port0_link_sts = BE_PORT_LINK_UP;
+		else
+			adapter->port0_link_sts = BE_PORT_LINK_DOWN;
+
+		if (adapter->be_link_sts->mac1_speed &&
+		    adapter->be_link_sts->mac1_duplex)
+			adapter->port1_link_sts = BE_PORT_LINK_UP;
+		else
+			adapter->port1_link_sts = BE_PORT_LINK_DOWN;
+
+		printk(KERN_INFO "Link Properties for %s:\n",
+		       ((struct net_device *)(OSM_NOB(pnob)->os_handle))->name);
+		be_print_link_info(adapter->be_link_sts);
+		return;
+	}
+	printk(KERN_WARNING "Could not get link status for %s\n",
+	       ((struct net_device *)(OSM_NOB(pnob)->os_handle))->name);
+	return;
+}
+
+/* This function handles async callback for link status */
+void be_link_status_async_callback(PVOID context, u32 event_code,
+				   PVOID event)
+{
+	ASYNC_EVENT_LINK_STATE *link_status = (ASYNC_EVENT_LINK_STATE *) event;
+	PBE_ADAPTER adapter = (PBE_ADAPTER) context;
+	BOOLEAN link_enable = FALSE;
+	PBNI_NET_OBJECT pnob;
+	ASYNC_EVENT_TRAILER *async_trailer;
+	struct net_device *netdev;
+
+	if (event_code != ASYNC_EVENT_CODE_LINK_STATE) {
+		/* Not our event to handle */
+		return;
+	}
+	/* the event trailer occupies the last bytes of the CQ entry */
+	async_trailer = (ASYNC_EVENT_TRAILER *) ((u8 *) event +
+		sizeof(MCC_CQ_ENTRY) - sizeof(ASYNC_EVENT_TRAILER));
+
+	SA_ASSERT(async_trailer->event_code == ASYNC_EVENT_CODE_LINK_STATE);
+
+	pnob = adapter->net_obj;
+	SA_ASSERT(pnob);
+	netdev = (struct net_device *)OSM_NOB(pnob)->os_handle;
+	SA_ASSERT(netdev);
+
+	/* Determine if this event is a switch VLD or a physical link event */
+	if (async_trailer->event_type == NTWK_LINK_TYPE_VIRTUAL) {
+		adapter->be_stat.bes_link_change_virtual++;
+		if (adapter->be_link_sts->active_port !=
+		    link_status->active_port) {
+			printk(KERN_INFO
+			       "Active port changed due to VLD on switch\n");
+		} else {
+			/* Link of at least one of the ports changed */
+			printk(KERN_INFO "Link status update\n");
+		}
+
+	} else {
+		adapter->be_stat.bes_link_change_physical++;
+		if (adapter->be_link_sts->active_port !=
+		    link_status->active_port) {
+			printk(KERN_INFO
+			       "Active port changed due to port link status"
+			       " change\n");
+		} else {
+			/* Link of at least one of the ports changed */
+			printk(KERN_INFO "Link status update\n");
+		}
+	}
+
+	/*
+	 * Clear the link status structure. The original passed
+	 * sizeof(adapter->be_link_sts) -- the size of the POINTER --
+	 * so only the first 4/8 bytes of the structure were cleared.
+	 */
+	memset(adapter->be_link_sts, 0, sizeof(*adapter->be_link_sts));
+
+	if ((link_status->port0_link_status == ASYNC_EVENT_LINK_UP) ||
+	    (link_status->port1_link_status == ASYNC_EVENT_LINK_UP)) {
+		if ((adapter->port0_link_sts == BE_PORT_LINK_DOWN) &&
+		    (adapter->port1_link_sts == BE_PORT_LINK_DOWN)) {
+			/*
+			 * Earlier both the ports are down
+			 * So link is up
+			 */
+			link_enable = TRUE;
+		}
+
+		if (link_status->port0_link_status == ASYNC_EVENT_LINK_UP) {
+			adapter->port0_link_sts = BE_PORT_LINK_UP;
+			adapter->be_link_sts->mac0_duplex =
+			    link_status->port0_duplex;
+			adapter->be_link_sts->mac0_speed =
+			    link_status->port0_speed;
+			if (link_status->active_port == NTWK_PORT_A)
+				adapter->be_link_sts->active_port = 0;
+		} else
+			adapter->port0_link_sts = BE_PORT_LINK_DOWN;
+
+		if (link_status->port1_link_status == ASYNC_EVENT_LINK_UP) {
+			adapter->port1_link_sts = BE_PORT_LINK_UP;
+			adapter->be_link_sts->mac1_duplex =
+			    link_status->port1_duplex;
+			adapter->be_link_sts->mac1_speed =
+			    link_status->port1_speed;
+			if (link_status->active_port == NTWK_PORT_B)
+				adapter->be_link_sts->active_port = 1;
+		} else
+			adapter->port1_link_sts = BE_PORT_LINK_DOWN;
+
+		printk(KERN_INFO "Link Properties for %s:\n", netdev->name);
+		be_print_link_info(adapter->be_link_sts);
+
+		if (!link_enable)
+			return;
+		/*
+		 * Both ports were down previously, but atleast one of
+		 * them has come up if this netdevice's carrier is not up,
+		 * then indicate to stack
+		 */
+		if (!netif_carrier_ok(netdev)) {
+			netif_start_queue(netdev);
+			netif_carrier_on(netdev);
+		}
+		return;
+	}
+
+	/* Now both the ports are down. Tell the stack about it */
+	printk(KERN_INFO "Both ports are down\n");
+
+	adapter->port0_link_sts = BE_PORT_LINK_DOWN;
+	adapter->port1_link_sts = BE_PORT_LINK_DOWN;
+
+	/* if this netdevice's carrier is not down, then indicate to stack */
+	if (netif_carrier_ok(netdev)) {
+		netif_carrier_off(netdev);
+		netif_stop_queue(netdev);
+	}
+	return;
+}
+
+/* Function to initialize MCC rings */
+int be_mcc_init(PBE_ADAPTER adapter)
+{
+	u32 n, r, m;
+	PBNI_NET_OBJECT pnob;
+
+	pnob = adapter->net_obj;
+	/* on PM resume the rings still exist; only re-zero and re-create */
+	if (!pm_resume) {
+		be_init_procfs(adapter);
+		/*
+		 * Create the MCC ring so that all further communication with
+		 * MCC can go thru the ring. we do this at the end since
+		 * we do not want to be dealing with interrupts until the
+		 * initialization is complete.
+		 */
+		pnob->mcc_q_len = MCC_Q_LEN;
+		n = pnob->mcc_q_len * sizeof(MCC_WRB);
+		n = MAX(n, PAGE_SIZE);
+		/* Get number of pages */
+		m = (n + (PAGE_SIZE - 1)) / (PAGE_SIZE);
+		pnob->mcc_q =
+		    (PMCC_WRB) __get_free_pages(GFP_KERNEL, sa_log2(m));
+		if (pnob->mcc_q == NULL)
+			goto cleanup;
+		pnob->mcc_q_pages = m;
+		/* NOTE(review): DMA addresses obtained via virt_to_phys();
+		 * the DMA mapping API is the portable way -- verify */
+		pnob->mcc_q_pa = virt_to_phys(pnob->mcc_q);
+		pnob->mcc_q_pa = cpu_to_le64(pnob->mcc_q_pa);
+		/*
+		 * space for MCC WRB context
+		 */
+		pnob->mcc_wrb_ctxtLen = MCC_Q_LEN;
+		n = pnob->mcc_wrb_ctxtLen * sizeof(BE_MCC_WRB_CONTEXT);
+		n = MAX(n, PAGE_SIZE);	/* Need to allocate alteast one page */
+		/* Get number of pages */
+		m = (n + (PAGE_SIZE - 1)) / (PAGE_SIZE);
+		pnob->mcc_wrb_ctxt =
+		    (PVOID) __get_free_pages(GFP_KERNEL, sa_log2(m));
+		if (pnob->mcc_wrb_ctxt == NULL)
+			goto cleanup;
+		pnob->mcc_wrb_ctxt_pages = m;
+		/*
+		 * Space for MCC compl. ring
+		 */
+		pnob->mcc_cq_len = MCC_CQ_LEN;
+		n = pnob->mcc_cq_len * sizeof(MCC_CQ_ENTRY);
+		n = MAX(n, PAGE_SIZE);	/* Need to allocate alteast one page */
+		/* Get number of pages */
+		m = (n + (PAGE_SIZE - 1)) / (PAGE_SIZE);
+		pnob->mcc_cq =
+		    (PMCC_CQ_ENTRY) __get_free_pages(GFP_KERNEL,
+						     sa_log2(m));
+		if (pnob->mcc_cq == NULL)
+			goto cleanup;
+		pnob->mcc_cq_pa = virt_to_phys(pnob->mcc_cq);
+		pnob->mcc_cq_pa = cpu_to_le64(pnob->mcc_cq_pa);
+		pnob->mcc_cq_pages = m;
+
+	}
+	/* pages allocated above stay attached to pnob on the failure
+	 * path; presumably cleanup_netobject() frees them when the
+	 * caller unwinds via be_remove() -- verify */
+	memset(pnob->mcc_q, 0, pnob->mcc_q_pages * PAGE_SIZE);
+	pnob->mcc_q_hd = 0;
+
+	memset(pnob->mcc_wrb_ctxt, 0,
+	       pnob->mcc_wrb_ctxt_pages * PAGE_SIZE);
+
+	memset(pnob->mcc_cq, 0, pnob->mcc_cq_pages * PAGE_SIZE);
+	pnob->mcc_cq_tl = 0;
+
+	r = bni_create_mcc_rings(adapter->net_obj);
+	if (r != BE_SUCCESS)
+		goto cleanup;
+
+	return 0;
+cleanup:
+	TRACE(DL_INIT, "Failed to create MCC rings\n");
+	return -ENOMEM;
+
+}
+
+static void be_remove(struct pci_dev *pdev)
+{
+	PBNI_NET_OBJECT pnob = NULL;
+	PBE_ADAPTER adapter = NULL;
+	int adapt_num = 0;
+	int i;
+
+	/* locate the adapter-table slot that owns this PCI device */
+	while (adapt_num < MAX_BE_DEVICES) {
+		if ((be_adapter[adapt_num]) &&
+		    (be_adapter[adapt_num]->pdev == pdev)) {
+			adapter = be_adapter[adapt_num];
+			pnob = (BNI_NET_OBJECT *) adapter->net_obj;
+			break;
+		}
+		adapt_num++;
+	}
+
+	SA_ASSERT(adapter);
+
+	flush_scheduled_work();
+
+	/* Unregister async call back function for link status updates */
+	if (be_mcc_add_async_event_callback(&pnob->mcc_q_obj,
+					    NULL, NULL) != BE_SUCCESS)
+		printk(KERN_WARNING "Unregister async callback for link "
+		       "status updates failed.\n");
+
+	cleanup_netobject(adapter->net_obj);
+
+	be_cleanup_procfs(adapter);
+
+	bni_cleanup(&adapter->chip_object);
+
+	for (i = 0; i < adapter->num_bars; i++) {
+		if (adapter->pci_bars[i].base_va)
+			iounmap(adapter->pci_bars[i].base_va);
+	}
+
+	pci_release_regions(adapter->pdev);
+	pci_disable_device(adapter->pdev);
+
+	/* kfree(NULL) is a no-op, so no guards are needed */
+	kfree(adapter->be_link_sts);
+	kfree(adapter->eth_statsp);
+
+	/*
+	 * adapter->ctxt may be NULL if its allocation failed in probe;
+	 * check it BEFORE touching the timer embedded in it (the
+	 * original dereferenced ctxt in del_timer_sync() first and
+	 * only then tested it for NULL).
+	 */
+	if (adapter->ctxt) {
+		del_timer_sync(&adapter->ctxt->get_stats_timer);
+		kfree(adapter->ctxt);
+	}
+
+	be_adapter[adapt_num] = NULL;
+	kfree(adapter);
+}
+
+static int be_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+#ifdef CONFIG_PM
+	struct net_device *netdev = NULL;
+	PBNI_NET_OBJECT pnob = NULL;
+	PBE_ADAPTER adapter = NULL;
+
+	int adapt_num = 0;
+	/* locate the adapter for this PCI device */
+	while (adapt_num < MAX_BE_DEVICES) {
+		if (be_adapter[adapt_num] &&
+		    (be_adapter[adapt_num]->pdev == pdev)) {
+			adapter = be_adapter[adapt_num];
+			pnob = (BNI_NET_OBJECT *) adapter->netdevp->priv;
+			netdev = adapter->netdevp;
+			break;
+		}
+		adapt_num++;
+	}
+	SA_ASSERT(adapter);
+
+	/* detach exactly once (the original called this a second time
+	 * inside the search loop above) */
+	netif_device_detach(netdev);
+	if (netif_running(netdev))
+		be_pm_cleanup(adapter, pnob, netdev);
+
+	/* arm wake-up from D3hot (3) and D3cold (4) */
+	pci_enable_wake(pdev, 3, 1);
+	pci_enable_wake(pdev, 4, 1);	/* D3 Cold = 4 */
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+#endif
+	return 0;
+}
+
+static int be_resume(struct pci_dev *pdev)
+{
+#ifdef CONFIG_PM
+	int status = 0;
+	struct net_device *netdev = NULL;
+	PBNI_NET_OBJECT pnob = NULL;
+	PBE_ADAPTER adapter = NULL;
+	u32 adapt_num = 0;
+
+	/* tell init paths (init_be_netdev etc.) to reuse existing state */
+	pm_resume = 1;
+	while (adapt_num < MAX_BE_DEVICES) {
+		if (be_adapter[adapt_num] &&
+		    (be_adapter[adapt_num]->pdev == pdev)) {
+			adapter = be_adapter[adapt_num];
+			pnob = (BNI_NET_OBJECT *) adapter->netdevp->priv;
+			netdev = adapter->netdevp;
+			/* NOTE(review): detaching in the RESUME path looks
+			 * like a copy/paste from be_suspend() -- verify */
+			netif_device_detach(netdev);
+			break;
+		}
+		adapt_num++;
+	}
+	SA_ASSERT(adapter);
+
+	status = pci_enable_device(pdev);
+	if (status)
+		return status;
+
+	pci_set_power_state(pdev, 0);
+	pci_restore_state(pdev);
+	/* disarm the wake-up sources armed in be_suspend() */
+	pci_enable_wake(pdev, 3, 0);
+	pci_enable_wake(pdev, 4, 0);	/* 4 is D3 cold */
+
+	/* NOTE(review): carrier/queue enabled before the link state is
+	 * actually known (be_update_link_status runs below) -- verify */
+	netif_carrier_on(netdev);
+	netif_start_queue(netdev);
+
+	if (netif_running(netdev)) {
+		status = be_prepare_interface(adapter);
+
+		if (status < 0) {
+			return (status);
+		}
+		status = be_mcc_init(adapter);
+		if (status < 0) {
+			printk(KERN_ERR "be_mcc_init failed\n");
+			return (status);
+		}
+		be_update_link_status(adapter);
+		/*
+		 * Register async call back function to handle link
+		 * status updates
+		 */
+		if (be_mcc_add_async_event_callback(
+			&adapter->net_obj->mcc_q_obj,
+			be_link_status_async_callback,
+			(PVOID) adapter) != BE_SUCCESS) {
+			printk(KERN_WARNING "add_async_event_callback failed");
+			printk(KERN_WARNING
+			       "Link status changes may not be reflected\n");
+		}
+		bni_enable_intr(pnob);
+		enable_eq_intr(pnob);
+		be_up(adapter);
+	}
+	netif_device_attach(netdev);
+	pm_resume = 0;
+#endif
+	return 0;
+
+}
+
+#ifdef CONFIG_PM
+static void be_pm_cleanup(PBE_ADAPTER adapter,
+			  PBNI_NET_OBJECT pnob, struct net_device *netdev)
+{
+	u32 i;
+
+	netif_carrier_off(netdev);
+	netif_stop_queue(netdev);
+
+	/* drain pending transmits before tearing anything down */
+	wait_nic_tx_cmpl(pnob);
+	disable_eq_intr(pnob);
+
+	/*
+	 * The original cleared the WRONG flag in each branch (killing
+	 * the tasklet cleared isr_registered and vice versa), so the
+	 * IRQ was never actually freed on suspend. Each branch now
+	 * clears its own flag.
+	 */
+	if (adapter->tasklet_started) {
+		tasklet_kill(&(adapter->sts_handler));
+		adapter->tasklet_started = 0;
+	}
+	if (adapter->isr_registered) {
+		free_irq(netdev->irq, netdev);
+		adapter->isr_registered = 0;
+	}
+	/* Disable chip interrupt */
+	bni_disable_intr(pnob);
+	bni_destroy_netobj(pnob, &adapter->sa_device);
+
+	if (pnob->rx_ctxt) {
+		BE_RX_PAGE_INFO *rx_page_info;
+
+		/* walk the RX context array and free the data buffers */
+		for (i = 0; i < pnob->rx_q_len; i++) {
+			rx_page_info = &(OSM_NOB(pnob)->rx_page_info[i]);
+			if ((OSM_NOB(pnob)->rx_pg_shared == FALSE) ||
+			    (rx_page_info->page_offset))
+				pci_unmap_page(adapter->pdev,
+					       pci_unmap_addr(rx_page_info,
+							      bus),
+					       pnob->rx_buf_size,
+					       PCI_DMA_FROMDEVICE);
+			if (rx_page_info->page)
+				put_page(rx_page_info->page);
+			memset(rx_page_info, 0, sizeof(BE_RX_PAGE_INFO));
+		}
+		OSM_NOB(pnob)->rx_pg_info_hd = 0;
+	}
+
+}
+
+static void be_up(PBE_ADAPTER adapter)
+{
+	PBNI_NET_OBJECT pnob = adapter->net_obj;
+
+	/* replay the VLAN configuration that was lost across suspend */
+	if (OSM_NOB(pnob)->num_vlans != 0)
+		bni_config_vlan(pnob, OSM_NOB(pnob)->vlan_tag,
+				OSM_NOB(pnob)->num_vlans, NULL, NULL, 0);
+
+}
+#endif
+
+/* Allocate (or, on PM resume, just re-zero) the per-WRB TX context
+ * array. Returns 0 on success, -1 on allocation failure. */
+static int be_setup_tx_res(PBNI_NET_OBJECT NetObject)
+{
+	int n;
+
+	n = NetObject->tx_q_len * sizeof(PVOID *);
+	if (!pm_resume) {
+		/* cast of kmalloc() removed; it is unnecessary in C */
+		NetObject->tx_ctxt = kmalloc(n, GFP_KERNEL);
+		if (NetObject->tx_ctxt == NULL) {
+			TRACE(DL_INIT,
+			      "Failed to alloc memory for tx_ctxt\n");
+			return -1;
+		}
+	}
+	/* on resume the array already exists; just clear it */
+	memset(NetObject->tx_ctxt, 0, n);
+	return 0;
+}
+
+/* Allocate (or, on PM resume, re-zero) the RX context and page-info
+ * arrays, then post the ethernet RX buffers. Returns 0 or -1. */
+static int be_setup_rx_res(PBNI_NET_OBJECT NetObject)
+{
+	int n;
+
+	if (!pm_resume) {
+		n = (NetObject->rx_q_len * sizeof(PVOID));
+		NetObject->rx_ctxt = kmalloc(n, GFP_KERNEL);
+		if (NetObject->rx_ctxt == NULL) {
+			TRACE(DL_INIT, "Failed to alloc memory for rx_ctxt\n");
+			return -1;
+		}
+
+		n = (NetObject->rx_q_len * sizeof(BE_RX_PAGE_INFO));
+		OSM_NOB(NetObject)->rx_page_info = kmalloc(n, GFP_KERNEL);
+		if (OSM_NOB(NetObject)->rx_page_info == NULL) {
+			TRACE(DL_INIT,
+			      "Failed to alloc memory for receive page info\n");
+			kfree(NetObject->rx_ctxt);
+			/* clear the stale pointer so cleanup_netobject()
+			 * cannot free it a second time */
+			NetObject->rx_ctxt = NULL;
+			return -1;
+		}
+	}
+
+	memset(NetObject->rx_ctxt, 0, NetObject->rx_q_len * sizeof(PVOID));
+	memset(OSM_NOB(NetObject)->rx_page_info, 0,
+	       NetObject->rx_q_len * sizeof(BE_RX_PAGE_INFO));
+	OSM_NOB(NetObject)->rx_pg_info_hd = 0;
+	NetObject->rx_q_hd = 0;
+	NetObject->rx_q_posted = 0;
+	/* post ETH RX buffers */
+	post_eth_rx_buffs(NetObject);
+
+	return 0;
+}
+
+/*
+ * free all resources associated with a NetObject
+ * Called at the time of module cleanup as well a any error during
+ * module init. Some resources may be partially allocated in a NetObj.
+ */
+void cleanup_netobject(PBNI_NET_OBJECT pnob)
+{
+	struct net_device *netdev;
+	PBE_ADAPTER adapter;
+	struct sk_buff *skb;
+	int i;
+
+	SA_ASSERT(pnob);
+	netdev = (struct net_device *)OSM_NOB(pnob)->os_handle;
+	SA_ASSERT(netdev);
+	adapter = (PBE_ADAPTER) OSM_NOB(pnob)->adapter;
+	SA_ASSERT(adapter);
+
+	/* Only if this netdev is up */
+	if (netif_running(netdev)) {
+		/*
+		 * Let us stop the dev queue for the
+		 * interface associated with this netobj.
+		 */
+		netif_stop_queue(netdev);
+
+		/* Wait until no more pending transmits */
+		wait_nic_tx_cmpl(pnob);
+
+		/* Disable this EQ's interrupt */
+		disable_eq_intr(pnob);
+	}
+
+	/*
+	 * Free the IRQ that register_isr() acquired. The original used
+	 * bitwise '&' / '& !' here where logical '&&' was meant; this
+	 * only worked because both flags are 0/1.
+	 */
+	if (adapter->isr_registered) {
+		if (adapter->msix_enabled)
+			free_irq(adapter->msix_entries[0].vector, netdev);
+		else
+			free_irq(netdev->irq, netdev);
+	}
+
+	adapter->isr_registered = 0;
+	if (adapter->msix_enabled) {
+		pci_disable_msix(adapter->pdev);
+		adapter->msix_enabled = 0;
+	}
+	if (adapter->tasklet_started) {
+		tasklet_kill(&(adapter->sts_handler));
+		adapter->tasklet_started = 0;
+	}
+	/* Disable chip interrupt */
+	bni_disable_intr(pnob);
+
+	unregister_netdev(netdev);
+	/* memory associated with netdev is freed by OS */
+
+	/* Destroy Net Object */
+	bni_destroy_netobj(pnob, &adapter->sa_device);
+
+	adapter->net_obj = NULL;
+	adapter->netdevp = NULL;
+
+	/* free all the page-backed memory allocated for the queues */
+
+	if (pnob->mcc_q) {
+		free_pages((unsigned long)pnob->mcc_q,
+			   sa_log2(pnob->mcc_q_pages));
+	}
+
+	if (pnob->mcc_wrb_ctxt) {
+		free_pages((unsigned long)pnob->mcc_wrb_ctxt,
+			   sa_log2(pnob->mcc_wrb_ctxt_pages));
+	}
+
+	if (pnob->mcc_cq) {
+		free_pages((unsigned long)pnob->mcc_cq,
+			   sa_log2(pnob->mcc_cq_pages));
+	}
+
+	if (pnob->event_q) {
+		free_pages((unsigned long)pnob->event_q,
+			   sa_log2(pnob->event_q_pages));
+	}
+
+	if (pnob->tx_cq) {
+		free_pages((unsigned long)pnob->tx_cq,
+			   sa_log2(pnob->tx_cq_pages));
+	}
+
+	if (pnob->tx_q) {
+		free_pages((unsigned long)pnob->tx_q,
+			   sa_log2(pnob->tx_q_pages));
+	}
+
+	if (pnob->bcrx_cq) {
+		free_pages((unsigned long)pnob->bcrx_cq,
+			   sa_log2(pnob->bcrx_cq_pages));
+	}
+
+	if (pnob->rx_q) {
+		free_pages((unsigned long)pnob->rx_q,
+			   sa_log2(pnob->rx_q_pages));
+	}
+
+	if (pnob->ucrx_cq) {
+		free_pages((unsigned long)pnob->ucrx_cq,
+			   sa_log2(pnob->ucrx_cq_pages));
+	}
+
+	/* free all allocated memory stored in the net object */
+	if (pnob->rx_ctxt) {
+		BE_RX_PAGE_INFO *rx_page_info;
+		/*
+		 * go through RX context array and free data buffs
+		 */
+		for (i = 0; i < pnob->rx_q_len; i++) {
+			rx_page_info = &(OSM_NOB(pnob)->rx_page_info[i]);
+			if ((OSM_NOB(pnob)->rx_pg_shared == FALSE) ||
+			    (rx_page_info->page_offset)) {
+				pci_unmap_page(adapter->pdev,
+					pci_unmap_addr(rx_page_info, bus),
+					pnob->rx_buf_size, PCI_DMA_FROMDEVICE);
+			}
+			if (rx_page_info->page)
+				put_page(rx_page_info->page);
+			memset(rx_page_info, 0, sizeof(BE_RX_PAGE_INFO));
+		}
+		OSM_NOB(pnob)->rx_pg_info_hd = 0;
+		kfree(OSM_NOB(pnob)->rx_page_info);
+		kfree(pnob->rx_ctxt);
+	}
+
+	if (pnob->tx_ctxt) {
+		/* drop any skbs still parked in the TX context array */
+		for (i = 0; i < pnob->tx_q_len; i++) {
+			skb = (struct sk_buff *)pnob->tx_ctxt[i];
+			if (skb)
+				kfree_skb(skb);
+		}
+		kfree(pnob->tx_ctxt);
+	}
+
+	/* kfree(NULL) is a no-op; the redundant guards are gone */
+	kfree(pnob->mb_ptr);
+
+	kfree(OSM_NOB(pnob));
+
+	/* finally, free the net object itself */
+	kfree(pnob);
+
+}
+
+/*
+ * Creates a NetObject with its set of Eth rings and registers a
+ * net_device for it.  On PM resume (pm_resume != 0) the existing
+ * NetObject and rings are reused and only re-initialized.
+ * Returns 0 on success, -1 on failure.
+ *
+ * Fixes vs. original: kzalloc instead of kmalloc+memset, no casts of
+ * allocator returns, and every allocation-failure path now unwinds
+ * what was already allocated (the original leaked pnob, the OSM
+ * object, the mailbox and all rings on most error paths).
+ */
+
+/*
+ * Allocate a physically contiguous ring of at least 'bytes' bytes,
+ * rounded up to whole pages.  Fills *pa with the little-endian
+ * physical address and *pages with the page count.  Returns the
+ * virtual address, or NULL on failure.
+ */
+static void *be_alloc_ring(u32 bytes, u64 *pa, u32 *pages)
+{
+	u32 npages = (bytes + (PAGE_SIZE - 1)) / PAGE_SIZE;
+	void *va = (void *) __get_free_pages(GFP_KERNEL, sa_log2(npages));
+
+	if (va == NULL)
+		return NULL;
+	*pa = cpu_to_le64(virt_to_phys(va));
+	*pages = npages;
+	return va;
+}
+
+/* Free a ring allocated with be_alloc_ring(); NULL va is a no-op. */
+static void be_free_ring(void *va, u32 pages)
+{
+	if (va)
+		free_pages((unsigned long) va, sa_log2(pages));
+}
+
+int be_prepare_interface(PBE_ADAPTER adapter)
+{
+	struct net_device *netdev = NULL;
+	PBNI_NET_OBJECT pnob = NULL;
+	SA_DEV_BAR_LOCATIONS pci_bars[3];
+	int status;
+	u32 n;
+	PVOID p;
+
+	memcpy(pci_bars, adapter->pci_bars, sizeof(adapter->pci_bars));
+
+	if (pm_resume) {
+		/* Resume: reuse the existing NetObject and its rings. */
+		pnob = adapter->net_obj;
+	} else {
+		pnob = kzalloc(sizeof(BNI_NET_OBJECT), GFP_KERNEL);
+		if (pnob == NULL) {
+			TRACE(DL_INIT,
+			      "Failed to alloc memory for NetObject\n");
+			goto err_ret1;
+		}
+		TRACE(DL_INIT, "Done with net obj alloc\n");
+
+		pnob->osm_netobj = kzalloc(sizeof(linux_net_object_t),
+					   GFP_KERNEL);
+		if (pnob->osm_netobj == NULL) {
+			TRACE(DL_INIT,
+			      "Failed to alloc memory OSM NetObject\n");
+			goto err_free_mem;
+		}
+
+		OSM_NOB(pnob)->devno = 0;
+		OSM_NOB(pnob)->adapter = adapter;
+
+		/* Mailbox SGL; the pointer must be 16-byte aligned. */
+		pnob->mb_sgl.length = sizeof(MCC_MAILBOX);
+		p = kmalloc(pnob->mb_sgl.length + 16, GFP_KERNEL);
+		if (p == NULL) {
+			TRACE(DL_INIT,
+			      "Failed to alloc mem for MCC_MAILBOX\n");
+			goto err_free_mem;
+		}
+		pnob->mb_ptr = p;
+		p = (PVOID) (((unsigned long) p + 15) & ~0xf);
+		pnob->mb_sgl.va = (void *) p;
+		pnob->mb_sgl.pa = cpu_to_le64(virt_to_phys(p));
+
+		/* Event queue: at least two pages. */
+		pnob->event_q_len = EVENT_Q_LEN;
+		pnob->event_q = be_alloc_ring(MAX(pnob->event_q_len *
+						  sizeof(EQ_ENTRY),
+						  2 * PAGE_SIZE),
+					      &pnob->event_q_pa,
+					      &pnob->event_q_pages);
+		if (pnob->event_q == NULL)
+			goto err_free_mem;
+
+		/* Eth TX queue. */
+		pnob->tx_q_len = ETH_TXQ_LEN;
+		pnob->tx_q_port = 0;	/* no port binding */
+		pnob->tx_q = be_alloc_ring(MAX(pnob->tx_q_len *
+					       sizeof(ETH_WRB), PAGE_SIZE),
+					   &pnob->tx_q_pa,
+					   &pnob->tx_q_pages);
+		if (pnob->tx_q == NULL)
+			goto err_free_mem;
+
+		/* Eth TX completion queue. */
+		pnob->txcq_len = ETH_TXCQ_LEN;
+		pnob->tx_cq = be_alloc_ring(MAX(pnob->txcq_len *
+						sizeof(ETH_TX_COMPL),
+						PAGE_SIZE),
+					    &pnob->tx_cq_pa,
+					    &pnob->tx_cq_pages);
+		if (pnob->tx_cq == NULL)
+			goto err_free_mem;
+
+		/* Eth RX queue. */
+		pnob->rx_q_len = ETH_RXQ_LEN;
+		pnob->rx_q = be_alloc_ring(MAX(pnob->rx_q_len *
+					       sizeof(ETH_RX_D), PAGE_SIZE),
+					   &pnob->rx_q_pa,
+					   &pnob->rx_q_pages);
+		if (pnob->rx_q == NULL)
+			goto err_free_mem;
+
+		/* Eth unicast RX completion queue. */
+		pnob->ucrx_cq_len = ETH_UC_RXCQ_LEN;
+		pnob->ucrx_cq = be_alloc_ring(MAX(pnob->ucrx_cq_len *
+						  sizeof(ETH_RX_COMPL),
+						  PAGE_SIZE),
+					      &pnob->ucrx_cq_pa,
+					      &pnob->ucrx_cq_pages);
+		if (pnob->ucrx_cq == NULL)
+			goto err_free_mem;
+
+		/* Eth broadcast RX completion queue. */
+		pnob->bcrx_cq_len = ETH_BC_RXCQ_LEN;
+		pnob->bcrx_cq = be_alloc_ring(MAX(pnob->bcrx_cq_len *
+						  sizeof(ETH_RX_COMPL),
+						  PAGE_SIZE),
+					      &pnob->bcrx_cq_pa,
+					      &pnob->bcrx_cq_pages);
+		if (pnob->bcrx_cq == NULL)
+			goto err_free_mem;
+
+		/* DMA'ble memory for IOCTL_ETH_GET_STATISTICS. */
+		adapter->eth_statsp = kmalloc(sizeof(IOCTL_ETH_GET_STATISTICS),
+					      GFP_KERNEL);
+		if (adapter->eth_statsp == NULL) {
+			TRACE(DL_INIT,
+			      "Failed to alloc memory for Eth stats\n");
+			goto err_free_mem;
+		}
+		pnob->rx_buf_size = rxbuf_size;
+		/* Device stays "closed" until dev open enables it. */
+		adapter->dev_state = BE_DEV_STATE_NONE;
+	}
+
+	memset(pnob->event_q, 0, pnob->event_q_pages * PAGE_SIZE);
+	pnob->event_q_tl = 0;
+
+	memset(pnob->tx_q, 0, pnob->tx_q_pages * PAGE_SIZE);
+	pnob->tx_q_hd = 0;
+	pnob->tx_q_tl = 0;
+
+	memset(pnob->tx_cq, 0, pnob->tx_cq_pages * PAGE_SIZE);
+	pnob->tx_cq_tl = 0;
+
+	memset(pnob->rx_q, 0, pnob->rx_q_pages * PAGE_SIZE);
+
+	memset(pnob->ucrx_cq, 0, pnob->ucrx_cq_pages * PAGE_SIZE);
+	pnob->ucrx_cq_tl = 0;
+
+	memset(pnob->bcrx_cq, 0, pnob->bcrx_cq_pages * PAGE_SIZE);
+	pnob->bcrx_cq_tl = 0;
+
+	n = bni_create_netobj(pnob, pci_bars, adapter->num_bars,
+			      &adapter->sa_device, &adapter->chip_object);
+	if (n != BE_SUCCESS) {
+		TRACE(DL_ERROR, "bni_create_netobj failed - returned %x", n);
+		if (!pm_resume) {
+			kfree(adapter->eth_statsp);
+			adapter->eth_statsp = NULL;
+			goto err_free_mem;
+		}
+		goto err_ret1;
+	}
+	TRACE(DL_INIT, "Creation of NetObject Done");
+
+	status = init_be_netdev(adapter, pnob);
+	if (status < 0)
+		goto err_ret;
+	netdev = OSM_NOB(pnob)->os_handle;
+
+#ifdef CONFIG_BENET_NAPI
+	netif_napi_add(netdev, &OSM_NOB(pnob)->napi, be_poll, 64);
+	OSM_NOB(pnob)->rx_sched = FALSE;
+	spin_lock_init(&OSM_NOB(pnob)->rx_lock);
+#endif
+
+	if (be_setup_tx_res(pnob))
+		goto err_ret;
+	if (be_setup_rx_res(pnob))
+		goto err_ret;
+
+	if (!pm_resume) {
+		adapter->netdevp = OSM_NOB(pnob)->os_handle;
+		adapter->net_obj = pnob;
+	}
+	return 0;
+
+err_ret:
+	/* NetObject fully created; cleanup_netobject() tears it down. */
+	cleanup_netobject(pnob);
+	goto err_ret1;
+
+err_free_mem:
+	/*
+	 * Unwind a partially built NetObject.  pnob was zero-initialized,
+	 * so members that were never allocated are NULL/0 and are safely
+	 * skipped by be_free_ring()/kfree().
+	 */
+	be_free_ring(pnob->bcrx_cq, pnob->bcrx_cq_pages);
+	be_free_ring(pnob->ucrx_cq, pnob->ucrx_cq_pages);
+	be_free_ring(pnob->rx_q, pnob->rx_q_pages);
+	be_free_ring(pnob->tx_cq, pnob->tx_cq_pages);
+	be_free_ring(pnob->tx_q, pnob->tx_q_pages);
+	be_free_ring(pnob->event_q, pnob->event_q_pages);
+	kfree(pnob->mb_ptr);
+	kfree(pnob->osm_netobj);
+	kfree(pnob);
+
+err_ret1:
+	printk(KERN_ERR "Interface initialization failed\n");
+	return -1;
+}
+
+/* Thin wrapper: unmask event-queue interrupts via the BNI layer. */
+void enable_eq_intr(PBNI_NET_OBJECT pnob)
+{
+ bni_enable_eq_intr(pnob);
+}
+
+/* Thin wrapper: mask event-queue interrupts via the BNI layer. */
+void disable_eq_intr(PBNI_NET_OBJECT pnob)
+{
+ bni_disable_eq_intr(pnob);
+}
+
+/*
+ * Busy-wait (up to ~1 second) until the hardware has consumed all
+ * pending transmit WRBs, i.e. the TX queue tail catches up with the
+ * head.  Fix: the warning is now based on the actual queue state, not
+ * on the loop counter, so draining on the very last poll no longer
+ * produces a spurious warning.
+ */
+void wait_nic_tx_cmpl(PBNI_NET_OBJECT pnob)
+{
+	/* 20us per poll, 50000 polls: at most one second of waiting. */
+	enum { TX_CMPL_POLL_DELAY_US = 20, TX_CMPL_MAX_POLLS = 50000 };
+	int polls = 0;
+
+	while ((pnob->tx_q_tl != pnob->tx_q_hd) &&
+	       (polls < TX_CMPL_MAX_POLLS)) {
+		++polls;
+		udelay(TX_CMPL_POLL_DELAY_US);
+	}
+
+	if (pnob->tx_q_tl != pnob->tx_q_hd) {
+		printk(KERN_WARNING
+		       "Did not receive completions for all TX requests\n");
+	}
+}
diff -uprN orig/linux-2.6.24.2/drivers/net/benet/be_netif.c benet/linux-2.6.24.2/drivers/net/benet/be_netif.c
--- orig/linux-2.6.24.2/drivers/net/benet/be_netif.c 1970-01-01 05:30:00.000000000 +0530
+++ benet/linux-2.6.24.2/drivers/net/benet/be_netif.c 2008-02-14 15:31:33.420341008 +0530
@@ -0,0 +1,597 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or at your option any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, 5th Floor
+ * Boston, MA 02110-1301 USA
+ *
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called GPL.
+ *
+ * Contact Information:
+ * linux-drivers@...verengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ *
+ */
+/*
+ * be_netif.c
+ *
+ * This file contains various entry points of drivers seen by tcp/ip stack.
+ */
+
+#include <linux/pci.h>
+#include "be.h"
+#include <linux/ip.h>
+
+/* Defined in another translation unit of this driver. */
+extern unsigned int ls_mss;
+
+/* Non-zero while the driver is resuming from a PM suspend. */
+unsigned int pm_resume;
+
+/* Strings to print Link properties; index 0 is the out-of-range text.
+ * NOTE(review): appear to be used only in this file — presumably they
+ * could be static const; confirm no other file declares them extern. */
+char *link_speed[] = {
+ "Invalid link Speed Value",
+ "10 Mbps",
+ "100 Mbps",
+ "1 Gbps",
+ "10 Gbps"
+};
+
+char *link_duplex[] = {
+ "Invalid Duplex Value",
+ "Half Duplex",
+ "Full Duplex"
+};
+
+#ifdef BE_POLL_MODE
+struct net_device *irq_netdev;
+#endif
+
+int benet_xmit(struct sk_buff *skb, struct net_device *netdev);
+int benet_set_mac_addr(struct net_device *netdev, void *p);
+
+/*
+ * Print one port's link state.  A zero speed or duplex means the port
+ * is down; index 0 of the string tables is the "invalid value" text.
+ */
+static void be_print_port_link(u32 port, u32 speed, u32 duplex, u32 active)
+{
+	printk(KERN_INFO "PortNo %u:", port);
+	if (speed && duplex) {
+		/* Port is up and running */
+		if (speed < sizeof(link_speed) / sizeof(link_speed[0]))
+			printk(" Link Speed: %s,", link_speed[speed]);
+		else
+			printk(" %s,", link_speed[0]);
+
+		if (duplex < sizeof(link_duplex) / sizeof(link_duplex[0]))
+			printk(" %s", link_duplex[duplex]);
+		else
+			printk(" %s", link_duplex[0]);
+
+		if (active == port)
+			printk("(active)\n");
+		else
+			printk("\n");
+	} else
+		printk(" Down \n");
+}
+
+/*
+ * Print speed, duplex and active/passive role of both MAC ports.
+ * Fixes: the two identical per-port code paths are folded into one
+ * helper; the magic bounds (5/3) are derived from the string tables;
+ * console output now carries an explicit printk level.
+ */
+void be_print_link_info(PBE_LINK_STATUS lnk_status)
+{
+	be_print_port_link(0, lnk_status->mac0_speed,
+			   lnk_status->mac0_duplex, lnk_status->active_port);
+	be_print_port_link(1, lnk_status->mac1_speed,
+			   lnk_status->mac1_duplex, lnk_status->active_port);
+}
+
+/*
+ * net_device open entry point: refresh link status, bring the carrier
+ * up if either physical port has link, make NAPI ready, then unmask
+ * event-queue interrupts and mark the device open.
+ *
+ * Fix: NAPI is enabled *before* interrupts are unmasked, so an early
+ * interrupt cannot try to schedule a still-disabled NAPI context.
+ */
+int benet_open(struct net_device *netdev)
+{
+	PBNI_NET_OBJECT pnob = (BNI_NET_OBJECT *) netdev->priv;
+	PBE_ADAPTER adapter = OSM_NOB(pnob)->adapter;
+
+	TRACE(DL_INIT, "entered-benet_open()");
+
+	/* Hardware must have been initialized first. */
+	if (adapter->dev_state < BE_DEV_STATE_INIT)
+		return -EAGAIN;
+
+	be_update_link_status(adapter);
+
+	/*
+	 * Set carrier on only if Physical Link up.
+	 * Either of the port link status up signifies this.
+	 */
+	if ((adapter->port0_link_sts == BE_PORT_LINK_UP) ||
+	    (adapter->port1_link_sts == BE_PORT_LINK_UP)) {
+		netif_start_queue(netdev);
+		netif_carrier_on(netdev);
+	}
+
+#ifdef CONFIG_BENET_NAPI
+	napi_enable(&OSM_NOB(pnob)->napi);
+#endif
+	enable_eq_intr(pnob);
+	adapter->dev_state = BE_DEV_STATE_OPEN;
+	return 0;
+}
+
+/*
+ * net_device stop entry point: quiesce TX, wait for outstanding
+ * transmit completions, drop the carrier and mark the device as
+ * merely initialized again.
+ */
+int benet_close(struct net_device *netdev)
+{
+ PBNI_NET_OBJECT pnob = (BNI_NET_OBJECT *) netdev->priv;
+ PBE_ADAPTER adapter = OSM_NOB(pnob)->adapter;
+
+ /* Stop Transmitting */
+ netif_stop_queue(netdev);
+
+ /* NOTE(review): EQ interrupts are never masked here, and
+  * synchronize_irq() runs while the IRQ is still enabled, so new
+  * interrupts may arrive after this point — confirm this is intended. */
+ synchronize_irq(netdev->irq);
+
+ /* Wait until no more pending transmits (bounded, ~1s). */
+ wait_nic_tx_cmpl(pnob);
+
+ adapter->dev_state = BE_DEV_STATE_INIT;
+
+ netif_carrier_off(netdev);
+
+ adapter->port0_link_sts = BE_PORT_LINK_DOWN;
+ adapter->port1_link_sts = BE_PORT_LINK_DOWN;
+
+#ifdef CONFIG_BENET_NAPI
+ /* NOTE(review): napi_disable() after the TX drain; drivers usually
+  * disable NAPI earlier in close — confirm RX polling cannot race
+  * the teardown above. */
+ napi_disable(&OSM_NOB(pnob)->napi);
+#endif
+ return 0;
+}
+
+/*
+ * net_device ioctl entry point.  Only the ethtool ioctl is handled;
+ * every other command is rejected with -EOPNOTSUPP.
+ */
+int benet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	TRACE(DL_INIT, "entered benet_ioctl()");
+
+	if (cmd == SIOCETHTOOL)
+		return be_ethtool_ioctl(dev, ifr);
+
+	return -EOPNOTSUPP;
+}
+
+/*
+ * net_device set_mac_address entry point.  'p' is a struct sockaddr
+ * holding the new station address.  The address is recorded in both
+ * the NetObject and the net_device, then programmed into the unicast
+ * filter of both MAC ports (Active-Passive failover requires the two
+ * ports to carry identical MAC addresses at all times).
+ */
+int benet_set_mac_addr(struct net_device *netdev, void *p)
+{
+	struct sockaddr *addr = p;
+	PBNI_NET_OBJECT pnob;
+	SA_MAC_ADDRESS mac_addr;
+	u32 port;
+
+	SA_ASSERT(netdev);
+	pnob = (PBNI_NET_OBJECT) netdev->priv;
+	SA_ASSERT(pnob);
+
+	memcpy(pnob->mac_address, addr->sa_data, netdev->addr_len);
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	memcpy(mac_addr.bytes, pnob->mac_address, SA_MAC_ADDRESS_SIZE);
+
+	for (port = 0; port <= 1; port++)
+		bni_set_uc_mac_adr(pnob, port, 0, OSM_NOB(pnob)->devno,
+				   &mac_addr, NULL, NULL);
+
+	return 0;
+}
+
+/*
+ * Safety timer for the firmware stats query: if the completion
+ * callback has not run within 2 seconds, wake the waiter in
+ * benet_get_stats() so it never blocks forever.
+ *
+ * Fix: the original read-then-decrement of get_stat_flag raced with
+ * get_stat_cb() — both could observe 1 and both up() the semaphore.
+ * atomic_add_unless() makes test-and-decrement a single atomic step,
+ * so exactly one of the two paths consumes the flag.
+ */
+void get_stats_timer_handler(unsigned long context)
+{
+	be_timer_ctxt_t *ctxt = (be_timer_ctxt_t *) context;
+
+	if (atomic_add_unless(&ctxt->get_stat_flag, -1, 0))
+		up((PVOID) ctxt->get_stat_sem);
+	del_timer(&ctxt->get_stats_timer);
+}
+
+/*
+ * Completion callback for the firmware stats query.  Wakes the waiter
+ * in benet_get_stats() unless the 2-second timer already did.
+ *
+ * Fix: the original read-then-decrement of get_stat_flag raced with
+ * the timer handler — both could observe 1 and both up() the
+ * semaphore.  atomic_add_unless() decrements only while the flag is
+ * non-zero, atomically, so only one path wakes the waiter.
+ */
+void get_stat_cb(PVOID context, BESTATUS status, MCC_WRB *optional_wrb)
+{
+	be_timer_ctxt_t *ctxt = (be_timer_ctxt_t *) context;
+
+	if (atomic_add_unless(&ctxt->get_stat_flag, -1, 0))
+		up((PVOID) ctxt->get_stat_sem);
+}
+
+struct net_device_stats *benet_get_stats(struct net_device *dev)
+{
+ PBNI_NET_OBJECT pnob = dev->priv;
+ PBE_ADAPTER adapter = OSM_NOB(pnob)->adapter;
+ u64 pa;
+ be_timer_ctxt_t *ctxt = adapter->ctxt;
+
+ if (!BE_DEV_STATE_OPEN(adapter) || (pm_resume)) {
+ /* Return previously read stats */
+ return &(adapter->benet_stats);
+ }
+ /* Get Physical Addr */
+ pa = pci_map_single(adapter->pdev, adapter->eth_statsp,
+ sizeof(IOCTL_ETH_GET_STATISTICS),
+ PCI_DMA_FROMDEVICE);
+ pa = cpu_to_le64(pa);
+ ctxt->get_stat_sem = (unsigned long)&adapter->get_eth_stat_sem;
+ bni_get_stats(adapter->net_obj, adapter->eth_statsp,
+ pa, get_stat_cb, (PVOID) ctxt);
+ atomic_inc(&ctxt->get_stat_flag);
+ ctxt->get_stats_timer.data = (unsigned long)ctxt;
+ mod_timer(&ctxt->get_stats_timer, (jiffies + (HZ * 2)));
+ down((PVOID) ctxt->get_stat_sem); /* block till callback is called */
+
+ /*Adding port0 and port1 stats. */
+ adapter->benet_stats.rx_packets =
+ adapter->eth_statsp->params.response.p0recvdtotalframes +
+ adapter->eth_statsp->params.response.p1recvdtotalframes;
+ adapter->benet_stats.tx_packets =
+ adapter->eth_statsp->params.response.p0xmitunicastframes +
+ adapter->eth_statsp->params.response.p1xmitunicastframes;
+ adapter->benet_stats.tx_bytes =
+ adapter->eth_statsp->params.response.p0xmitbyteslsd +
+ adapter->eth_statsp->params.response.p1xmitbyteslsd;
+ adapter->benet_stats.rx_errors =
+ adapter->eth_statsp->params.response.p0crcerrors +
+ adapter->eth_statsp->params.response.p1crcerrors;
+ adapter->benet_stats.rx_errors +=
+ adapter->eth_statsp->params.response.p0alignmentsymerrs +
+ adapter->eth_statsp->params.response.p1alignmentsymerrs;
+ adapter->benet_stats.rx_errors +=
+ adapter->eth_statsp->params.response.p0inrangelenerrors +
+ adapter->eth_statsp->params.response.p1inrangelenerrors;
+ adapter->benet_stats.rx_bytes =
+ adapter->eth_statsp->params.response.p0recvdtotalbytesLSD +
+ adapter->eth_statsp->params.response.p1recvdtotalbytesLSD;
+ adapter->benet_stats.rx_crc_errors =
+ adapter->eth_statsp->params.response.p0crcerrors +
+ adapter->eth_statsp->params.response.p1crcerrors;
+
+ adapter->benet_stats.tx_packets +=
+ adapter->eth_statsp->params.response.p0xmitmulticastframes +
+ adapter->eth_statsp->params.response.p1xmitmulticastframes;
+ adapter->benet_stats.tx_packets +=
+ adapter->eth_statsp->params.response.p0xmitbroadcastframes +
+ adapter->eth_statsp->params.response.p1xmitbroadcastframes;
+ adapter->benet_stats.tx_errors = 0;
+
+ adapter->benet_stats.multicast =
+ adapter->eth_statsp->params.response.p0xmitmulticastframes +
+ adapter->eth_statsp->params.response.p1xmitmulticastframes;
+
+ adapter->benet_stats.rx_fifo_errors =
+ adapter->eth_statsp->params.response.p0rxfifooverflowdropped +
+ adapter->eth_statsp->params.response.p1rxfifooverflowdropped;
+ adapter->benet_stats.rx_frame_errors =
+ adapter->eth_statsp->params.response.p0alignmentsymerrs +
+ adapter->eth_statsp->params.response.p1alignmentsymerrs;
+ adapter->benet_stats.rx_length_errors =
+ adapter->eth_statsp->params.response.p0inrangelenerrors +
+ adapter->eth_statsp->params.response.p1inrangelenerrors;
+ adapter->benet_stats.rx_length_errors +=
+ adapter->eth_statsp->params.response.p0outrangeerrors +
+ adapter->eth_statsp->params.response.p1outrangeerrors;
+ adapter->benet_stats.rx_length_errors +=
+ adapter->eth_statsp->params.response.p0frametoolongerrors +
+ adapter->eth_statsp->params.response.p1frametoolongerrors;
+
+ pci_unmap_single(adapter->pdev, (ulong) adapter->eth_statsp,
+ sizeof(IOCTL_ETH_GET_STATISTICS),
+ PCI_DMA_FROMDEVICE);
+ return &(adapter->benet_stats);
+
+}
+
+/*
+ * net_device hard_start_xmit entry point: post one ethernet frame to
+ * the TX ring.  Returns NETDEV_TX_OK on success or NETDEV_TX_BUSY if
+ * the BNI layer could not accept the frame (the stack will requeue).
+ *
+ * Fixes: raw 1/0 return values replaced with the NETDEV_TX_* macros;
+ * redundant cast of ip_hdr() removed.
+ */
+int benet_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	PBNI_NET_OBJECT pnob = netdev->priv;
+	PBE_ADAPTER adapter = OSM_NOB(pnob)->adapter;
+	u8 proto = 0;
+	u16 lso_mss = 0;
+
+#ifdef NETIF_F_TSO
+	/*
+	 * bug# 3356.
+	 * If a LSO request translates into a single segment,
+	 * it should be posted as a ethernet WRB with no LSO.
+	 */
+	if (skb_shinfo(skb)->gso_segs != 1)
+		lso_mss = skb_shinfo(skb)->gso_size;
+#endif /* TSO */
+
+	TRACE(DL_SEND, "benet_xmit: Entry... len = %d", skb->len);
+
+	/* Tell the hardware which L4 protocol to checksum, if any. */
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		proto = ip_hdr(skb)->protocol;
+
+	if (betx_ether_frame(adapter, pnob, skb, proto, 0, lso_mss) !=
+	    BE_SUCCESS)
+		return NETDEV_TX_BUSY;
+
+	netdev->trans_start = jiffies;
+	TRACE(DL_SEND, "benet_xmit() : Exit");
+	return NETDEV_TX_OK;
+}
+
+/*
+ * This is the driver entry point to change the mtu of the device
+ * Returns 0 for success and errno for failure.
+ */
+int benet_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ u32 mtu, max_mtu, max_hdr;
+ /* Worst-case link-layer overhead added on top of the IP MTU. */
+ max_hdr = BE_ENET_HEADER_SIZE + BE_ETHERNET_FCS_SIZE +
+ BE_SNAP_HEADER_SIZE + BE_HEADER_802_2_SIZE;
+
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ max_hdr += BE_VLAN_HEADER_SIZE;
+
+ /* Total on-wire frame size implied by the requested MTU. */
+ mtu = new_mtu + max_hdr;
+
+ /*
+ * BE supports a jumbo frame size of up to 9000 bytes including the
+ * link layer header. Considering the different frame formats
+ * possible (VLAN, SNAP/LLC), the maximum possible MTU is 8974 bytes.
+ */
+ max_mtu = BE_MAX_JUMBO_FRAME_SIZE;
+
+ /* NOTE(review): the check compares the *padded* size against
+  * BE_MIN_ETHER_FRAME_SIZE, but the message advertises
+  * BE_MIN_SUPPORT_FRAME_SIZE..(max_mtu - max_hdr) in *MTU* terms —
+  * the two bounds disagree; confirm which is intended. */
+ if ((mtu < BE_MIN_ETHER_FRAME_SIZE) || (mtu > max_mtu)) {
+ printk(KERN_WARNING "Invalid MTU requested. "
+ "Must be between %d and %d bytes\n",
+ BE_MIN_SUPPORT_FRAME_SIZE, (max_mtu - max_hdr));
+ return -EINVAL;
+ }
+ printk(KERN_INFO "MTU changed from %d to %d\n", netdev->mtu,
+ new_mtu);
+ netdev->mtu = new_mtu;
+ return 0;
+}
+
+/*
+ * This is the driver entry point to register a vlan with the device.
+ * Swaps in the new vlan_group and resets the known-tag count.
+ */
+void benet_vlan_register(struct net_device *netdev, struct vlan_group *grp)
+{
+ PBNI_NET_OBJECT pnob = netdev->priv;
+
+ TRACE(DL_VLAN, "vlan register called");
+
+ /* Interrupts are masked while the pointer is swapped — presumably
+  * the RX completion path dereferences vlan_grp; confirm. */
+ disable_eq_intr(pnob);
+ OSM_NOB(pnob)->vlan_grp = grp;
+ OSM_NOB(pnob)->num_vlans = 0;
+ enable_eq_intr(pnob);
+}
+
+/*
+ * This is the driver entry point to add a vlan vlan_id with the
+ * device netdev.  The new tag goes into the first free slot
+ * (num_vlans) and the whole filter table is re-programmed.
+ *
+ * Fix: the capacity check uses >= instead of == so an inconsistent
+ * counter can never walk past the table.
+ * NOTE(review): the limit of BE_NUM_VLAN_SUPPORTED - 1 leaves the
+ * last slot unused — possibly an off-by-one; confirm intent.
+ */
+void benet_vlan_add_vid(struct net_device *netdev, u16 vlan_id)
+{
+	PBNI_NET_OBJECT pnob = netdev->priv;
+
+	TRACE(DL_VLAN, "Add vlan ID");
+	if (OSM_NOB(pnob)->num_vlans >= (BE_NUM_VLAN_SUPPORTED - 1)) {
+		/* no way to return an error */
+		printk(KERN_ERR
+		       "BladeEngine: Cannot configure more than %d Vlans\n",
+		       BE_NUM_VLAN_SUPPORTED);
+		return;
+	}
+	/* The new vlan tag goes in the slot indicated by num_vlans. */
+	OSM_NOB(pnob)->vlan_tag[OSM_NOB(pnob)->num_vlans++] = vlan_id;
+	bni_config_vlan(pnob, OSM_NOB(pnob)->vlan_tag,
+			OSM_NOB(pnob)->num_vlans, NULL, NULL, 0);
+}
+
+/*
+ * This is the driver entry point to remove a vlan vlan_id from the
+ * device netdev.  The tag is located in the driver's shadow table,
+ * the table is compacted over the hole, and the hardware filter is
+ * re-programmed.
+ *
+ * BladeEngine supports 32 vlan tag filters across both ports via the
+ * RXF_RTPR_CSR register (each 32-bit word of RXF_RTDR_CSR addresses
+ * two entries; table depth 16, hence 32 tags).
+ *
+ * Fixes: "not dound" typo; the failure trace now prints the vlan id
+ * itself rather than the id OR'ed with VLAN_VALID_BIT; the unused
+ * 'value' variable (only ever traced) is gone.
+ */
+void benet_vlan_rem_vid(struct net_device *netdev, u16 vlan_id)
+{
+	PBNI_NET_OBJECT pnob = netdev->priv;
+	u32 i;
+
+	TRACE(DL_VLAN, "Remove vlan ID");
+	TRACE(DL_VLAN, "Number of vlan tags is %d", OSM_NOB(pnob)->num_vlans);
+
+	for (i = 0; i < BE_NUM_VLAN_SUPPORTED; i++) {
+		if (OSM_NOB(pnob)->vlan_tag[i] == vlan_id) {
+			TRACE(DL_VLAN, "Vlan ID found at index %d", i);
+			break;
+		}
+	}
+
+	if (i == BE_NUM_VLAN_SUPPORTED) {
+		TRACE(DL_VLAN, "Vlan ID %d not found - remove failed",
+		      vlan_id);
+		return;
+	}
+
+	/* Compact the table over the hole and clear the freed tail slot. */
+	for (; (i + 1) < BE_NUM_VLAN_SUPPORTED; i++)
+		OSM_NOB(pnob)->vlan_tag[i] = OSM_NOB(pnob)->vlan_tag[i + 1];
+	OSM_NOB(pnob)->vlan_tag[BE_NUM_VLAN_SUPPORTED - 1] = 0;
+	OSM_NOB(pnob)->num_vlans--;
+
+	bni_config_vlan(pnob, OSM_NOB(pnob)->vlan_tag,
+			OSM_NOB(pnob)->num_vlans, NULL, NULL, 0);
+	TRACE(DL_VLAN, "Removed the vlan ID of %d", vlan_id);
+}
+
+/*
+ * This function programs multicast addresses into the exact-match
+ * multicast filter of the ASIC, or multicast-promiscuous mode when
+ * exact matching is not possible.
+ *
+ * Fix: the copy loop is capped at the 32 entries mac_addr[] holds.
+ * The original iterated the whole mc_list unbounded, overflowing the
+ * on-stack array whenever more than 32 multicast addresses were
+ * configured; lists that do not fit now fall back to multicast
+ * promiscuous mode instead.
+ */
+void be_set_multicast_filter(struct net_device *netdev)
+{
+	PBNI_NET_OBJECT pnob = netdev->priv;
+	struct dev_mc_list *mc_ptr;
+	SA_MAC_ADDRESS mac_addr[32];
+	int i;
+
+	if ((netdev->flags & IFF_ALLMULTI) ||
+	    netdev->mc_count > (int) (sizeof(mac_addr) / sizeof(mac_addr[0]))) {
+		/* set BE in Multicast promiscuous */
+		bni_set_mc_filter(pnob, 0, TRUE, NULL, NULL, NULL);
+		return;
+	}
+
+	for (mc_ptr = netdev->mc_list, i = 0;
+	     mc_ptr && i < (int) (sizeof(mac_addr) / sizeof(mac_addr[0]));
+	     mc_ptr = mc_ptr->next, i++) {
+		memcpy(mac_addr[i].bytes, mc_ptr->dmi_addr,
+		       SA_MAC_ADDRESS_SIZE);
+	}
+
+	/* reset the promiscuous mode also. */
+	bni_set_mc_filter(pnob, i, FALSE, mac_addr, NULL, NULL);
+}
+
+/*
+ * This is the driver entry point to set the multicast list for the
+ * device: promiscuous, multicast-promiscuous or exact multicast
+ * filtering.
+ *
+ * Fix: the IFF_ALLMULTI branch was byte-identical to the else branch
+ * (be_set_multicast_filter() already inspects IFF_ALLMULTI itself),
+ * so the duplicate branch is folded away.
+ */
+void benet_set_multicast_list(struct net_device *netdev)
+{
+	PBNI_NET_OBJECT pnob = netdev->priv;
+	PBE_ADAPTER adapter = OSM_NOB(pnob)->adapter;
+
+	if (netdev->flags & IFF_PROMISC) {
+		bni_set_promisc(adapter->net_obj);
+	} else {
+		bni_reset_promisc(adapter->net_obj);
+		be_set_multicast_filter(netdev);
+	}
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * netpoll hook: run the interrupt handler with the IRQ line masked so
+ * netconsole/netdump can make progress without interrupt delivery.
+ * NOTE(review): be_int() is invoked with three arguments (the
+ * pre-2.6.19 IRQ handler signature) — confirm it matches be_int's
+ * actual declaration in this tree.
+ */
+static void be_netpoll(struct net_device *netdev)
+{
+ disable_irq(netdev->irq);
+ be_int(netdev->irq, netdev, NULL);
+ enable_irq(netdev->irq);
+}
+#endif
+
+/*
+ * Standard probe-time setup for the net_device: installs all driver
+ * entry points and advertises the device's feature set.
+ *
+ * Fix: NETIF_F_HIGHDMA was set unconditionally in the base feature
+ * mask AND again behind the dma_64bit_cap check, which made the check
+ * a no-op and advertised high-memory DMA even on 32-bit-only parts.
+ * It is now set only when the device is actually DAC capable.
+ */
+int benet_probe(struct net_device *netdev)
+{
+	PBNI_NET_OBJECT pnob = netdev->priv;
+	PBE_ADAPTER adapter = OSM_NOB(pnob)->adapter;
+
+	TRACE(DL_INIT, "entered-benet_probe().");
+
+	/* Fill in generic ethernet defaults before overriding ops. */
+	ether_setup(netdev);
+
+	netdev->open = &benet_open;
+	netdev->stop = &benet_close;
+	netdev->do_ioctl = &benet_ioctl;
+	netdev->hard_start_xmit = &benet_xmit;
+	netdev->get_stats = &benet_get_stats;
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	netdev->poll_controller = &be_netpoll;
+#endif
+
+	netdev->set_multicast_list = &benet_set_multicast_list;
+	netdev->change_mtu = &benet_change_mtu;
+	netdev->set_mac_address = &benet_set_mac_addr;
+
+	netdev->vlan_rx_register = benet_vlan_register;
+	netdev->vlan_rx_add_vid = benet_vlan_add_vid;
+	netdev->vlan_rx_kill_vid = benet_vlan_rem_vid;
+
+	netdev->features =
+	    NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX |
+	    NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM;
+
+	netdev->flags |= IFF_MULTICAST;
+
+	/* If device is DAC Capable, set the HIGHDMA flag for netdevice. */
+	if (adapter->dma_64bit_cap)
+		netdev->features |= NETIF_F_HIGHDMA;
+
+#ifdef NETIF_F_TSO
+	netdev->features |= NETIF_F_TSO;
+#endif
+
+	be_set_ethtool_ops(netdev);
why is normal macro not good enough?
+	return 0;
+}
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Powered by blists - more mailing lists