Message-ID: <ca0148c30810082336t569a3e63vcb4989bbb7485349@mail.gmail.com>
Date:	Thu, 9 Oct 2008 09:36:02 +0300
From:	"Matti Linnanvuori" <mattilinn@...il.com>
To:	"David Miller" <davem@...emloft.net>,
	"Jeff Garzik" <jgarzik@...ox.com>
Cc:	netdev@...r.kernel.org
Subject: Re: WAN: add driver retina

From: Matti Linnanvuori <matti.linnanvuori@...om.com>

Retina G.703 and G.SHDSL driver.

Signed-off-by: Matti Linnanvuori <matti.linnanvuori@...om.com>

---

This patch is on top of linux-2.6.27-rc6-next-20080919:
http://groups.google.com/group/pcidriver/web/retina.patch
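
For reference, below is a minimal user-space sketch of the raw
bitstream (character device) API added by this patch. The device node
name, the channel number and the size orders are assumptions for
illustration only:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include "retina.h"

	int main(void)
	{
		unsigned long channel = 0;	/* first interface on the card */
		/* Node for minor 0 is assumed to have been created by hand. */
		int fd = open("/dev/retina0", O_RDWR);
		if (fd < 0)
			return 1;
		/* arg: channel in bits 0-1, size order in arg >> 2. */
		ioctl(fd, FEPCI_IOCTL_STREAM_BUFSIZE, (16ul << 2) | channel);
		ioctl(fd, FEPCI_IOCTL_STREAM_UNITSIZE, (10ul << 2) | channel);
		ioctl(fd, FEPCI_IOCTL_STREAM_OPEN, channel);
		ioctl(fd, FEPCI_IOCTL_STREAM_START, channel);
		/* Map the reception buffer (area 0) of the channel. */
		char *rx = mmap(NULL, 1ul << 16, PROT_READ, MAP_SHARED, fd,
				channel << CHANNEL_ADDRESS_SHIFT);
		/* Map the page where the driver publishes stream pointers. */
		char *ptrs = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd,
				  STREAM_BUFFER_POINTER_AREA);
		/* Block until the receive pointer has advanced. */
		ioctl(fd, FEPCI_IOCTL_STREAM_RECEIVE_POLL, channel);
		/* ... consume data at rx guided by the pointers, then: */
		ioctl(fd, FEPCI_IOCTL_STREAM_CLOSE, channel);
		return 0;
	}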

--- linux-2.6.27-rc6/MAINTAINERS	2008-10-09 09:06:54.547536585 +0300
+++ linux-2.6.27-rc6-next-20080919/MAINTAINERS	2008-10-09 09:09:28.135670455 +0300
@@ -3527,6 +3527,13 @@ REISERFS FILE SYSTEM
 L:	reiserfs-devel@...r.kernel.org
 S:	Supported

+RETINA WAN DRIVER
+P:	Matti Linnanvuori
+M:	matti.linnanvuori@...om.com
+L:	netdev@...r.kernel.org
+L:	linux-kernel@...r.kernel.org
+S:	Supported
+
 RFKILL
 P:	Ivo van Doorn
 M:	IvDoorn@...il.com
--- linux-2.6.27-rc6/drivers/net/wan/Kconfig	2008-10-09 09:19:00.577596411 +0300
+++ linux-2.6.27-rc6-next-20080919/drivers/net/wan/Kconfig	2008-10-09 09:13:02.829401392 +0300
@@ -492,4 +492,14 @@ config SBNI_MULTILINE

 	  If unsure, say N.

+config RETINA
+	tristate "Retina support"
+	depends on PCI
+	help
+	  Driver for Retina C5400 and E2200 PCI network cards, which
+	  support G.703 and G.SHDSL with Ethernet encapsulation, or a
+	  character device for mapping raw bitstream buffers to memory.
+	  To compile this driver as a module, choose M here: the
+	  module will be called retina.
+
 endif # WAN
--- linux-2.6.27-rc6/drivers/net/wan/Makefile	2008-10-09 09:06:55.195638433 +0300
+++ linux-2.6.27-rc6-next-20080919/drivers/net/wan/Makefile	2008-10-09 09:13:47.800466208 +0300
@@ -41,6 +41,7 @@ obj-$(CONFIG_C101)		+= c101.o
 obj-$(CONFIG_WANXL)		+= wanxl.o
 obj-$(CONFIG_PCI200SYN)		+= pci200syn.o
 obj-$(CONFIG_PC300TOO)		+= pc300too.o
+obj-$(CONFIG_RETINA)		+= retina.o

 clean-files := wanxlfw.inc
 $(obj)/wanxl.o:	$(obj)/wanxlfw.inc
--- linux-2.6.27-rc6/drivers/net/wan/retina.h	1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.27-rc6-next-20080919/drivers/net/wan/retina.h	2008-10-09 09:14:13.856559440 +0300
@@ -0,0 +1,163 @@
+/* V1.0.0 */
+
+/*
+	Copyright (C) 2002-2003 Jouni Kujala, Flexibilis Oy.
+
+	This program is free software; you can redistribute it and/or
+	modify it under the terms of the GNU General Public License
+	as published by the Free Software Foundation; either version 2
+	of the License, or (at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty of
+	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+	GNU General Public License for more details.
+
+	All the drivers derived from or based on this code fall under the
+	GPL and must retain the copyright and license notice.
+*/
+
+#ifndef RETINA_H
+#define RETINA_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/* char device related stuff: */
+
+#define FEPCI_SHARED_MEM_OFFSETT 0x8000
+#define FEPCI_IDENTIFICATION_OFFSETT (FEPCI_SHARED_MEM_OFFSETT+0x0)
+#define FEPCI_FEATURES_OFFSETT (FEPCI_SHARED_MEM_OFFSETT+0x40)
+#define FEPCI_SETTINGS_OFFSETT (FEPCI_SHARED_MEM_OFFSETT+0x80)
+#define FEPCI_STATUS_OFFSETT (FEPCI_SHARED_MEM_OFFSETT+0xE0)
+#define FEPCI_MAILBOX_OFFSETT (FEPCI_SHARED_MEM_OFFSETT+0x100)
+
+/* structures for ioctl calls: */
+struct fepci_ioctl_identification {
+	unsigned char data[0x20];
+};
+struct fepci_real_identification {
+	unsigned long int data[0x10];
+};
+
+struct fepci_ioctl_features {
+	unsigned char data[0x20];
+};
+struct fepci_real_features {
+	unsigned long int data[0x10];
+};
+
+struct fepci_ioctl_settings {
+	unsigned char data[0x30];
+};
+struct fepci_real_settings {
+	unsigned long int data[0x15];
+};
+
+struct fepci_ioctl_status {
+	unsigned char data[0x10];
+};
+struct fepci_real_status {
+	unsigned long int data[0x5];
+};
+
+struct fepci_ioctl_shared_mem {
+	unsigned long int data[0x80];
+};
+
+#define FEPCI_IOCTL_MAGIC 0xAA
+
+#define FEPCI_IOCTL_R_SHARED_MEM _IOR(FEPCI_IOCTL_MAGIC, 1, \
+struct fepci_ioctl_shared_mem)
+#define FEPCI_IOCTL_W_SHARED_MEM _IOW(FEPCI_IOCTL_MAGIC, 2, \
+struct fepci_ioctl_shared_mem)
+
+#define FEPCI_IOCTL_STREAM_BUFSIZE _IO(FEPCI_IOCTL_MAGIC, 3)
+#define FEPCI_IOCTL_STREAM_UNITSIZE _IO(FEPCI_IOCTL_MAGIC, 4)
+#define FEPCI_IOCTL_STREAM_OPEN _IO(FEPCI_IOCTL_MAGIC, 5)
+#define FEPCI_IOCTL_STREAM_START _IO(FEPCI_IOCTL_MAGIC, 6)
+#define FEPCI_IOCTL_STREAM_CLOSE _IO(FEPCI_IOCTL_MAGIC, 7)
+
+#define FEPCI_IOCTL_G_IDENTIFICATION _IOR(FEPCI_IOCTL_MAGIC, 0x81, \
+struct fepci_ioctl_identification)
+#define FEPCI_IOCTL_G_FEATURES _IOR(FEPCI_IOCTL_MAGIC, 0x82, \
+struct fepci_ioctl_features)
+#define FEPCI_IOCTL_G_SETTINGS _IOR(FEPCI_IOCTL_MAGIC, 0x83, \
+struct fepci_ioctl_settings)
+#define FEPCI_IOCTL_G_STATUS _IOR(FEPCI_IOCTL_MAGIC, 0x84, \
+struct fepci_ioctl_status)
+
+/* mailbox: */
+
+struct fepci_ioctl_mailbox {
+	unsigned char Semafore;
+	unsigned char Mail_number;
+	unsigned char Size;
+	unsigned char Command;
+	unsigned char Data[112];
+};
+
+struct fepci_real_mailbox {
+	__u32 Semafore_Mail_number;
+	__u32 Size_Command;
+	__u32 Data[112 / 2];
+};
+
+#define FEPCI_IOCTL_B_POLL _IO(FEPCI_IOCTL_MAGIC, 0x85)
+#define FEPCI_IOCTL_B_GRAB _IO(FEPCI_IOCTL_MAGIC, 0x86)
+#define FEPCI_IOCTL_B_RELEASE _IO(FEPCI_IOCTL_MAGIC, 0x87)
+#define FEPCI_IOCTL_B_S_CMAIL _IOW(FEPCI_IOCTL_MAGIC, 0x88, \
+struct fepci_ioctl_mailbox)
+#define FEPCI_IOCTL_B_S_QMAIL _IOW(FEPCI_IOCTL_MAGIC, 0x89, \
+struct fepci_ioctl_mailbox)
+#define FEPCI_IOCTL_B_G_MAIL _IOR(FEPCI_IOCTL_MAGIC, 0x90, \
+struct fepci_ioctl_mailbox)
+
+#define FEPCI_IOCTL_ALARM_MANAGER _IO(FEPCI_IOCTL_MAGIC, 0x91)
+#define FEPCI_IOCTL_STREAM_TRANSMIT_POLL _IO(FEPCI_IOCTL_MAGIC, 0x92)
+#define FEPCI_IOCTL_STREAM_RECEIVE_POLL _IO(FEPCI_IOCTL_MAGIC, 0x93)
+#define FEPCI_IOCTL_STREAM_BOTH_POLL _IO(FEPCI_IOCTL_MAGIC, 0x94)
+
+/* stream related stuff: */
+
+/* stream buffer address space:
+ * address: 0x 7 6 5 4   3 2 1 0
+ *              ^ ^ ^
+ *              | | |
+ *           card | area(rx/tx,0==rx,1==tx)
+ *             channel     */
+
+#define CARD_ADDRESS_SHIFT 24u
+#define CHANNEL_ADDRESS_SHIFT 20u
+#define AREA_ADDRESS_SHIFT 16u
+
+#define STREAM_BUFFER_POINTER_AREA 0x7fff0000	/* one page reserved */
+
+/* stream buffer pointers (at pointer area):
+ * address: 0x 7 6 5 4   3 2 1 0
+ *                        ^ ^ ^
+ *                        | | |
+ *                     card | area(rx/tx,0==rx,4==tx)
+ *                       channel     */
+
+#define CARD_POINTER_SHIFT 8u
+#define CHANNEL_POINTER_SHIFT 4u
+#define AREA_POINTER_SHIFT 2u
+
+/* fake pointers are for faking larger unit sizes to the user than
+ * the maximum internal unit size in FEPCI */
+#define USER_RX_S_FAKE_POINTER(__card, __channel, __offset) \
+((u32 *)(((__card << CARD_POINTER_SHIFT) | \
+(__channel << CHANNEL_POINTER_SHIFT) | 0x0) + __offset))
+#define USER_TX_S_FAKE_POINTER(__card, __channel, __offset) \
+((u32 *)(((__card << CARD_POINTER_SHIFT) | \
+(__channel << CHANNEL_POINTER_SHIFT) | 0x4) + __offset))
+
+#define USER_RX_S_POINTER(__card, __channel, __offset) \
+((u32 *)(((__card << CARD_POINTER_SHIFT) | \
+(__channel << CHANNEL_POINTER_SHIFT) | 0x8) + __offset))
+#define USER_TX_S_POINTER(__card, __channel, __offset) \
+((u32 *)(((__card << CARD_POINTER_SHIFT) | \
+(__channel << CHANNEL_POINTER_SHIFT) | 0xC) + __offset))
+
+#endif
--- linux-2.6.27-rc6/drivers/net/wan/retina.c	1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.27-rc6-next-20080919/drivers/net/wan/retina.c	2008-10-09 09:14:13.856559440 +0300
@@ -0,0 +1,2053 @@
+/* retina.c: */
+
+/*
+	This driver is based on:
+
+	/drivers/net/fepci.c
+	FEPCI (Frame Engine for PCI) driver for Linux operating system
+
+	Copyright (C) 2002-2003 Jouni Kujala, Flexibilis Oy.
+
+	This program is free software; you can redistribute it and/or
+	modify it under the terms of the GNU General Public License
+	as published by the Free Software Foundation; either version 2
+	of the License, or (at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty of
+	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+	GNU General Public License for more details.
+
+	All the drivers derived from or based on this code fall under the
+	GPL and must retain the copyright and license notice.
+*/
+
+#define MAX_TX_UNITS  256u
+#define MAX_RX_UNITS  256u
+
+#define MAX_UNIT_SZ_ORDER  10u
+
+#define TX_RING_SIZE	8u
+#define RX_RING_SIZE	8u
+
+#define CHANNELS 	4u
+
+#define RX_FIFO_THRESHOLD_PACKET_MODE 0x4
+#define TX_FIFO_THRESHOLD_PACKET_MODE 0x4
+#define TX_DESC_THRESHOLD_PACKET_MODE 0x4
+
+#define RX_FIFO_THRESHOLD_STREAM_MODE 0x4
+#define TX_FIFO_THRESHOLD_STREAM_MODE 0x7
+#define TX_DESC_THRESHOLD_STREAM_MODE 0x1
+
+#define RETINA_MRU 2000u
+#define RETINA_DMA_SIZE (RETINA_MRU + 4u)
+
+static char fepci_name[] = "retina";
+static const char fepci_alarm_manager_name[] = "retina alarm manager";
+static const char fepci_netdev_name[] = "dcpxx";
+
+static unsigned int find_cnt;
+
+/* Time in jiffies before concluding that the transmitter is hung. */
+#define TX_TIMEOUT (5 * HZ)
+
+#include "retina.h"
+#include <linux/mm.h>
+#include <linux/random.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#include <linux/pfn.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/fs.h>
+#include <linux/rtnetlink.h>
+
+#include <asm/pgtable.h>
+
+MODULE_VERSION("1.2.36");
+
+/* PCI I/O space extent */
+enum { FEPCI_SIZE = 0x20000 };
+
+static struct pci_device_id fepci_pci_tbl[] __devinitdata = {
+	{0x1FC0, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+	{0x1FC0, 0x0301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+	{0,}
+};
+
+MODULE_DESCRIPTION("Frame Engine for PCI (FEPCI)");
+MODULE_AUTHOR("Jouni Kujala");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, fepci_pci_tbl);
+
+struct retina_address {
+	struct sk_buff *skbuff;
+	DECLARE_PCI_UNMAP_ADDR(address)
+};
+
+struct fepci_ch_private {
+	struct net_device *this_dev;
+	struct tasklet_struct transmission;
+	struct fepci_desc __iomem *tx_desc; /* Transmission ring start. */
+	struct retina_address tx[TX_RING_SIZE];
+
+	unsigned int reg_txctrl;
+	unsigned char channel_number;
+	unsigned char cur_tx;	/* next tx descriptor to fill; in stream
+				 * mode, the descriptor being transmitted */
+	unsigned char cur_rx;	/* rx descriptor that receives the next
+				 * packet; in stream mode, the descriptor
+				 * being received */
+	bool in_eth_mode;
+
+	unsigned int reg_rxctrl;
+	struct fepci_desc __iomem *rx_desc; /* Reception ring start. */
+	struct retina_address rx[RX_RING_SIZE];
+
+	struct timer_list timer;
+	struct fepci_card_private *this_card_priv;
+
+/* stream mode: */
+	unsigned char bufsize_order;	/* 10=1kB,11=2kB,12=4kB...16=64kB */
+	unsigned char unit_sz_order;	/* 8=256B...14=16kB */
+	unsigned char fake_unit_sz_order;
+	bool in_stream_mode;
+	bool stream_on;
+	unsigned char cur_tx_unit;	/* last sent tx_unit */
+	/* rx_unit where the next packet is transferred */
+	unsigned char cur_rx_unit;
+	/* char device: */
+	u32 *rx_buffer;
+	u32 *tx_buffer;
+	unsigned bufsize;
+	unsigned unit_sz;
+	unsigned units;		/* 2,4,8,16,...,256 */
+	/* fake units (and pointers) are for faking larger unit sizes to
+	 * the user than the maximum internal unit size in FEPCI */
+	unsigned fake_unit_sz;
+	unsigned fake_units;
+	u32 *tx_unit[MAX_TX_UNITS];
+	u32 *rx_unit[MAX_RX_UNITS];
+};
+
+struct fepci_card_private {
+	unsigned char card_number;
+	bool removed;
+	uint8_t __iomem *ioaddr;
+	struct mutex mutex;
+	struct pci_dev *pci_dev;
+	struct fepci_ch_private *ch_privates[CHANNELS];
+
+	wait_queue_head_t alarm_manager_wait_q;
+
+	wait_queue_head_t stream_receive_q;
+	wait_queue_head_t stream_transmit_q;
+	wait_queue_head_t stream_both_q;
+};
+
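+/* Mailbox handshake values: the PC driver and the card's
+ * microcontroller ("UCTRL") take turns owning the mailbox by writing
+ * one of these into its semaphore byte. */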
+enum retina_semaphore {
+	RETINA_IDLE = 0,
+	RETINA_IN_USE = 7,
+	RETINA_RESERVED_UCTRL = 0x80,
+	RETINA_RESERVED_PC = 0x40,
+	RETINA_READY_PC_REPLY = 0x21,
+	RETINA_READY_PC = 0x20,
+	RETINA_READY_UCTRL_REPLY = 0x11,
+	RETINA_READY_UCTRL = 0x10
+};
+
+/* Offsets to the FEPCI registers */
+enum fepci_offsets {
+	reg_custom = 0x40,
+
+	reg_first_int_mask = 0x80,
+	reg_first_int_status = 0xc0,
+
+	reg_first_rxctrl = 0x4000,
+	to_next_rxctrl = 0x80,
+
+	reg_first_txctrl = 0x6000,
+	to_next_txctrl = 0x80,
+
+	first_rx_desc = 0x10000,
+	to_next_ch_rx_desc = 0x200,
+
+	first_tx_desc = 0x18000,
+	to_next_ch_tx_desc = 0x200,
+};
+
+enum reg_custom_bits {
+	AM_interrupt_mask = 0x1,
+	AM_interrupt_status = 0x100,
+};
+
+enum reg_receive_control {
+	Rx_fifo_threshold = 0x7,
+	Receive_enable = 0x80000000,
+};
+
+enum reg_transmit_control {
+	Tx_fifo_threshold = 0x7,
+	Tx_desc_threshold = 0x700,
+	Transmit_enable = 0x80000000,
+};
+
+enum int_bits {
+	MaskFrameReceived = 0x01,
+	MaskRxFifoError = 0x02,
+	MaskRxFrameDroppedError = 0x04,
+	MaskFrameTransmitted = 0x40,
+	MaskTxFifoError = 0x80,
+	MaskAllInts = 0xc7,
+	IntrFrameReceived = 0x01,
+	IntrRxFifoError = 0x02,
+	IntrRxFrameDroppedError = 0x04,
+	IntrFrameTransmitted = 0x40,
+	IntrTxFifoError = 0x80,
+	IntrAllInts = 0xc7,
+};
+
+/*
+ * The reception and transmission buffer descriptors.
+ * Elements are 32-bit values for endianness portability.
+ */
+
+struct fepci_desc {
+	u32 desc_a;
+	u32 desc_b;
+};
+
+enum desc_b_bits {
+	frame_length		= 0xFFF,
+	fifo_error		= 0x10000,
+	size_error		= 0x20000,
+	crc_error		= 0x40000,
+	octet_error		= 0x80000,
+	line_error		= 0x100000,
+	enable_transfer		= 0x80000000,
+	transfer_not_done	= 0x80000000,
+};
+
+/* Global variables (common to whole driver, all the cards): */
+static int major; /* character device major number */
+static struct fepci_card_private **card_privates;
+static unsigned long stream_pointers;
+
+static void set_int_mask(unsigned char channel, unsigned value,
+			 struct fepci_card_private *cp)
+{
+	uint8_t __iomem *address = cp->ioaddr + reg_first_int_mask;
+	const unsigned shift = 8u * channel;
+	uint32_t oldvalue = readl(address);
+	oldvalue &= ~(0xff << shift);	/* Clear bits. */
+	oldvalue |= value << shift;	/* Set bits. */
+	writel(oldvalue, address);
+}
+
+static inline void clear_int(unsigned char channel, uint32_t value,
+			     uint8_t __iomem *ioaddr)
+{
+	writel(~(value << (8 * channel)), ioaddr + reg_first_int_status);
+}
+
+static inline unsigned get_int_status(unsigned char channel,
+				      uint8_t __iomem *ioaddr)
+{
+	const uint32_t oldvalue = readl(ioaddr + reg_first_int_status);
+	return (oldvalue >> (8 * channel)) & 0xff; /* Clear other bits. */
+}
+
+static void fillregisterswith_00(uint8_t __iomem *ioaddr)
+{
+	writel(0x0, ioaddr + reg_first_rxctrl);
+	writel(0x0, ioaddr + reg_first_txctrl);
+	writel(0x0, ioaddr + reg_first_int_mask);
+	writel(0x0, ioaddr + reg_first_int_status);
+	writel(0x0, ioaddr + first_rx_desc);
+	writel(0x0, ioaddr + first_tx_desc);
+}
+
+static int fepci_open(struct net_device *dev);
+static void fepci_timer(unsigned long data);
+static void fepci_tx_timeout(struct net_device *dev);
+static int fepci_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t fepci_interrupt(int irq, void *dev_instance);
+static int fepci_close(struct net_device *dev);
+static void fepci_close_down(struct net_device *dev,
+			     struct fepci_ch_private *fp,
+			     struct fepci_card_private *card);
+static void fepci_remove_one(struct pci_dev *pdev);
+static void retina_tx(unsigned long channel);
+
+
+static int fepci_char_open(struct inode *inode, struct file *filp);
+
+/**
+ * fepci_char_mmap() - map buffers for raw bitstreams
+ * @filp:       pointer to struct file.
+ * @vma:        pointer to struct vm_area_struct.
+ *
+ * Offset STREAM_BUFFER_POINTER_AREA is used to map stream buffer pointers at
+ * the pointer area defined in retina.h.
+ * Otherwise, offset specifies a stream buffer area with the interface number
+ * shifted left by CHANNEL_ADDRESS_SHIFT and reception (0) or transmission (1)
+ * area shifted left by AREA_ADDRESS_SHIFT.
+ **/
+
+static int fepci_char_mmap(struct file *filp, struct vm_area_struct *vma);
+static int fepci_char_ioctl(struct inode *inode, struct file *filp,
+			    unsigned int cmd, unsigned long arg);
+
+static const struct file_operations fepci_char_fops = {
+	.owner		= THIS_MODULE,
+	.ioctl		= fepci_char_ioctl,
+	.open		= fepci_char_open,
+	.mmap		= fepci_char_mmap
+};
+
+/**
+ * fepci_stream_open() - sets an interface to raw bitstream mode
+ * @cp:         pointer to struct fepci_card_private.
+ * @fp:         pointer to struct fepci_ch_private.
+ *
+ * Sets an interface to raw bitstream mode. Called by ioctl
+ * FEPCI_IOCTL_STREAM_OPEN. The buffer size should have been specified with
+ * ioctl FEPCI_IOCTL_STREAM_BUFSIZE and transfer unit size with
+ * ioctl FEPCI_IOCTL_STREAM_UNITSIZE.
+ **/
+static int fepci_stream_open(struct fepci_card_private *cp,
+			     struct fepci_ch_private *fp);
+
+/**
+ * fepci_stream_start() - starts raw bitstream transfer of an interface
+ * @cp:         pointer to struct fepci_card_private.
+ * @fp:         pointer to struct fepci_ch_private.
+ *
+ * Starts the raw bitstream transfer of an interface. Called by ioctl
+ * FEPCI_IOCTL_STREAM_START. The interface must have been set to raw
+ * bitstream mode with ioctl FEPCI_IOCTL_STREAM_OPEN.
+ **/
+static int fepci_stream_start(struct fepci_card_private *cp,
+			      struct fepci_ch_private *fp);
+
+/**
+ * fepci_stream_close() - sets an interface off of raw bitstream mode
+ * @cp:         pointer to struct fepci_card_private.
+ * @fp:         pointer to struct fepci_ch_private.
+ *
+ * Sets an interface off of raw bitstream mode. Called by ioctl
+ * FEPCI_IOCTL_STREAM_CLOSE. The interface should have been
+ * set to raw bitstream mode with ioctl FEPCI_IOCTL_STREAM_OPEN.
+ **/
+static int fepci_stream_close(struct fepci_card_private *cp,
+			      struct fepci_ch_private *fp);
+static int fepci_stream_close_down(struct fepci_card_private *cp,
+				   struct fepci_ch_private *fp);
+
+static inline struct fepci_card_private *retina_card(unsigned number)
+{
+	struct fepci_card_private *card;
+	rcu_read_lock();
+	card = rcu_dereference(card_privates[number]);
+	rcu_read_unlock();
+	return card;
+}
+
+static int fepci_char_open(struct inode *inode, struct file *filp)
+{
+	unsigned int minor = MINOR(inode->i_rdev);
+	struct fepci_card_private *card;
+	if (unlikely(minor >= find_cnt))
+		return -ENXIO;
+	card = retina_card(minor);
+	if (unlikely(card->removed))
+		return -ENXIO;
+	filp->private_data = card;
+	return 0;
+}
+
+static int fepci_char_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+	unsigned long size = vma->vm_end - vma->vm_start;
+	unsigned long virtual_address;
+	unsigned long pfn;
+	struct fepci_card_private *device = filp->private_data;
+
+	if (offset == STREAM_BUFFER_POINTER_AREA) {
+		virtual_address = stream_pointers;
+		if (size > (1ul << PAGE_SHIFT)) {
+			dev_warn(&device->pci_dev->dev,
+				 "mmap: area size over range\n");
+			return -EINVAL;
+		}
+	} else {
+		unsigned long channel = (offset >> CHANNEL_ADDRESS_SHIFT) & 0xf;
+		/* 0 = reception, 1 = transmission */
+		unsigned long area = (offset >> AREA_ADDRESS_SHIFT) & 0xf;
+		if (unlikely(device->removed))
+			goto INVALID;
+		if (area == 0ul) {
+			virtual_address = (unsigned long)
+				device->ch_privates[channel]->rx_buffer;
+		} else if (area == 1ul) {
+			virtual_address = (unsigned long)
+				device->ch_privates[channel]->tx_buffer;
+		} else {
+INVALID:
+			return -EINVAL;
+		}
+		if (unlikely(virtual_address == 0ul))
+			goto INVALID;
+	}
+	vma->vm_file = filp;
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	pfn = PFN_DOWN(virt_to_phys((void *)virtual_address));
+	return remap_pfn_range(vma, vma->vm_start, pfn, size,
+			       vma->vm_page_prot);
+}
+
+/* mmap operations end */
+
+/* char operations start */
+
+static int fepci_copy_to_user(unsigned long to, uint8_t __iomem *from,
+			      unsigned len, bool shrink)
+{
+	if (shrink) {
+		unsigned int w;
+		for (w = 0u; w < len; w += 2) {
+			uint32_t longword = readl_relaxed(from + w / 2u *
+							  sizeof(u32));
+			int error = __put_user(longword,
+					       (unsigned char __user *)
+					       (to + w));
+			if (unlikely(error))
+				return error;
+			error = __put_user(longword >> 8,
+					   (unsigned char __user *)
+					   (to + w + 1));
+			if (unlikely(error))
+				return error;
+		}
+	} else {
+		unsigned int w;
+		for (w = 0u; w < len; w += 4u) {
+			uint32_t longword = readl_relaxed(from + w);
+			int error = __put_user(longword,
+					       (uint32_t __user *)(to + w));
+			if (unlikely(error))
+				return error;
+		}
+	}
+	return 0;
+}
+
+static int fepci_copy_from_user(uint8_t __iomem *to, unsigned long from,
+				unsigned len, bool enlarge)
+{
+	if (enlarge) {
+		unsigned int w;
+		for (w = 0u; w < len; w += 2u) {
+			unsigned char temp1;
+			unsigned char temp2;
+			int error = __get_user(temp1,
+					       (unsigned char __user *)
+					       (from + w));
+			if (unlikely(error))
+				return error;
+			error = __get_user(temp2,
+					   (unsigned char __user *)
+					   (from + w + 1u));
+			if (unlikely(error))
+				return error;
+			writel(temp1 + (temp2 << 8), to + w * 2u);
+		}
+	} else {
+		unsigned int w;
+		for (w = 0u; w < len; w += 4u) {
+			uint32_t longword;
+			int error = __get_user(longword,
+					       (u32 __user *)(from + w));
+			if (unlikely(error))
+				return error;
+			writel(longword, to + w);
+		}
+	}
+	return 0;
+}
+
+static
+enum retina_semaphore get_semafore(struct fepci_real_mailbox __iomem *mailbox)
+{
+	return readb_relaxed(&mailbox->Semafore_Mail_number);
+}
+
+static void set_semafore(struct fepci_real_mailbox __iomem *mailbox,
+			 enum retina_semaphore value)
+{
+	uint32_t number = readl_relaxed(&mailbox->Semafore_Mail_number);
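+	/* Replace the semaphore byte and bump the mail number (byte 1). */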
+	number = ((number & ~0xFF) | value) + (1u << 8);
+	writel(number, &mailbox->Semafore_Mail_number);
+}
+
+static int fepci_char_ioctl(struct inode *inode, struct file *filp,
+			    unsigned int cmd, unsigned long arg)
+{
+	unsigned int minor = MINOR(inode->i_rdev);
+	uint8_t __iomem *ioaddr;
+	struct fepci_real_mailbox __iomem *real_mailbox;
+	int retval = 0;
+	struct fepci_card_private *card = filp->private_data;
+
+	if (unlikely(card->removed))
+		return -ENXIO;
+
+	if (_IOC_DIR(cmd) & _IOC_READ)
+		if (unlikely(!access_ok(VERIFY_WRITE, (void __user *)arg,
+					_IOC_SIZE(cmd))))
+			return -EFAULT;
+	if (_IOC_DIR(cmd) & _IOC_WRITE)
+		if (unlikely(!access_ok(VERIFY_READ, (void __user *)arg,
+					_IOC_SIZE(cmd))))
+			return -EFAULT;
+
+	ioaddr = card->ioaddr;
+	real_mailbox = (struct fepci_real_mailbox __iomem *)
+			(ioaddr + FEPCI_MAILBOX_OFFSETT);
+
+	switch (cmd) {
+	case FEPCI_IOCTL_STREAM_TRANSMIT_POLL:
+		/* Here: arg == channel number. */
+		if (unlikely(arg >= CHANNELS ||
+			     !(card->ch_privates[arg]->stream_on))) {
+			return 0x2;
+		} else {
+			unsigned long pointer =
+				*USER_TX_S_FAKE_POINTER(minor, arg,
+							stream_pointers);
+			wait_event_interruptible(card->stream_transmit_q,
+						 (pointer !=
+						  *USER_TX_S_FAKE_POINTER
+						   (minor, arg,
+						    stream_pointers)));
+			retval = 0x1;
+		}
+		return retval;
+	case FEPCI_IOCTL_STREAM_RECEIVE_POLL:
+		/* Here: arg == channel number. */
+		if (unlikely(arg >= CHANNELS ||
+			     !(card->ch_privates[arg]->stream_on))) {
+			return 0x2;
+		} else {
+			unsigned long pointer =
+				*USER_RX_S_FAKE_POINTER(minor, arg,
+							stream_pointers);
+			wait_event_interruptible(card->stream_receive_q,
+						 (pointer !=
+						  *USER_RX_S_FAKE_POINTER
+						   (minor, arg,
+						    stream_pointers)));
+			retval = 0x1;
+		}
+		return retval;
+	case FEPCI_IOCTL_STREAM_BOTH_POLL:
+		/* Here: arg == channel number. */
+		if (unlikely(arg >= CHANNELS ||
+			     !(card->ch_privates[arg]->stream_on))) {
+			return 0x2;
+		} else {
+			unsigned long temp_tx_pointer =
+				*USER_TX_S_FAKE_POINTER(minor, arg,
+							stream_pointers);
+			unsigned long temp_rx_pointer =
+				*USER_RX_S_FAKE_POINTER(minor, arg,
+							stream_pointers);
+			wait_event_interruptible(card->stream_both_q,
+						 (temp_tx_pointer !=
+						  *USER_TX_S_FAKE_POINTER
+						  (minor, arg, stream_pointers))
+						 || (temp_rx_pointer !=
+						     *USER_RX_S_FAKE_POINTER
+						      (minor, arg,
+						       stream_pointers)));
+			retval = 0x1;
+		}
+		return retval;
+	case FEPCI_IOCTL_R_SHARED_MEM:
+		retval = fepci_copy_to_user(arg,
+					    ioaddr + FEPCI_SHARED_MEM_OFFSETT,
+					    _IOC_SIZE(cmd), false);
+		break;
+	case FEPCI_IOCTL_W_SHARED_MEM:
+		retval = fepci_copy_from_user(ioaddr + FEPCI_SHARED_MEM_OFFSETT,
+					      arg, _IOC_SIZE(cmd), false);
+		break;
+	case FEPCI_IOCTL_G_IDENTIFICATION:
+		retval = fepci_copy_to_user(arg,
+					    ioaddr +
+					    FEPCI_IDENTIFICATION_OFFSETT,
+					    _IOC_SIZE(cmd), true);
+		break;
+	case FEPCI_IOCTL_G_FEATURES:
+		retval = fepci_copy_to_user(arg, ioaddr +
+						 FEPCI_FEATURES_OFFSETT,
+					    _IOC_SIZE(cmd), true);
+		break;
+	case FEPCI_IOCTL_G_SETTINGS:
+		retval = fepci_copy_to_user(arg, ioaddr +
+						 FEPCI_SETTINGS_OFFSETT,
+					    _IOC_SIZE(cmd), true);
+		break;
+	case FEPCI_IOCTL_G_STATUS:
+		retval = fepci_copy_to_user(arg, ioaddr + FEPCI_STATUS_OFFSETT,
+					    _IOC_SIZE(cmd), true);
+		break;
+	case FEPCI_IOCTL_B_POLL:
+		mutex_lock(&card->mutex);
+		retval = get_semafore(real_mailbox);
+		mutex_unlock(&card->mutex);
+		break;
+	case FEPCI_IOCTL_B_GRAB:
+		mutex_lock(&card->mutex);
+		if (get_semafore(real_mailbox) == RETINA_IDLE) {
+			set_semafore(real_mailbox, RETINA_RESERVED_PC);
+			get_semafore(real_mailbox); /* Wait for write. */
+			msleep(1u);	/* Delay at least 1 millisecond. */
+			switch (get_semafore(real_mailbox)) {
+			case RETINA_RESERVED_PC:
+				retval = 0;
+				break;
+			case RETINA_READY_UCTRL:
+			case RETINA_READY_UCTRL_REPLY:
+			case RETINA_RESERVED_UCTRL:
+				retval = 0x1;
+				break;
+			default:
+				retval = 0xff;
+			}
+		} else {
+			switch (get_semafore(real_mailbox)) {
+			case RETINA_READY_UCTRL:
+			case RETINA_READY_UCTRL_REPLY:
+			case RETINA_RESERVED_UCTRL:
+				retval = 0x1;
+				break;
+			default:
+				retval = 0xff;
+			}
+		}
+		mutex_unlock(&card->mutex);
+		break;
+	case FEPCI_IOCTL_B_RELEASE:
+		mutex_lock(&card->mutex);
+		switch (get_semafore(real_mailbox)) {
+		case RETINA_RESERVED_PC:
+		case RETINA_READY_PC:
+			retval = 0x0;
+			set_semafore(real_mailbox, RETINA_IDLE);
+			break;
+		case RETINA_READY_PC_REPLY:
+			retval = 0x04;
+			break;
+		case RETINA_READY_UCTRL:
+		case RETINA_READY_UCTRL_REPLY:
+		case RETINA_RESERVED_UCTRL:
+			retval = 0x1;
+			break;
+		default:
+			retval = 0xff;
+		}
+		mutex_unlock(&card->mutex);
+		break;
+	case FEPCI_IOCTL_B_S_CMAIL:
+		mutex_lock(&card->mutex);
+		switch (get_semafore(real_mailbox)) {
+		case RETINA_RESERVED_PC:
+		case RETINA_READY_PC_REPLY:
+		case RETINA_READY_PC:
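+			/* DEFAULT has function scope; the label is in
+			 * the B_S_QMAIL case below. */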
+			if (unlikely(_IOC_SIZE(cmd) <= 2u))
+				goto DEFAULT;
+			/* Copy the mailbox. */
+			retval = fepci_copy_from_user(ioaddr +
+						FEPCI_MAILBOX_OFFSETT + 4u,
+						arg + 2ul, _IOC_SIZE(cmd) - 2u,
+						true);
+			/* Semaphore -> 10. */
+			set_semafore(real_mailbox, RETINA_READY_UCTRL);
+			break;
+		case RETINA_READY_UCTRL:
+		case RETINA_READY_UCTRL_REPLY:
+		case RETINA_RESERVED_UCTRL:
+			retval = 0x1;
+			break;
+		case RETINA_IDLE:
+			retval = 0x3;
+			break;
+		default:
+			retval = 0xff;
+		}
+		mutex_unlock(&card->mutex);
+		break;
+	case FEPCI_IOCTL_B_S_QMAIL:
+		mutex_lock(&card->mutex);
+		switch (get_semafore(real_mailbox)) {
+		case RETINA_RESERVED_PC:
+		case RETINA_READY_PC_REPLY:
+		case RETINA_READY_PC:
+			if (unlikely(_IOC_SIZE(cmd) <= 2u))
+				goto DEFAULT;
+			/* Copy the mailbox. */
+			retval = fepci_copy_from_user(ioaddr +
+						FEPCI_MAILBOX_OFFSETT + 4u,
+						arg + 2ul, _IOC_SIZE(cmd) - 2u,
+						true);
+			/* Semaphore -> 11 */
+			set_semafore(real_mailbox, RETINA_READY_UCTRL_REPLY);
+			break;
+		case RETINA_READY_UCTRL:
+		case RETINA_READY_UCTRL_REPLY:
+		case RETINA_RESERVED_UCTRL:
+			retval = 0x1;
+			break;
+		case RETINA_IDLE:
+			retval = 0x3;
+			break;
+		default:
+DEFAULT:		retval = 0xff;
+		}
+		mutex_unlock(&card->mutex);
+		break;
+	case FEPCI_IOCTL_B_G_MAIL:
+		mutex_lock(&card->mutex);
+		switch (get_semafore(real_mailbox)) {
+		case RETINA_READY_UCTRL:
+		case RETINA_READY_UCTRL_REPLY:
+		case RETINA_RESERVED_UCTRL:
+			retval = 0x1;
+			break;
+		case RETINA_RESERVED_PC:
+		case RETINA_READY_PC_REPLY:
+		case RETINA_READY_PC:
+			retval = fepci_copy_to_user(arg,
+						ioaddr +
+						FEPCI_MAILBOX_OFFSETT,
+						_IOC_SIZE(cmd), true);
+			break;
+		case RETINA_IDLE:
+			retval = 0x3;
+			break;
+		default:
+			retval = 0xff;
+		}
+		if (unlikely(retval != 0)) {
+			/* copy four lowest bytes from the mailbox */
+			retval = fepci_copy_to_user(arg,
+						ioaddr + FEPCI_MAILBOX_OFFSETT,
+						4, 1);
+			if (likely(retval == 0))
+				/* lowest byte = 0x7 */
+				retval = __put_user(0x7, (char __user *)arg);
+		}
+		mutex_unlock(&card->mutex);
+		break;
+	case FEPCI_IOCTL_ALARM_MANAGER:
+		interruptible_sleep_on(&(card->alarm_manager_wait_q));
+		return retval;
+	case FEPCI_IOCTL_STREAM_BUFSIZE:
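+		/* arg: channel in bits 0-1, buffer size order in arg >> 2. */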
+		mutex_lock(&card->mutex);
+		{
+			struct fepci_ch_private *fp =
+				card->ch_privates[arg & 3ul];
+			if (fp->in_stream_mode)
+				retval = -EBUSY;
+			else
+				fp->bufsize_order = arg >> 2;
+		}
+		mutex_unlock(&card->mutex);
+		break;
+	case FEPCI_IOCTL_STREAM_UNITSIZE:
+		mutex_lock(&card->mutex);
+		{
+			struct fepci_ch_private *fp =
+				card->ch_privates[arg & 3ul];
+			if (fp->in_stream_mode)
+				retval = -EBUSY;
+			else
+				fp->fake_unit_sz_order = arg >> 2;
+		}
+		mutex_unlock(&card->mutex);
+		break;
+	case FEPCI_IOCTL_STREAM_OPEN:
+		return fepci_stream_open(card, card->ch_privates[arg]);
+	case FEPCI_IOCTL_STREAM_START:
+		return fepci_stream_start(card, card->ch_privates[arg]);
+	case FEPCI_IOCTL_STREAM_CLOSE:
+		return fepci_stream_close(card, card->ch_privates[arg]);
+	default:
+		return -ENOTTY;
+	}
+	return retval;
+}
+
+static int fepci_register_char_device(void)
+{
+	int error =
+		register_chrdev(0u /* dynamic */, fepci_name, &fepci_char_fops);
+	if (unlikely(error < 0))
+		printk(KERN_WARNING
+		       "%s: unable to register char device\n", fepci_name);
+	return error;
+}
+
+static void fepci_unregister_char_device(void)
+{
+	unregister_chrdev(major, fepci_name);
+}
+
+/* char operations end */
+
+/* stream operations start */
+
+static irqreturn_t fepci_stream_interrupt(int irq, void *dev_instance);
+
+static int fepci_stream_close(struct fepci_card_private *cp,
+			      struct fepci_ch_private *fp)
+{
+	int error;
+	mutex_lock(&cp->mutex);
+	error = fepci_stream_close_down(cp, fp);
+	mutex_unlock(&cp->mutex);
+	return error;
+}
+
+static int fepci_stream_open(struct fepci_card_private *cp,
+			     struct fepci_ch_private *fp)
+{
+	unsigned tx_pages, rx_pages, tx_order, rx_order;
+	unsigned int u;
+	mutex_lock(&cp->mutex);
+
+	if (unlikely(fp->in_eth_mode)) {
+		dev_warn(&fp->this_dev->dev,
+			 "Interface is in Ethernet mode: "
+			 "cannot open stream interface\n");
+		mutex_unlock(&cp->mutex);
+		return -EBUSY;
+	}
+	if (unlikely(fp->in_stream_mode)) {
+		mutex_unlock(&cp->mutex);
+		return 0;
+	}
+
+	if (unlikely(cp->removed)) {
+		mutex_unlock(&cp->mutex);
+		return -ENXIO;
+	}
+
+	fp->bufsize = 1u << fp->bufsize_order;
+
+	if (unlikely(fp->fake_unit_sz_order < 5u)) {
+		dev_warn(&fp->this_dev->dev,
+			 "Unit size has to be at least 32 bytes\n");
+INVALID:
+		mutex_unlock(&cp->mutex);
+		return -EINVAL;
+	}
+
+	if (unlikely(fp->fake_unit_sz_order >= fp->bufsize_order)) {
+		dev_warn(&fp->this_dev->dev,
+			 "Bufsize has to be greater than unit size\n");
+		goto INVALID;
+	}
+
+	if (fp->fake_unit_sz_order >= MAX_UNIT_SZ_ORDER)
+		fp->unit_sz_order = MAX_UNIT_SZ_ORDER;
+	else
+		fp->unit_sz_order = fp->fake_unit_sz_order;
+
+	fp->fake_unit_sz = 1u << fp->fake_unit_sz_order;
+	fp->unit_sz = 1u << fp->unit_sz_order;
+	fp->units = 1u << (fp->bufsize_order - fp->unit_sz_order);
+	fp->fake_units = 1u << (fp->bufsize_order - fp->fake_unit_sz_order);
+
+	/* Reserve memory. */
+	if (fp->bufsize_order < PAGE_SHIFT) {
+		rx_order = 0u;
+		tx_order = 0u;
+		rx_pages = 1u;
+		tx_pages = 1u;
+	} else {
+		tx_order = fp->bufsize_order - PAGE_SHIFT;
+		tx_pages = 1u << tx_order;
+		rx_order = tx_order + 1u;
+		rx_pages = 1u << rx_order;
+	}
+	fp->in_stream_mode = true;
+	fp->tx_buffer = (u32 *)__get_free_pages(GFP_KERNEL, tx_order);
+	if (unlikely(fp->tx_buffer == NULL))
+		goto NO_MEMORY;
+	fp->rx_buffer = (u32 *)__get_free_pages(GFP_KERNEL, rx_order);
+	if (unlikely(fp->rx_buffer == NULL)) {
+NO_MEMORY:
+		dev_warn(&fp->this_dev->dev,
+			 "unable to allocate memory for buffers\n");
+		fepci_stream_close_down(cp, fp);
+		mutex_unlock(&cp->mutex);
+		return -ENOMEM;
+	}
+
+	for (u = 0u; u < fp->bufsize / 4u; u++)
+		fp->tx_buffer[u] = 0xffffffff;
+
+	*USER_RX_S_POINTER(cp->card_number, fp->channel_number, stream_pointers)
+		= 0ul;
+	*USER_TX_S_POINTER(cp->card_number, fp->channel_number, stream_pointers)
+		= 0ul;
+	*USER_RX_S_FAKE_POINTER(cp->card_number,
+				fp->channel_number, stream_pointers) = 0ul;
+	*USER_TX_S_FAKE_POINTER(cp->card_number,
+				fp->channel_number, stream_pointers) = 0ul;
+
+	/* Init the ring buffers. */
+	for (u = 0u; u < MAX_RX_UNITS; u++)
+		fp->rx_unit[u] = fp->rx_buffer + fp->unit_sz * u / sizeof(u32);
+	for (u = 0u; u < MAX_TX_UNITS; u++)
+		fp->tx_unit[u] = fp->tx_buffer + fp->unit_sz * u / sizeof(u32);
+
+	for (u = 0u; u < RX_RING_SIZE; u++) {
+		writel(0u, &fp->rx_desc[u].desc_a);
+		writel(0u, &fp->rx_desc[u].desc_b);
+	}
+	for (u = 0u; u < TX_RING_SIZE; u++) {
+		writel(0u, &fp->tx_desc[u].desc_a);
+		writel(0u, &fp->tx_desc[u].desc_b);
+	}
+	mutex_unlock(&cp->mutex);
+	return 0;
+}
+
+static int fepci_stream_start(struct fepci_card_private *cp,
+			      struct fepci_ch_private *fp)
+{
+	unsigned i;
+	uint8_t __iomem *ioaddr = cp->ioaddr;
+	struct net_device *dev = fp->this_dev;
+	mutex_lock(&cp->mutex);
+	if (unlikely(!fp->in_stream_mode)) {
+		mutex_unlock(&cp->mutex);
+		dev_warn(&dev->dev,
+			 "interface is not in stream mode: "
+			 "streaming cannot be started\n");
+		return -EBUSY;
+	}
+	if (unlikely(fp->stream_on)) {
+		mutex_unlock(&cp->mutex);
+		return 0;
+	} else {
+		/* Reserve IRQ. */
+		int error = request_irq(dev->irq, &fepci_stream_interrupt,
+					IRQF_SHARED, dev->name, dev);
+		if (unlikely(error)) {
+			mutex_unlock(&cp->mutex);
+			dev_warn(&dev->dev,
+				 "unable to allocate IRQ %d, error %d\n",
+				 dev->irq, error);
+			return error;
+		}
+	}
+
+	fp->stream_on = true;
+
+	/* Sending and receiving on, start from the beginning of the buffer. */
+	fp->cur_tx_unit = 0u;
+	fp->cur_rx_unit = 0u;
+	fp->cur_tx = 0u;
+	fp->cur_rx = 0u;
+
+	/* All the descriptors ready to go: */
+	for (i = 0; i < min(RX_RING_SIZE, TX_RING_SIZE); i++) {
+		dma_addr_t address = pci_map_single(cp->pci_dev,
+						    fp->
+						    rx_unit[(fp->cur_rx_unit +
+							     i) % fp->units],
+						    fp->unit_sz,
+						    PCI_DMA_FROMDEVICE);
+		if (unlikely(pci_dma_mapping_error(cp->pci_dev, address))) {
+			dev_warn(&dev->dev,
+				 "failed to map reception DMA buffer\n");
+		} else {
+			unsigned next = (fp->cur_rx + i) & (RX_RING_SIZE - 1);
+			pci_unmap_addr_set(fp->rx + next, address, address);
+			writel(address, &fp->rx_desc[next].desc_a);
+			if (!(readl(&fp->rx_desc[next].desc_b) &
+			      enable_transfer))
+				writel(enable_transfer,
+				       &fp->rx_desc[next].desc_b);
+		}
+		address = pci_map_single(cp->pci_dev,
+					 fp->tx_unit[(fp->cur_tx_unit + i) %
+					 fp->units], fp->unit_sz,
+					 PCI_DMA_TODEVICE);
+		if (unlikely(pci_dma_mapping_error(cp->pci_dev, address))) {
+			dev_warn(&dev->dev,
+				 "failed to map transmission DMA buffer\n");
+		} else {
+			unsigned next = (fp->cur_tx + i) & (TX_RING_SIZE - 1);
+			pci_unmap_addr_set(fp->tx + next, address, address);
+			writel(address, &fp->tx_desc[next].desc_a);
+			if (!(readl_relaxed(&fp->tx_desc[next].desc_b) &
+			      enable_transfer))
+				writel(enable_transfer |
+				       (fp->unit_sz & frame_length),
+				       &fp->tx_desc[next].desc_b);
+		}
+	}
+
+	/* irq on */
+	set_int_mask(fp->channel_number,
+		     MaskFrameReceived | MaskFrameTransmitted |
+		     MaskRxFifoError | MaskRxFrameDroppedError |
+		     MaskTxFifoError, cp);
+	/* Start Rx and Tx channels */
+	writel(Receive_enable |
+	       (Rx_fifo_threshold & RX_FIFO_THRESHOLD_STREAM_MODE),
+	       ioaddr + fp->reg_rxctrl);
+	writel((Transmit_enable |
+		(Tx_desc_threshold &
+		 (TX_DESC_THRESHOLD_STREAM_MODE << 8)) |
+		(Tx_fifo_threshold & TX_FIFO_THRESHOLD_STREAM_MODE)),
+	       ioaddr + fp->reg_txctrl);
+	mutex_unlock(&cp->mutex);
+	return 0;
+}
+
+static inline void fepci_stream_stop(struct fepci_card_private *cp,
+				     struct fepci_ch_private *fp)
+{
+	uint8_t __iomem *ioaddr = cp->ioaddr;
+	unsigned d = min(RX_RING_SIZE, TX_RING_SIZE) - 1;
+	struct net_device *dev = fp->this_dev;
+	fp->stream_on = false;
+	/* Stop Rx and Tx channels. */
+	writel(0x0, ioaddr + fp->reg_rxctrl);
+	writel(0x0, ioaddr + fp->reg_txctrl);
+
+	/* Disable interrupts by clearing the interrupt mask. */
+	set_int_mask(fp->channel_number, 0x0, cp);
+
+	/* unregister irq */
+	free_irq(dev->irq, dev);
+
+	do {
+		if (likely(!pci_dma_mapping_error(cp->pci_dev,
+					pci_unmap_addr(fp->rx + d, address))))
+			pci_unmap_single(cp->pci_dev,
+					 pci_unmap_addr(fp->rx + d, address),
+					 fp->unit_sz,
+					 PCI_DMA_FROMDEVICE);
+		if (likely(!pci_dma_mapping_error(cp->pci_dev,
+					pci_unmap_addr(fp->tx + d, address))))
+			pci_unmap_single(cp->pci_dev,
+					 pci_unmap_addr(fp->tx + d, address),
+					 fp->unit_sz, PCI_DMA_TODEVICE);
+	} while (d--);
+}
+
+static int fepci_stream_close_down(struct fepci_card_private *cp,
+				   struct fepci_ch_private *fp)
+{
+	unsigned rx_pages, tx_pages, rx_order, tx_order;
+	if (unlikely(!(fp->in_stream_mode)))
+		return -EBUSY;
+	fepci_stream_stop(cp, fp);
+	/* release memory */
+	if (fp->bufsize_order < PAGE_SHIFT) {
+		rx_order = 0u;
+		tx_order = 0u;
+		rx_pages = 1u;
+		tx_pages = 1u;
+	} else {
+		rx_order = fp->bufsize_order - PAGE_SHIFT + 1u;
+		rx_pages = 1u << rx_order;
+		tx_order = fp->bufsize_order - PAGE_SHIFT;
+		tx_pages = 1u << tx_order;
+	}
+	if (fp->rx_buffer) {
+		free_pages((unsigned long)fp->rx_buffer, rx_order);
+		fp->rx_buffer = NULL;
+	}
+	if (fp->tx_buffer) {
+		free_pages((unsigned long)fp->tx_buffer, tx_order);
+		fp->tx_buffer = NULL;
+	}
+	fp->in_stream_mode = false;
+	return 0;
+}
+
+static irqreturn_t fepci_stream_interrupt(int irq, void *dev_instance)
+{
+	struct net_device *dev = dev_instance;
+	struct fepci_ch_private *fp = netdev_priv(dev);
+	uint8_t __iomem *ioaddr = (uint8_t __iomem *)dev->base_addr;
+	const unsigned char channel = fp->channel_number;
+	const uint32_t intr_status = get_int_status(channel, ioaddr);
+	unsigned int temp_rx;
+	unsigned int temp_rx_unit;
+	unsigned int temp_tx;
+	unsigned int temp_tx_unit;
+	if (!intr_status)
+		return IRQ_NONE;
+	clear_int(channel, intr_status, ioaddr);
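+	/* First retire the descriptors the card has completed and publish
+	 * the new positions to user space, then re-arm the freed
+	 * descriptors for the next transfers. */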
+	temp_rx = fp->cur_rx;
+	while (!(readl(&fp->rx_desc[fp->cur_rx].desc_b) & transfer_not_done)
+		/* Has been received. */ &&
+		/* Stop if made one round. */
+	       temp_rx != ((fp->cur_rx + 1u) & (RX_RING_SIZE - 1u))) {
+		if (likely(!pci_dma_mapping_error(fp->this_card_priv->pci_dev,
+						  pci_unmap_addr(fp->rx +
+								 fp->cur_rx,
+								 address))))
+			pci_unmap_single(fp->this_card_priv->pci_dev,
+					 pci_unmap_addr(fp->rx + fp->cur_rx,
+							address),
+					 fp->unit_sz, PCI_DMA_FROMDEVICE);
+		fp->cur_rx = (fp->cur_rx + 1u) & (RX_RING_SIZE - 1u);
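+		/* Branchless wrap: the unit index resets to 0 at fp->units. */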
+		fp->cur_rx_unit = (fp->cur_rx_unit + 1u);
+		fp->cur_rx_unit *= fp->cur_rx_unit < fp->units;
+		*USER_RX_S_POINTER(fp->this_card_priv->card_number,
+				   fp->channel_number,
+				   stream_pointers) = fp->cur_rx_unit;
+		*USER_RX_S_FAKE_POINTER(fp->this_card_priv->card_number,
+					fp->channel_number,
+					stream_pointers) =
+			fp->cur_rx_unit * fp->unit_sz / fp->fake_unit_sz;
+		wake_up_interruptible(&(fp->this_card_priv->stream_receive_q));
+		wake_up_interruptible(&(fp->this_card_priv->stream_both_q));
+	}
+	/* From the first uninitialized descriptor to cur_rx. */
+	temp_rx = (fp->cur_rx + 1u) & (RX_RING_SIZE - 1u);
+	temp_rx_unit = (fp->cur_rx_unit + 1);
+	temp_rx_unit *= temp_rx_unit < fp->units;
+	while (temp_rx != fp->cur_rx) {
+		uint32_t desc_b = readl(&fp->rx_desc[temp_rx].desc_b);
+		if (!(desc_b & transfer_not_done)) {
+			dma_addr_t bus_address;
+			/* Update debug counters. */
+			if (unlikely(desc_b & fifo_error)) {
+				dev->stats.rx_errors++;
+				dev->stats.rx_fifo_errors++;
+			} else if (unlikely(desc_b & size_error)) {
+				dev->stats.rx_errors++;
+				dev->stats.rx_over_errors++;
+			} else if (unlikely(desc_b & octet_error)) {
+				dev->stats.rx_errors++;
+				dev->stats.rx_length_errors++;
+			} else if (unlikely(desc_b & line_error)) {
+				dev->stats.rx_errors++;
+				dev->stats.rx_missed_errors++;
+			} else {
+				dev->stats.rx_packets++;
+				dev->stats.rx_bytes += fp->unit_sz;
+			}
+			/* Initialize the descriptor for transfer. */
+			bus_address =
+				pci_map_single(fp->this_card_priv->pci_dev,
+					       fp->rx_unit[temp_rx_unit],
+					       fp->unit_sz, PCI_DMA_FROMDEVICE);
+			if (likely(!pci_dma_mapping_error(fp->this_card_priv->
+									pci_dev,
+							  bus_address))) {
+				pci_unmap_addr_set(fp->rx + temp_rx, address,
+						   bus_address);
+				writel(bus_address,
+					&fp->rx_desc[temp_rx].desc_a);
+				writel(enable_transfer,
+					&fp->rx_desc[temp_rx].desc_b);
+			} else {
+				dev_warn(&dev->dev,
+					 "failed to map DMA for reception\n");
+			}
+		}
+		temp_rx = (temp_rx + 1u) & (RX_RING_SIZE - 1u);
+		temp_rx_unit = (temp_rx_unit + 1u);
+		temp_rx_unit *= temp_rx_unit < fp->units;
+	}
+
+	temp_tx = fp->cur_tx;
+	while (!(readl_relaxed(&fp->tx_desc[fp->cur_tx].desc_b) &
+		transfer_not_done) /* Has been transmitted. */ &&
+		/* Stop if made one round. */
+	       temp_tx != ((fp->cur_tx + 1u) & (TX_RING_SIZE - 1u))) {
+		if (likely(!pci_dma_mapping_error(fp->this_card_priv->pci_dev,
+						  pci_unmap_addr(fp->tx +
+								 fp->cur_tx,
+								 address))))
+			pci_unmap_single(fp->this_card_priv->pci_dev,
+					 pci_unmap_addr(fp->tx + fp->cur_tx,
+							address),
+					 fp->unit_sz, PCI_DMA_TODEVICE);
+		fp->cur_tx = (fp->cur_tx + 1u) & (TX_RING_SIZE - 1u);
+		fp->cur_tx_unit = (fp->cur_tx_unit + 1u);
+		fp->cur_tx_unit *= fp->cur_tx_unit < fp->units;
+		*USER_TX_S_POINTER(fp->this_card_priv->card_number,
+				   fp->channel_number,
+				   stream_pointers) = fp->cur_tx_unit;
+		*USER_TX_S_FAKE_POINTER(fp->this_card_priv->
+					card_number,
+					fp->channel_number,
+					stream_pointers) =
+			fp->cur_tx_unit * fp->unit_sz / fp->fake_unit_sz;
+		wake_up_interruptible(&(fp->this_card_priv->
+					stream_transmit_q));
+		wake_up_interruptible(&(fp->this_card_priv->
+					stream_both_q));
+	}
+
+	/* From the first uninitialized descriptor to cur_tx. */
+	temp_tx = (fp->cur_tx + 1u) & (TX_RING_SIZE - 1u);
+	temp_tx_unit = (fp->cur_tx_unit + 1u);
+	temp_tx_unit *= temp_tx_unit < fp->units;
+
+	while (temp_tx != fp->cur_tx) {
+		uint32_t desc_b = readl_relaxed(&fp->tx_desc[temp_tx].desc_b);
+		if (!(desc_b & transfer_not_done)) {
+			dma_addr_t bus_address;
+			/* Update statistics. */
+			if (unlikely(desc_b & fifo_error)) {
+				dev->stats.tx_errors++;
+				dev->stats.tx_fifo_errors++;
+			} else {
+				dev->stats.tx_packets++;
+				dev->stats.tx_bytes += fp->unit_sz;
+			}
+			/* Initialize the descriptor for transfer. */
+			bus_address =
+				pci_map_single(fp->this_card_priv->pci_dev,
+					       fp->tx_unit[temp_tx_unit],
+					       fp->unit_sz, PCI_DMA_TODEVICE);
+			if (likely(!pci_dma_mapping_error(fp->this_card_priv->
+									pci_dev,
+							  bus_address))) {
+				pci_unmap_addr_set(fp->tx + temp_tx,
+						   address, bus_address);
+				writel(bus_address,
+				       &fp->tx_desc[temp_tx].desc_a);
+				writel(enable_transfer |
+				       (fp->unit_sz & frame_length),
+				       &fp->tx_desc[temp_tx].desc_b);
+			} else {
+				dev_warn(&dev->dev,
+					 "failed to map transmission DMA\n");
+			}
+		}
+		temp_tx = (temp_tx + 1u) & (TX_RING_SIZE - 1u);
+		temp_tx_unit = (temp_tx_unit + 1u);
+		temp_tx_unit *= temp_tx_unit < fp->units;
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* stream operations end */
+
+static inline u16 get_common_reg_word(uint8_t __iomem *ioaddr,
unsigned offsett)
+{
+	u16 word = readw_relaxed(ioaddr + FEPCI_IDENTIFICATION_OFFSETT +
+				 (offsett << 1));
+	return word;
+}
+
+static irqreturn_t alarm_manager_interrupt(int irq, void *pointer)
+{
+	struct fepci_card_private *card_private = pointer;
+	uint8_t __iomem *ioaddr_reg_custom = card_private->ioaddr + reg_custom;
+	if (readl_relaxed(ioaddr_reg_custom) & AM_interrupt_status) {
+		/* clear interrupt (zero everything but the mask bit) */
+		writel(AM_interrupt_mask, ioaddr_reg_custom);
+		/* wake queue */
+		wake_up(&(card_private->alarm_manager_wait_q));
+		return IRQ_HANDLED;
+	} else {
+		return IRQ_NONE;
+	}
+}
+
+static int __devinit fepci_init_one(struct pci_dev *pdev,
+				    const struct pci_device_id *ent)
+{
+	int i;
+	unsigned j;
+	uint8_t __iomem *ioaddr;
+	unsigned position = 0u;
+	struct fepci_card_private *card_private, **cards, **old;
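+	/* Reuse a slot left behind by a removed card; otherwise grow the
+	 * RCU-protected card array by one. */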
+	for (; position < find_cnt; position++) {
+		card_private = card_privates[position];
+		if (card_private->pci_dev == NULL) {
+			card_private->pci_dev = pdev;
+			goto FOUND;
+		}
+	}
+	if (unlikely(find_cnt == 256u))
+		return -ENOMEM;
+	cards = kmalloc((find_cnt + 1u) * sizeof(struct fepci_card_private *),
+			GFP_KERNEL);
+	if (cards == NULL)
+		return -ENOMEM;
+	card_private = kzalloc(sizeof(struct fepci_card_private), GFP_KERNEL);
+	if (card_private == NULL) {
+		kfree(cards);
+		return -ENOMEM;
+	}
+	card_private->removed = true;
+	init_waitqueue_head(&(card_private->alarm_manager_wait_q));
+	init_waitqueue_head(&(card_private->stream_transmit_q));
+	init_waitqueue_head(&(card_private->stream_receive_q));
+	init_waitqueue_head(&(card_private->stream_both_q));
+	card_private->card_number = find_cnt;
+	mutex_init(&card_private->mutex);
+	cards[find_cnt] = card_private;
+	memcpy(cards, card_privates,
+	       sizeof(struct fepci_card_private *) * find_cnt);
+	old = card_privates;
+	rcu_assign_pointer(card_privates, cards);
+	synchronize_rcu();
+	kfree(old);
+	find_cnt++;
+FOUND:
+	if (PCI_FUNC(pdev->devfn) != 0u)
+		return -ENXIO;
+	i = pci_enable_device(pdev);
+	if (unlikely(i)) {
+		dev_warn(&pdev->dev, "enabling error %d\n", i);
+		return i;
+	}
+	pci_set_master(pdev);
+	i = pci_request_regions(pdev, fepci_name);
+	if (unlikely(i)) {
+		dev_warn(&pdev->dev, "requesting regions error %d\n", i);
+		pci_disable_device(pdev);
+		return i;
+	}
+	i = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+	if (unlikely(i)) {
+		dev_warn(&pdev->dev, "no suitable DMA available\n");
+		goto ERR_1;
+	}
+	if (unlikely(pci_resource_len(pdev, 0u) < FEPCI_SIZE)) {
+		dev_warn(&pdev->dev, "resource length less than required %u\n",
+			 FEPCI_SIZE);
+		i = -ENXIO;
+		goto ERR_1;
+	}
+	if (unlikely(!(pci_resource_flags(pdev, 0u) & IORESOURCE_MEM))) {
+		i = -ENXIO;
+		goto ERR_1;
+	}
+	ioaddr = pci_iomap(pdev, 0u, FEPCI_SIZE);
+	if (unlikely(!ioaddr)) {
+		dev_warn(&pdev->dev, "mapping failed\n");
+		i = -ENOMEM;
+		goto ERR_1;
+	}
+	pci_set_drvdata(pdev, card_private);
+	card_private->ioaddr = ioaddr;
+	card_private->pci_dev = pdev;
+	fillregisterswith_00(ioaddr);
+	i = request_irq(pdev->irq, &alarm_manager_interrupt,
+			IRQF_SHARED, fepci_alarm_manager_name, card_private);
+	if (unlikely(i)) {
+		dev_warn(&pdev->dev,
+			 "unable to allocate alarm manager IRQ %u: %d\n",
+			 pdev->irq, i);
+		goto ERR_2;
+	}
+	/* Alarm manager interrupt on: */
+	writel(AM_interrupt_mask, ioaddr + reg_custom);
+	for (j = 0; j < CHANNELS; j++) {
+		char *name;
+		struct fepci_ch_private *fp;
+		struct net_device *dev =
+			alloc_etherdev(sizeof(struct fepci_ch_private));
+		struct fepci_real_mailbox __iomem *real_mailbox =
+			(struct fepci_real_mailbox __iomem *)
+			(ioaddr + FEPCI_MAILBOX_OFFSETT);
+		unsigned long waituntil;
+		uint8_t *address;
+		if (unlikely(!dev)) {
+			dev_warn(&pdev->dev,
+				 "cannot allocate Ethernet device\n");
+			continue;
+		}
+		address = dev->dev_addr;
+		fp = netdev_priv(dev);
+		card_private->ch_privates[j] = fp;
+		name = dev->name;
+		/* name := dcp00..dcpnn */
+		memcpy(name, fepci_netdev_name, 6u);
+		/* Interface number (card * CHANNELS + channel) -> ASCII: */
+		name[4] = ((position * CHANNELS + j) % 10u) + '0';
+		name[3] = ((position * CHANNELS + j) / 10u) + '0';
+		clear_int(j, IntrAllInts, ioaddr);
+		ether_setup(dev);
+		/* The HW address is fetched via the mailbox: */
+		set_semafore(real_mailbox, RETINA_RESERVED_PC);
+		writel(0x1 /* size */ + (0x8 << 8) /* get mac command */,
+		       &real_mailbox->Size_Command);
+		set_semafore(real_mailbox, RETINA_READY_UCTRL_REPLY);
+		waituntil = jiffies + HZ;
+		while (time_before(jiffies, waituntil) &&
+		       get_semafore(real_mailbox) != RETINA_READY_PC)
+			msleep(1u);
+		if (get_semafore(real_mailbox) == RETINA_READY_PC) {
+			u32 __iomem *data = real_mailbox->Data + 3u * j;
+			address[5] = readb_relaxed(data);
+			address[4] = readb_relaxed((u8 __iomem *)data + 1u);
+			address[3] = readb_relaxed(++data);
+			address[2] = readb_relaxed((u8 __iomem *)data + 1u);
+			address[1] = readb_relaxed(++data);
+			address[0] = readb_relaxed((u8 __iomem *)data + 1u);
+			if (unlikely(!is_valid_ether_addr(address)))
+				goto RANDOM;
+		} else {
+RANDOM:			random_ether_addr(address);
+		}
+		set_semafore(real_mailbox, RETINA_IDLE);
+		dev->addr_len = 6u;
+		dev->base_addr = (unsigned long)ioaddr;
+		dev->irq = pdev->irq;
+		fp->rx_desc = (struct fepci_desc __iomem *)
+			(ioaddr + first_rx_desc + j * to_next_ch_rx_desc);
+		fp->tx_desc = (struct fepci_desc __iomem *)
+			(ioaddr + first_tx_desc + j * to_next_ch_tx_desc);
+		fp->channel_number = j;	/* The channel in this device. */
+		fp->this_dev = dev;
+		fp->this_card_priv = card_private;
+		fp->cur_tx = 0u;
+		fp->in_stream_mode = false;
+		fp->in_eth_mode = false;
+		fp->reg_rxctrl = reg_first_rxctrl + j * to_next_rxctrl;
+		fp->reg_txctrl = reg_first_txctrl + j * to_next_txctrl;
+		/* The specific entries in the device structure. */
+		dev->open = &fepci_open;
+		dev->hard_start_xmit = &fepci_start_xmit;
+		dev->stop = &fepci_close;
+		dev->tx_timeout = fepci_tx_timeout;
+		dev->watchdog_timeo = TX_TIMEOUT;
+		tasklet_init(&fp->transmission, retina_tx, (unsigned long)fp);
+		dev->flags |= IFF_POINTOPOINT;
+		dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+		SET_NETDEV_DEV(dev, &pdev->dev);
+		i = register_netdev(dev);
+		if (unlikely(i)) {
+			dev_warn(&dev->dev, "register_netdev failed: %d\n", i);
+			continue;
+		}
+	}
+	smp_wmb(); /* Set removed false after initialization. */
+	card_private->removed = false;
+	return 0;
+ERR_2:
+	iounmap(ioaddr);
+	card_private->pci_dev = NULL;
+	pci_set_drvdata(pdev, NULL);
+ERR_1:
+	pci_disable_device(pdev);
+	pci_release_regions(pdev);
+	return i;
+}
+
+/* Initialize the reception and transmission ring buffers. */
+static inline void fepci_init_ring(struct net_device *dev)
+{
+	struct fepci_ch_private *fp = netdev_priv(dev);
+	unsigned d;
+
+	for (d = 0u; d < RX_RING_SIZE; d++) {
+		struct sk_buff *skb = __netdev_alloc_skb(dev,
+							 RETINA_DMA_SIZE +
+							 NET_IP_ALIGN,
+							 GFP_KERNEL);
+		if (unlikely(skb == NULL)) {
+ZERO:
+			writel(0u, &fp->rx_desc[d].desc_a);
+			writel(0u, &fp->rx_desc[d].desc_b);
+		} else {
+			dma_addr_t bus_address;
+			skb_reserve(skb, NET_IP_ALIGN);
+			bus_address =
+				pci_map_single(fp->this_card_priv->pci_dev,
+						skb->data, RETINA_MRU,
+						PCI_DMA_FROMDEVICE);
+			if (likely(!pci_dma_mapping_error(fp->this_card_priv->
+									pci_dev,
+							  bus_address))) {
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				fp->rx[d].skbuff = skb;
+				pci_unmap_addr_set(fp->rx + d, address,
+						   bus_address);
+				writel(bus_address, &fp->rx_desc[d].desc_a);
+				writel(enable_transfer, &fp->rx_desc[d].desc_b);
+			} else {
+				dev_kfree_skb(skb);
+				goto ZERO;
+			}
+		}
+	}
+
+	for (d = 0u; d < TX_RING_SIZE; d++) {
+		fp->tx[d].skbuff = NULL;
+		writel(0u, &fp->tx_desc[d].desc_a); /* No address. */
+		/* No transfer enable, no interrupt enable. */
+		writel(0u, &fp->tx_desc[d].desc_b);
+	}
+}
+
+static int fepci_open_down(struct net_device *dev, struct fepci_ch_private *fp)
+{
+	uint8_t __iomem *ioaddr = (uint8_t __iomem *)dev->base_addr;
+	struct fepci_card_private *cp = fp->this_card_priv;
+	if (unlikely(cp->removed)) {
+		return -ENXIO;
+	} else {
+		int i = request_irq(dev->irq, &fepci_interrupt,
+				    IRQF_SHARED, dev->name, dev);
+		if (i) {
+			dev_warn(&dev->dev,
+				 "unable to allocate IRQ %d, error 0x%x\n",
+				 dev->irq, i);
+			return i;
+		}
+	}
+
+	fp->in_eth_mode = true;
+
+	fepci_init_ring(dev);
+
+	fp->cur_rx = 0u;
+	fp->cur_tx = 0u;
+
+	netif_carrier_off(dev);
+
+	/* Enable interrupts by setting the interrupt mask. */
+
+	set_int_mask(fp->channel_number,
+		     MaskFrameReceived | MaskFrameTransmitted |
+		     MaskRxFifoError | MaskRxFrameDroppedError |
+		     MaskTxFifoError, cp);
+
+	/* Start Rx and Tx channels. */
+	writel(Receive_enable |
+	       (Rx_fifo_threshold & RX_FIFO_THRESHOLD_PACKET_MODE),
+	       ioaddr + fp->reg_rxctrl);
+	writel((Transmit_enable |
+		(Tx_desc_threshold &
+		 (TX_DESC_THRESHOLD_PACKET_MODE << 8)) |
+		(Tx_fifo_threshold & TX_FIFO_THRESHOLD_PACKET_MODE)),
+	       ioaddr + fp->reg_txctrl);
+
+	netif_start_queue(dev);
+
+	init_timer(&fp->timer);
+	fp->timer.expires = jiffies + HZ;
+	fp->timer.data = (unsigned long)dev;
+	fp->timer.function = &fepci_timer;
+	add_timer(&fp->timer);
+
+	return 0;
+}
+
+static int fepci_open(struct net_device *dev)
+{
+	struct fepci_ch_private *fp = netdev_priv(dev);
+	struct fepci_card_private *cp = fp->this_card_priv;
+	int error;
+	mutex_lock(&cp->mutex);
+	if (unlikely(fp->in_stream_mode))
+		fepci_stream_close_down(cp, fp);
+	error = fepci_open_down(dev, fp);
+	mutex_unlock(&cp->mutex);
+	return error;
+}
+
+static void fepci_timer(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct fepci_ch_private *fp = netdev_priv(dev);
+
+	if ((get_common_reg_word(fp->this_card_priv->ioaddr, 0x72) >>
+	     fp->channel_number) & 1u)
+		netif_carrier_off(dev);
+	else
+		netif_carrier_on(dev);
+
+	if (fp->in_eth_mode)
+		mod_timer(&fp->timer, jiffies + 5ul * HZ);
+}
+
+static void fepci_tx_timeout(struct net_device *dev)
+{
+	struct fepci_ch_private *fp = netdev_priv(dev);
+	tasklet_schedule(&fp->transmission);
+}
+
+static int fepci_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct fepci_ch_private *fp;
+	unsigned cur_tx;
+	unsigned next;
+	unsigned tx_length = skb->len;
+	dma_addr_t bus_address;
+
+	if (unlikely(tx_length < ETH_ZLEN)) {
+		if (unlikely(skb_padto(skb, ETH_ZLEN))) {
+			dev->stats.tx_dropped++;
+			return NETDEV_TX_OK;
+		}
+		tx_length = ETH_ZLEN;
+	}
+	fp = netdev_priv(dev);
+	bus_address = pci_map_single(fp->this_card_priv->pci_dev, skb->data,
+				     tx_length, PCI_DMA_TODEVICE);
+	cur_tx = fp->cur_tx;
+	if (likely(!pci_dma_mapping_error(fp->this_card_priv->pci_dev,
+					  bus_address))) {
+		struct fepci_desc __iomem *descriptor;
+		pci_unmap_addr_set(fp->tx + cur_tx, address, bus_address);
+		descriptor = &fp->tx_desc[cur_tx];
+		writel(bus_address, &descriptor->desc_a);
+		writel((tx_length & frame_length) | enable_transfer,
+		       &descriptor->desc_b);
+	} else {
+		/* Mapping failed: drop the frame rather than requeue,
+		 * which would retry the same unmappable buffer forever. */
+		dev_kfree_skb(skb);
+		dev->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	fp->tx[cur_tx].skbuff = skb;
+
+	/* Calculate the next transmission descriptor entry. */
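+	/* TX_RING_SIZE must be a power of two for this mask to wrap. */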
+	next = (cur_tx + 1u) & (TX_RING_SIZE - 1u);
+	fp->cur_tx = next;
+	/* If the next descriptor is still in use, stop accepting frames. */
+	if (fp->tx[next].skbuff != NULL)
+		netif_stop_queue(dev);
+	dev->trans_start = jiffies;
+
+	return NETDEV_TX_OK;
+}
+
+static void retina_tx(unsigned long channel)
+{
+	struct fepci_ch_private *fp = (struct fepci_ch_private *)channel;
+	struct net_device *dev = fp->this_dev;
+	struct fepci_desc __iomem *tx_desc = fp->tx_desc;
+	unsigned d = 0u;
+	struct netdev_queue *txq;
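+	/* Reclaim every descriptor the hardware has finished with. */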
+	do {
+		uint32_t desc_b;
+		struct sk_buff *skb = fp->tx[d].skbuff;
+		struct fepci_desc __iomem *desc;
+		if (skb == NULL)
+			continue;
+		desc = tx_desc + d;
+		desc_b = readl_relaxed(&desc->desc_b);
+		if (!(desc_b & transfer_not_done)) { /* Has been sent. */
+			unsigned int len = skb->len;
+			pci_unmap_single(fp->this_card_priv->pci_dev,
+					 pci_unmap_addr(fp->tx + d, address),
+					 len, PCI_DMA_TODEVICE);
+			dev_kfree_skb(skb);
+			fp->tx[d].skbuff = NULL;
+			if (unlikely(desc_b & fifo_error)) {
+				dev->stats.tx_errors++;
+				dev->stats.tx_fifo_errors++;
+			} else {
+				dev->stats.tx_packets++;
+				dev->stats.tx_bytes += len;
+			}
+		}
+	} while (d++ < TX_RING_SIZE - 1u);
+	txq = netdev_get_tx_queue(dev, 0u);
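+	/* Waking the queue races with fepci_start_xmit(): if the Tx lock
+	 * is contended, retry from the tasklet instead of spinning. */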
+	if (__netif_tx_trylock(txq)) {
+		unsigned next = fp->cur_tx;
+		if (netif_queue_stopped(dev) &&
+		    fp->tx[next].skbuff == NULL &&
+		    fp->in_eth_mode)
+			netif_wake_queue(dev);
+		__netif_tx_unlock(txq);
+	} else {
+		tasklet_schedule(&fp->transmission);
+	}
+}
+
+static inline void fepci_rx(struct fepci_ch_private *fp,
+			    struct net_device *dev)
+{
+	unsigned d, old_cur_rx = fp->cur_rx;
+	unsigned last = (old_cur_rx + RX_RING_SIZE - 1u) & (RX_RING_SIZE - 1u);
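+	/* Scan at most RX_RING_SIZE - 1 descriptors, presumably leaving
+	 * one slot free so a full ring is not mistaken for an empty one. */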
+	for (d = old_cur_rx; d != last; d = (d + 1u) & (RX_RING_SIZE - 1u)) {
+		uint32_t desc_b;
+		struct sk_buff **rx_skbuff = &fp->rx[d].skbuff;
+		struct sk_buff *skb = *rx_skbuff;
+		struct fepci_desc __iomem *rx_desc = fp->rx_desc + d;
+		if (unlikely(skb == NULL))
+			goto RESERVE;
+		desc_b = readl(&rx_desc->desc_b);
+		if (!(desc_b & transfer_not_done)) { /* Transfer done. */
+			uint32_t length;
+			fp->cur_rx = (d + 1u) & (RX_RING_SIZE - 1u);
+			if (unlikely(desc_b & (fifo_error | size_error |
+					       crc_error | octet_error |
+					       line_error))) {
+				if (desc_b & fifo_error)
+					dev->stats.rx_fifo_errors++;
+				else if (desc_b & size_error)
+					dev->stats.rx_over_errors++;
+				else if (desc_b & crc_error)
+					dev->stats.rx_crc_errors++;
+				else if (desc_b & octet_error)
+					dev->stats.rx_length_errors++;
+				else
+					dev->stats.rx_missed_errors++;
+ENABLE_TRANSFER:		writel(enable_transfer, &rx_desc->desc_b);
+				dev->stats.rx_errors++;
+				continue;
+			}
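+			/* The reported length appears to include the
+			 * trailing 4-byte frame check sequence; drop it. */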
+			length = (desc_b & frame_length) - 4u;
+			if (unlikely(length > RETINA_MRU)) {
+				dev->stats.rx_over_errors++;
+				goto ENABLE_TRANSFER;
+			}
+			pci_unmap_single(fp->this_card_priv->pci_dev,
+				pci_unmap_addr(fp->rx + d, address),
+				RETINA_MRU, PCI_DMA_FROMDEVICE);
+			__skb_put(skb, length);
+			skb->protocol = eth_type_trans(skb, dev);
+			if (dev->flags & IFF_POINTOPOINT) {
+				/* Everything received is for us. */
+				if (dev->flags & IFF_NOARP) {
+					/* NOARP applied ->
+					 * destination MAC addresses
+					 * are bogus. */
+					if (skb->pkt_type ==
+					    PACKET_OTHERHOST)
+						skb->pkt_type =
+							PACKET_HOST;
+				} else {
+					/* NOARP not applied ->
+					 * destination MAC addresses are
+					 * broadcast. */
+					if (skb->pkt_type ==
+					    PACKET_BROADCAST)
+						skb->pkt_type =
+							PACKET_HOST;
+				}	/* IFF_NOARP */
+			}	/* IFF_POINTOPOINT */
+			netif_rx(skb);
+			dev->stats.rx_bytes += length;
+			dev->stats.rx_packets++;
+			dev->last_rx = jiffies;
+			/* Reserve a new receive buffer. */
+RESERVE:		skb = netdev_alloc_skb(dev, RETINA_DMA_SIZE +
+						    NET_IP_ALIGN);
+			if (unlikely(skb == NULL)) {
+				*rx_skbuff = NULL;
+				continue;	/* Better luck next round. */
+			} else {
+				dma_addr_t address;
+				skb_reserve(skb, NET_IP_ALIGN);
+				address = pci_map_single(fp->this_card_priv->
+							 pci_dev, skb->data,
+							 RETINA_MRU,
+							 PCI_DMA_FROMDEVICE);
+				if (likely(!pci_dma_mapping_error(fp->
+							this_card_priv->pci_dev,
+								  address))) {
+					pci_unmap_addr_set(fp->rx + d,
+							   address, address);
+					*rx_skbuff = skb;
+					skb->ip_summed = CHECKSUM_UNNECESSARY;
+					writel(address, &rx_desc->desc_a);
+					writel(enable_transfer,
+					       &rx_desc->desc_b);
+				} else {
+					*rx_skbuff = NULL;
+					dev_kfree_skb_irq(skb);
+					dev_warn(&dev->dev,
+						 "failed to map DMA\n");
+				}
+			}
+		}
+	}
+}
+
+static irqreturn_t fepci_interrupt(int irq, void *dev_instance)
+{
+	struct net_device *dev = dev_instance;
+	uint8_t __iomem *ioaddr = (uint8_t __iomem *)dev->base_addr;
+	struct fepci_ch_private *fp = netdev_priv(dev);
+	const unsigned char channel = fp->channel_number;
+	const uint32_t intr_status = get_int_status(channel, ioaddr);
+
+	if (!intr_status)
+		return IRQ_NONE;
+	clear_int(channel, intr_status, ioaddr);
+
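+	/* Handle receive work here; defer transmit completion to the
+	 * transmission tasklet. */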
+	if (intr_status &
+		(IntrFrameReceived | IntrRxFifoError | IntrRxFrameDroppedError))
+		fepci_rx(fp, dev);
+	if (intr_status & IntrFrameTransmitted)
+		tasklet_schedule(&fp->transmission);
+	return IRQ_HANDLED;
+}
+
+static void fepci_close_down(struct net_device *dev,
+			     struct fepci_ch_private *fp,
+			     struct fepci_card_private *card)
+{
+	unsigned d;
+	uint8_t __iomem *ioaddr;
+	struct pci_dev *pdev;
+	if (unlikely(!fp->in_eth_mode))
+		return;
+	/* Disable interrupts by clearing the interrupt mask. */
+	set_int_mask(fp->channel_number, 0x0, card);
+
+	/* Stop the transmission and reception processes. */
+	ioaddr = (uint8_t __iomem *)dev->base_addr;
+	writel(0x0, ioaddr + fp->reg_rxctrl);
+	writel(0x0, ioaddr + fp->reg_txctrl);
+	fp->in_eth_mode = false;
+	/* Make the cleared mode flag visible before stopping the timer,
+	 * so fepci_timer() cannot rearm it. */
+	smp_wmb();
+	del_timer_sync(&fp->timer);
+
+	free_irq(dev->irq, dev);
+
+	tasklet_kill(&fp->transmission);
+	netif_tx_disable(dev);
+	pdev = card->pci_dev;
+	/* Free all the reception struct sk_buffs. */
+	for (d = 0u; d < RX_RING_SIZE; d++) {
+		struct sk_buff *skb = fp->rx[d].skbuff;
+		if (skb != NULL) {
+			pci_unmap_single(pdev,
+					 pci_unmap_addr(fp->rx + d, address),
+					 RETINA_MRU, PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(skb);
+			fp->rx[d].skbuff = NULL;
+		}
+	}
+	/* Free all the transmission sk_buffs. */
+	for (d = 0u; d < TX_RING_SIZE; d++) {
+		struct sk_buff *skb = fp->tx[d].skbuff;
+		if (skb != NULL) {
+			pci_unmap_single(pdev,
+					 pci_unmap_addr(fp->tx + d, address),
+					 skb->len, PCI_DMA_TODEVICE);
+			dev_kfree_skb(skb);
+			fp->tx[d].skbuff = NULL;
+		}
+	}
+}
+
+static int fepci_close(struct net_device *dev)
+{
+	struct fepci_ch_private *fp = netdev_priv(dev);
+	struct fepci_card_private *card = fp->this_card_priv;
+	if (unlikely(!netif_device_present(dev)))
+		return -ENODEV;
+	fepci_close_down(dev, fp, card);
+	return 0;
+}
+
+static void alarm_off(uint8_t __iomem *ioaddr, unsigned int irq)
+{
+	uint8_t __iomem *ioaddr_reg_custom = ioaddr + reg_custom;
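+	/* Repeat the write until the mask reads back clear, presumably
+	 * because a concurrent alarm manager interrupt may re-enable it. */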
+	do {
+		/* Turn the alarm manager interrupt off. */
+		writel(0u, ioaddr_reg_custom);
+		synchronize_irq(irq);
+	} while (readl_relaxed(ioaddr_reg_custom) & AM_interrupt_mask);
+}
+
+static void fepci_remove_one(struct pci_dev *pdev)
+{
+	struct fepci_card_private *cardp = pci_get_drvdata(pdev);
+	unsigned int c;
+	uint8_t __iomem *ioaddr = cardp->ioaddr;
+	unsigned int irq = pdev->irq;
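+	/* Setting removed makes subsequent opens fail in fepci_open_down(). */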
+	cardp->removed = true;
+	alarm_off(ioaddr, irq);
+
+	for (c = 0u; c < CHANNELS; c++) {
+		struct fepci_ch_private *fp = cardp->ch_privates[c];
+		struct net_device *dev = fp->this_dev;
+		if (unlikely(dev == NULL))
+			continue;
+		unregister_netdev(dev);
+		fepci_stream_close(cardp, fp);
+		free_netdev(dev);
+		cardp->ch_privates[c] = NULL;
+	}
+	free_irq(irq, cardp);
+
+	pci_set_drvdata(pdev, NULL);
+
+	cardp->pci_dev = NULL;
+
+	iounmap(ioaddr);
+
+	pci_disable_device(pdev);
+	pci_release_regions(pdev);
+}
+
+#ifdef CONFIG_PM
+static int fepci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct fepci_card_private *cardp = pci_get_drvdata(pdev);
+	unsigned channel = 0u;
+	unsigned irq = pdev->irq;
+	cardp->removed = true;
+	do {
+		struct fepci_ch_private *fp = cardp->ch_privates[channel];
+		struct net_device *dev = fp->this_dev;
+		bool in_eth_mode;
+		bool in_stream_mode;
+		bool stream_on;
+		if (unlikely(dev == NULL))
+			continue;
+		netif_device_detach(dev);
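+		/* fepci_close_down() and fepci_stream_close() clear these
+		 * flags; save them so resume can restore the same mode. */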
+		in_eth_mode = fp->in_eth_mode;
+		in_stream_mode = fp->in_stream_mode;
+		stream_on = fp->stream_on;
+		rtnl_lock();
+		if (in_eth_mode)
+			fepci_close_down(fp->this_dev, fp, cardp);
+		else if (in_stream_mode)
+			fepci_stream_close(cardp, fp);
+		rtnl_unlock();
+		fp->in_eth_mode = in_eth_mode;
+		fp->in_stream_mode = in_stream_mode;
+		fp->stream_on = stream_on;
+	} while (channel++ < CHANNELS - 1u);
+	alarm_off(cardp->ioaddr, irq);
+	/* Disable IRQ */
+	free_irq(irq, cardp);
+	cardp->pci_dev = NULL;
+	pci_save_state(pdev);
+	/* Disable IO/bus master/irq router */
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	return 0;
+}
+
+static int fepci_resume(struct pci_dev *pdev)
+{
+	struct fepci_card_private *cardp = pci_get_drvdata(pdev);
+	unsigned channel;
+	int error;
+	unsigned irq;
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	/* The device's IRQ may have changed across suspend/resume;
+	 * propagate the current value to each net device below. */
+	error = pci_enable_device(pdev);
+	if (unlikely(error))
+		return error;
+	pci_set_master(pdev);
+	/* Driver specific operations. */
+	irq = pdev->irq;
+	cardp->pci_dev = pdev;
+	error = request_irq(pdev->irq, &alarm_manager_interrupt,
+			    IRQF_SHARED, fepci_alarm_manager_name, cardp);
+	if (unlikely(error))
+		return error;
+	/* Turn alarm manager interrupt on. */
+	writel(AM_interrupt_mask, cardp->ioaddr + reg_custom);
+	channel = 0u;
+	cardp->removed = false;
+	do {
+		struct fepci_ch_private *fp = cardp->ch_privates[channel];
+		struct net_device *dev = fp->this_dev;
+		if (unlikely(dev == NULL))
+			continue;
+		dev->irq = irq;
+		rtnl_lock();
+		if (fp->in_eth_mode) {
+			int open = fepci_open_down(dev, fp);
+			if (unlikely(open))
+				error = open;
+		} else if (fp->in_stream_mode) {
+			int open;
+			fp->in_stream_mode = false;
+			open = fepci_stream_open(cardp, fp);
+			if (unlikely(open))
+				error = open;
+			if (fp->stream_on) {
+				fp->stream_on = false;
+				open = fepci_stream_start(cardp, fp);
+				if (unlikely(open))
+					error = open;
+			}
+		}
+		rtnl_unlock();
+		netif_device_attach(dev);
+	} while (channel++ < CHANNELS - 1u);
+	return error;
+}
+#endif
+
+static struct pci_driver fepci_driver = {
+	.name		= fepci_name,
+	.id_table	= fepci_pci_tbl,
+	.probe		= fepci_init_one,
+	.remove		= fepci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= fepci_suspend,
+	.resume		= fepci_resume
+#endif
+};
+
+static int __init fepci_init(void)
+{
+	int error;
+	stream_pointers = get_zeroed_page(GFP_KERNEL);
+	if (unlikely(stream_pointers == 0ul))
+		return -ENOMEM;
+	error = pci_register_driver(&fepci_driver);
+	if (unlikely(error)) {
+		free_page(stream_pointers);
+		return error;
+	}
+	major = fepci_register_char_device();
+	if (unlikely(major < 0)) {
+		pci_unregister_driver(&fepci_driver);
+		free_page(stream_pointers);
+		return major;
+	}
+	return 0;
+}
+
+static void __exit fepci_cleanup(void)
+{
+	unsigned card;
+	pci_unregister_driver(&fepci_driver);
+	fepci_unregister_char_device();
+	free_page(stream_pointers);
+	for (card = 0u; card < find_cnt; card++)
+		kfree(card_privates[card]);
+	kfree(card_privates);
+}
+
+module_init(fepci_init);
+module_exit(fepci_cleanup);