Message-Id: <1394805887-22030-5-git-send-email-vbridgers2013@gmail.com>
Date:	Fri, 14 Mar 2014 09:04:42 -0500
From:	Vince Bridgers <vbridgers2013@...il.com>
To:	devicetree@...r.kernel.org, netdev@...r.kernel.org,
	linux-doc@...r.kernel.org
Cc:	robh+dt@...nel.org, pawel.moll@....com, mark.rutland@....com,
	ijc+devicetree@...lion.org.uk, galak@...eaurora.org,
	rob@...dley.net, vbridgers2013@...il.com
Subject: [PATCH net-next V5 4/9] Altera TSE: Add Altera Ethernet Driver SGDMA file components

This patch adds the SGDMA soft IP support for the Altera Triple
Speed Ethernet driver.

Signed-off-by: Vince Bridgers <vbridgers2013@...il.com>
---
V5: - No changes for V5

V4: - use netdev_* logging instead of dev_* logging.

V3: - Reorder commits, otherwise no changes to these files

V2: - Use 32-bit lower/upper physical address accessors per comments
      from Florian.
    - Break up driver file submission for initial review by logical
      function.
---
 drivers/net/ethernet/altera/altera_sgdma.c   |  511 ++++++++++++++++++++++++++
 drivers/net/ethernet/altera/altera_sgdma.h   |   35 ++
 drivers/net/ethernet/altera/altera_sgdmahw.h |  124 +++++++
 3 files changed, 670 insertions(+)
 create mode 100644 drivers/net/ethernet/altera/altera_sgdma.c
 create mode 100644 drivers/net/ethernet/altera/altera_sgdma.h
 create mode 100644 drivers/net/ethernet/altera/altera_sgdmahw.h

diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
new file mode 100644
index 0000000..cbeee07
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -0,0 +1,511 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/list.h>
+#include "altera_utils.h"
+#include "altera_tse.h"
+#include "altera_sgdmahw.h"
+#include "altera_sgdma.h"
+
+static void sgdma_descrip(struct sgdma_descrip *desc,
+			  struct sgdma_descrip *ndesc,
+			  dma_addr_t ndesc_phys,
+			  dma_addr_t raddr,
+			  dma_addr_t waddr,
+			  u16 length,
+			  int generate_eop,
+			  int rfixed,
+			  int wfixed);
+
+static int sgdma_async_write(struct altera_tse_private *priv,
+			      struct sgdma_descrip *desc);
+
+static int sgdma_async_read(struct altera_tse_private *priv);
+
+static dma_addr_t
+sgdma_txphysaddr(struct altera_tse_private *priv,
+		 struct sgdma_descrip *desc);
+
+static dma_addr_t
+sgdma_rxphysaddr(struct altera_tse_private *priv,
+		 struct sgdma_descrip *desc);
+
+static int sgdma_txbusy(struct altera_tse_private *priv);
+
+static int sgdma_rxbusy(struct altera_tse_private *priv);
+
+static void
+queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer);
+
+static void
+queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer);
+
+static struct tse_buffer *
+dequeue_tx(struct altera_tse_private *priv);
+
+static struct tse_buffer *
+dequeue_rx(struct altera_tse_private *priv);
+
+static struct tse_buffer *
+queue_rx_peekhead(struct altera_tse_private *priv);
+
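+/* Initializes the pending tx/rx buffer lists and maps the tx and rx
+ * descriptor memory regions so the SGDMA controllers can access them.
+ */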
+int sgdma_initialize(struct altera_tse_private *priv)
+{
+	priv->txctrlreg = SGDMA_CTRLREG_ILASTD;
+
+	priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
+		      SGDMA_CTRLREG_ILASTD;
+
+	INIT_LIST_HEAD(&priv->txlisthd);
+	INIT_LIST_HEAD(&priv->rxlisthd);
+
+	priv->rxdescphys = (dma_addr_t) 0;
+	priv->txdescphys = (dma_addr_t) 0;
+
+	priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
+					  priv->rxdescmem, DMA_BIDIRECTIONAL);
+
+	if (dma_mapping_error(priv->device, priv->rxdescphys)) {
+		sgdma_uninitialize(priv);
+		netdev_err(priv->dev, "error mapping rx descriptor memory\n");
+		return -EINVAL;
+	}
+
+	priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
+					  priv->txdescmem, DMA_TO_DEVICE);
+
+	if (dma_mapping_error(priv->device, priv->txdescphys)) {
+		sgdma_uninitialize(priv);
+		netdev_err(priv->dev, "error mapping tx descriptor memory\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
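+/* Unmaps the tx and rx descriptor memory regions mapped by
+ * sgdma_initialize(), if they were mapped.
+ */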
+void sgdma_uninitialize(struct altera_tse_private *priv)
+{
+	if (priv->rxdescphys)
+		dma_unmap_single(priv->device, priv->rxdescphys,
+				 priv->rxdescmem, DMA_BIDIRECTIONAL);
+
+	if (priv->txdescphys)
+		dma_unmap_single(priv->device, priv->txdescphys,
+				 priv->txdescmem, DMA_TO_DEVICE);
+}
+
+/* This function resets the SGDMA controller and clears the
+ * descriptor memory used for transmits and receives.
+ */
+void sgdma_reset(struct altera_tse_private *priv)
+{
+	u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
+	u32 txdescriplen   = priv->txdescmem;
+	u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
+	u32 rxdescriplen   = priv->rxdescmem;
+	struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
+	struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
+
+	/* Initialize descriptor memory to 0 */
+	memset(ptxdescripmem, 0, txdescriplen);
+	memset(prxdescripmem, 0, rxdescriplen);
+
+	iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
+	iowrite32(0, &ptxsgdma->control);
+
+	iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
+	iowrite32(0, &prxsgdma->control);
+}
+
+void sgdma_enable_rxirq(struct altera_tse_private *priv)
+{
+	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+	priv->rxctrlreg |= SGDMA_CTRLREG_INTEN;
+	tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
+}
+
+void sgdma_enable_txirq(struct altera_tse_private *priv)
+{
+	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+	priv->txctrlreg |= SGDMA_CTRLREG_INTEN;
+	tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
+}
+
+/* For SGDMA, RX interrupts remain enabled once enabled; nothing to do here */
+void sgdma_disable_rxirq(struct altera_tse_private *priv)
+{
+}
+
+/* For SGDMA, TX interrupts remain enabled once enabled; nothing to do here */
+void sgdma_disable_txirq(struct altera_tse_private *priv)
+{
+}
+
+void sgdma_clear_rxirq(struct altera_tse_private *priv)
+{
+	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+	tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+}
+
+void sgdma_clear_txirq(struct altera_tse_private *priv)
+{
+	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+	tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+}
+
+/* Transmits a buffer through the SGDMA. Returns the number of buffers
+ * transmitted, 0 if not possible.
+ *
+ * tx_lock is held by the caller
+ */
+int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
+{
+	int pktstx = 0;
+	struct sgdma_descrip *descbase =
+		(struct sgdma_descrip *)priv->tx_dma_desc;
+
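+	/* Two adjacent descriptors are used per transmit: the first
+	 * describes the transfer, the second terminates the chain and is
+	 * never owned by hardware.
+	 */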
+	struct sgdma_descrip *cdesc = &descbase[0];
+	struct sgdma_descrip *ndesc = &descbase[1];
+
+	/* wait 'til the tx sgdma is ready for the next transmit request */
+	if (sgdma_txbusy(priv))
+		return 0;
+
+	sgdma_descrip(cdesc,			/* current descriptor */
+		      ndesc,			/* next descriptor */
+		      sgdma_txphysaddr(priv, ndesc),
+		      buffer->dma_addr,		/* address of packet to xmit */
+		      0,			/* write addr 0 for tx dma */
+		      buffer->len,		/* length of packet */
+		      SGDMA_CONTROL_EOP,	/* Generate EOP */
+		      0,			/* read fixed */
+		      SGDMA_CONTROL_WR_FIXED);	/* write fixed */
+
+	pktstx = sgdma_async_write(priv, cdesc);
+
+	/* enqueue the request to the pending transmit queue */
+	queue_tx(priv, buffer);
+
+	return 1;
+}
+
+
+/* tx_lock held to protect access to queued tx list
+ */
+u32 sgdma_tx_completions(struct altera_tse_private *priv)
+{
+	u32 ready = 0;
+	struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
+
+	if (!sgdma_txbusy(priv) &&
+	    ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
+	    (dequeue_tx(priv))) {
+		ready = 1;
+	}
+
+	return ready;
+}
+
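+/* Queues a receive buffer and restarts the rx SGDMA if it is idle */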
+int sgdma_add_rx_desc(struct altera_tse_private *priv,
+		      struct tse_buffer *rxbuffer)
+{
+	queue_rx(priv, rxbuffer);
+	return sgdma_async_read(priv);
+}
+
+/* status is returned in the upper 16 bits,
+ * length is returned in the lower 16 bits
+ */
+u32 sgdma_rx_status(struct altera_tse_private *priv)
+{
+	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+	struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
+	struct sgdma_descrip *desc = NULL;
+	int pktsrx;
+	unsigned int rxstatus = 0;
+	unsigned int pktlength = 0;
+	unsigned int pktstatus = 0;
+	struct tse_buffer *rxbuffer = NULL;
+
+	dma_sync_single_for_cpu(priv->device,
+				priv->rxdescphys,
+				priv->rxdescmem,
+				DMA_BIDIRECTIONAL);
+
+	desc = &base[0];
+	if ((ioread32(&csr->status) & SGDMA_STSREG_EOP) ||
+	    (desc->status & SGDMA_STATUS_EOP)) {
+		pktlength = desc->bytes_xferred;
+		pktstatus = desc->status & 0x3f;
+		rxstatus = pktstatus;
+		rxstatus = rxstatus << 16;
+		rxstatus |= (pktlength & 0xffff);
+
+		desc->status = 0;
+
+		rxbuffer = dequeue_rx(priv);
+		if (rxbuffer == NULL)
+			netdev_err(priv->dev,
+				   "sgdma rx and rx queue empty!\n");
+
+		/* kick the rx sgdma after reaping this descriptor */
+		pktsrx = sgdma_async_read(priv);
+	}
+
+	return rxstatus;
+}
+
+
+/* Private functions */
+static void sgdma_descrip(struct sgdma_descrip *desc,
+			  struct sgdma_descrip *ndesc,
+			  dma_addr_t ndesc_phys,
+			  dma_addr_t raddr,
+			  dma_addr_t waddr,
+			  u16 length,
+			  int generate_eop,
+			  int rfixed,
+			  int wfixed)
+{
+	/* Clear the next descriptor as not owned by hardware */
+	u32 ctrl = ndesc->control;
+	ctrl &= ~SGDMA_CONTROL_HW_OWNED;
+	ndesc->control = ctrl;
+
+	ctrl = SGDMA_CONTROL_HW_OWNED;
+	ctrl |= generate_eop;
+	ctrl |= rfixed;
+	ctrl |= wfixed;
+
+	/* Channel is implicitly zero, initialized to 0 by default */
+
+	desc->raddr = raddr;
+	desc->waddr = waddr;
+	desc->next = lower_32_bits(ndesc_phys);
+	desc->control = ctrl;
+	desc->status = 0;
+	desc->rburst = 0;
+	desc->wburst = 0;
+	desc->bytes = length;
+	desc->bytes_xferred = 0;
+}
+
+/* If the hardware is busy, don't restart the async read.
+ * If the status register is 0 - the initial state - restart the async read,
+ * most likely for the first time when populating a receive buffer.
+ * If the read status indicates not busy and a status is present, restart
+ * the async DMA read.
+ */
+static int sgdma_async_read(struct altera_tse_private *priv)
+{
+	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+	struct sgdma_descrip *descbase =
+		(struct sgdma_descrip *)priv->rx_dma_desc;
+
+	struct sgdma_descrip *cdesc = &descbase[0];
+	struct sgdma_descrip *ndesc = &descbase[1];
+
+	unsigned int sts = ioread32(&csr->status);
+	struct tse_buffer *rxbuffer = NULL;
+
+	if (!sgdma_rxbusy(priv)) {
+		rxbuffer = queue_rx_peekhead(priv);
+		if (rxbuffer == NULL)
+			return 0;
+
+		sgdma_descrip(cdesc,		/* current descriptor */
+			      ndesc,		/* next descriptor */
+			      sgdma_rxphysaddr(priv, ndesc),
+			      0,		/* read addr 0 for rx dma */
+			      rxbuffer->dma_addr, /* write addr for rx dma */
+			      0,		/* read 'til EOP */
+			      0,		/* EOP: NA for rx dma */
+			      0,		/* read fixed: NA for rx dma */
+			      0);		/* SOP: NA for rx DMA */
+
+		/* clear control and status */
+		iowrite32(0, &csr->control);
+
+		/* If status bits are set, clear them */
+		if (sts & 0xf)
+			iowrite32(0xf, &csr->status);
+
+		dma_sync_single_for_device(priv->device,
+					   priv->rxdescphys,
+					   priv->rxdescmem,
+					   DMA_BIDIRECTIONAL);
+
+		iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
+			  &csr->next_descrip);
+
+		iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
+			  &csr->control);
+
+		return 1;
+	}
+
+	return 0;
+}
+
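+/* Starts an asynchronous transmit of the given descriptor. Returns 1 if
+ * the transfer was started, or 0 if the tx SGDMA is still busy.
+ */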
+static int sgdma_async_write(struct altera_tse_private *priv,
+			     struct sgdma_descrip *desc)
+{
+	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+
+	if (sgdma_txbusy(priv))
+		return 0;
+
+	/* clear control and status */
+	iowrite32(0, &csr->control);
+	iowrite32(0x1f, &csr->status);
+
+	dma_sync_single_for_device(priv->device, priv->txdescphys,
+				   priv->txdescmem, DMA_TO_DEVICE);
+
+	iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
+		  &csr->next_descrip);
+
+	iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
+		  &csr->control);
+
+	return 1;
+}
+
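+/* Translates a tx descriptor's virtual address to the bus address seen
+ * by the SGDMA, using its offset within the tx descriptor memory.
+ */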
+static dma_addr_t
+sgdma_txphysaddr(struct altera_tse_private *priv,
+		 struct sgdma_descrip *desc)
+{
+	dma_addr_t paddr = priv->txdescmem_busaddr;
+	dma_addr_t offs = (dma_addr_t)((dma_addr_t)desc -
+				(dma_addr_t)priv->tx_dma_desc);
+	return paddr + offs;
+}
+
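+/* As above, but for a descriptor within the rx descriptor memory */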
+static dma_addr_t
+sgdma_rxphysaddr(struct altera_tse_private *priv,
+		 struct sgdma_descrip *desc)
+{
+	dma_addr_t paddr = priv->rxdescmem_busaddr;
+	dma_addr_t offs = (dma_addr_t)((dma_addr_t)desc -
+				(dma_addr_t)priv->rx_dma_desc);
+	return paddr + offs;
+}
+
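+/* Removes the first entry of @list into @entry, or sets @entry to NULL
+ * if the list is empty. Caller must hold the appropriate lock.
+ */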
+#define list_remove_head(list, entry, type, member)			\
+	do {								\
+		entry = NULL;						\
+		if (!list_empty(list)) {				\
+			entry = list_entry((list)->next, type, member);	\
+			list_del_init(&entry->member);			\
+		}							\
+	} while (0)
+
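+/* Returns the first entry of @list in @entry without removing it, or
+ * NULL if the list is empty.
+ */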
+#define list_peek_head(list, entry, type, member)			\
+	do {								\
+		entry = NULL;						\
+		if (!list_empty(list)) {				\
+			entry = list_entry((list)->next, type, member);	\
+		}							\
+	} while (0)
+
+/* adds a tse_buffer to the tail of a tx buffer list.
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list.
+ */
+static void
+queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer)
+{
+	list_add_tail(&buffer->lh, &priv->txlisthd);
+}
+
+
+/* adds a tse_buffer to the tail of a rx buffer list
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list.
+ */
+static void
+queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer)
+{
+	list_add_tail(&buffer->lh, &priv->rxlisthd);
+}
+
+/* dequeues a tse_buffer from the transmit buffer list, otherwise
+ * returns NULL if empty.
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list.
+ */
+static struct tse_buffer *
+dequeue_tx(struct altera_tse_private *priv)
+{
+	struct tse_buffer *buffer = NULL;
+	list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
+	return buffer;
+}
+
+/* dequeues a tse_buffer from the receive buffer list, otherwise
+ * returns NULL if empty
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list.
+ */
+static struct tse_buffer *
+dequeue_rx(struct altera_tse_private *priv)
+{
+	struct tse_buffer *buffer = NULL;
+	list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
+	return buffer;
+}
+
+/* dequeues a tse_buffer from the receive buffer list, otherwise
+ * returns NULL if empty
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list while the
+ * head is being examined.
+ */
+static struct tse_buffer *
+queue_rx_peekhead(struct altera_tse_private *priv)
+{
+	struct tse_buffer *buffer = NULL;
+	list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
+	return buffer;
+}
+
+/* check and return rx sgdma status without polling
+ */
+static int sgdma_rxbusy(struct altera_tse_private *priv)
+{
+	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+	return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
+}
+
+/* Waits for the tx sgdma to finish its current operation; returns 0
+ * when it transitions to nonbusy, or 1 if the operation times out.
+ */
+static int sgdma_txbusy(struct altera_tse_private *priv)
+{
+	int delay = 0;
+	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+
+	/* if DMA is busy, wait for the current transaction to finish */
+	while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
+		udelay(1);
+
+	if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
+		netdev_err(priv->dev, "timeout waiting for tx dma\n");
+		return 1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ethernet/altera/altera_sgdma.h b/drivers/net/ethernet/altera/altera_sgdma.h
new file mode 100644
index 0000000..07d4717
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_sgdma.h
@@ -0,0 +1,35 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ALTERA_SGDMA_H__
+#define __ALTERA_SGDMA_H__
+
+void sgdma_reset(struct altera_tse_private *);
+void sgdma_enable_txirq(struct altera_tse_private *);
+void sgdma_enable_rxirq(struct altera_tse_private *);
+void sgdma_disable_rxirq(struct altera_tse_private *);
+void sgdma_disable_txirq(struct altera_tse_private *);
+void sgdma_clear_rxirq(struct altera_tse_private *);
+void sgdma_clear_txirq(struct altera_tse_private *);
+int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *);
+u32 sgdma_tx_completions(struct altera_tse_private *);
+int sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *);
+void sgdma_status(struct altera_tse_private *);
+u32 sgdma_rx_status(struct altera_tse_private *);
+int sgdma_initialize(struct altera_tse_private *);
+void sgdma_uninitialize(struct altera_tse_private *);
+
+#endif /*  __ALTERA_SGDMA_H__ */
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
new file mode 100644
index 0000000..ba3334f
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_sgdmahw.h
@@ -0,0 +1,124 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ALTERA_SGDMAHW_H__
+#define __ALTERA_SGDMAHW_H__
+
+/* SGDMA descriptor structure */
+struct sgdma_descrip {
+	unsigned int	raddr; /* address of data to be read */
+	unsigned int	pad1;
+	unsigned int	waddr;
+	unsigned int    pad2;
+	unsigned int	next;
+	unsigned int	pad3;
+	unsigned short  bytes;
+	unsigned char   rburst;
+	unsigned char	wburst;
+	unsigned short	bytes_xferred;	/* 16 bits, bytes xferred */
+
+	/* bit 0: error
+	 * bit 1: length error
+	 * bit 2: crc error
+	 * bit 3: truncated error
+	 * bit 4: phy error
+	 * bit 5: collision error
+	 * bit 6: reserved
+	 * bit 7: status eop for recv case
+	 */
+	unsigned char	status;
+
+	/* bit 0: eop
+	 * bit 1: read_fixed
+	 * bit 2: write fixed
+	 * bits 3,4,5,6: Channel (always 0)
+	 * bit 7: hardware owned
+	 */
+	unsigned char	control;
+} __packed;
+
+
+#define SGDMA_STATUS_ERR		BIT(0)
+#define SGDMA_STATUS_LENGTH_ERR		BIT(1)
+#define SGDMA_STATUS_CRC_ERR		BIT(2)
+#define SGDMA_STATUS_TRUNC_ERR		BIT(3)
+#define SGDMA_STATUS_PHY_ERR		BIT(4)
+#define SGDMA_STATUS_COLL_ERR		BIT(5)
+#define SGDMA_STATUS_EOP		BIT(7)
+
+#define SGDMA_CONTROL_EOP		BIT(0)
+#define SGDMA_CONTROL_RD_FIXED		BIT(1)
+#define SGDMA_CONTROL_WR_FIXED		BIT(2)
+
+/* Channel is always 0, so just zero initialize it */
+
+#define SGDMA_CONTROL_HW_OWNED		BIT(7)
+
+/* SGDMA register space */
+struct sgdma_csr {
+	/* bit 0: error
+	 * bit 1: eop
+	 * bit 2: descriptor completed
+	 * bit 3: chain completed
+	 * bit 4: busy
+	 * remainder reserved
+	 */
+	u32	status;
+	u32	pad1[3];
+
+	/* bit 0: interrupt on error
+	 * bit 1: interrupt on eop
+	 * bit 2: interrupt after every descriptor
+	 * bit 3: interrupt after last descrip in a chain
+	 * bit 4: global interrupt enable
+	 * bit 5: starts descriptor processing
+	 * bit 6: stop core on dma error
+	 * bit 7: interrupt on max descriptors
+	 * bits 8-15: max descriptors to generate interrupt
+	 * bit 16: Software reset
+	 * bit 17: clears owned by hardware if 0, does not clear otherwise
+	 * bit 18: enables descriptor polling mode
+	 * bit 19-26: clocks before polling again
+	 * bit 27-30: reserved
+	 * bit 31: clear interrupt
+	 */
+	u32	control;
+	u32	pad2[3];
+	u32	next_descrip;
+	u32	pad3[3];
+};
+
+
+#define SGDMA_STSREG_ERR	BIT(0) /* Error */
+#define SGDMA_STSREG_EOP	BIT(1) /* EOP */
+#define SGDMA_STSREG_DESCRIP	BIT(2) /* Descriptor completed */
+#define SGDMA_STSREG_CHAIN	BIT(3) /* Chain completed */
+#define SGDMA_STSREG_BUSY	BIT(4) /* Controller busy */
+
+#define SGDMA_CTRLREG_IOE	BIT(0) /* Interrupt on error */
+#define SGDMA_CTRLREG_IOEOP	BIT(1) /* Interrupt on EOP */
+#define SGDMA_CTRLREG_IDESCRIP	BIT(2) /* Interrupt after every descriptor */
+#define SGDMA_CTRLREG_ILASTD	BIT(3) /* Interrupt after last descriptor */
+#define SGDMA_CTRLREG_INTEN	BIT(4) /* Global Interrupt enable */
+#define SGDMA_CTRLREG_START	BIT(5) /* starts descriptor processing */
+#define SGDMA_CTRLREG_STOPERR	BIT(6) /* stop on dma error */
+#define SGDMA_CTRLREG_INTMAX	BIT(7) /* Interrupt on max descriptors */
+#define SGDMA_CTRLREG_RESET	BIT(16)/* Software reset */
+#define SGDMA_CTRLREG_COBHW	BIT(17)/* Clears owned by hardware */
+#define SGDMA_CTRLREG_POLL	BIT(18)/* enables descriptor polling mode */
+#define SGDMA_CTRLREG_CLRINT	BIT(31)/* Clears interrupt */
+
+#endif /* __ALTERA_SGDMAHW_H__ */
-- 
1.7.9.5
