Message-ID: <20170412162538.25302-5-ruxandra.radulescu@nxp.com>
Date:   Wed, 12 Apr 2017 11:25:33 -0500
From:   Ioana Radulescu <ruxandra.radulescu@....com>
To:     <gregkh@...uxfoundation.org>
CC:     <devel@...verdev.osuosl.org>, <linux-kernel@...r.kernel.org>,
        <agraf@...e.de>, <arnd@...db.de>,
        <linux-arm-kernel@...ts.infradead.org>,
        <alexandru.marginean@....com>, <bogdan.hamciuc@....com>,
        <stuyoder@...il.com>, <laurentiu.tudor@....com>,
        <ruxandra.radulescu@....com>, <roy.pledge@....com>,
        <haiying.wang@....com>
Subject: [PATCH v2 4/9] staging: fsl-dpaa2/eth: Add Freescale DPAA2 Ethernet driver

Introduce the DPAA2 Ethernet driver, which manages Datapath
Network Interface (DPNI) objects discovered on the MC bus.

In addition to DPNIs, the Ethernet driver uses several other
MC objects to build a network interface abstraction: buffer
pools (DPBPs), I/O Portals (DPIOs) and concentrators (DPCONs).

A more detailed description of the driver can be found in the
associated README file.

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@....com>
Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc@....com>
---
v2:
 - update #include paths
 - ndo_get_stats64() now returns void
 - skb_csum_off_chk_help_cmn() was removed in 4.10, so we go back to
   using NETIF_F_IP_CSUM/NETIF_F_IPV6_CSUM in netdev features
 - new way of setting MTU min/max limits

 drivers/staging/fsl-dpaa2/ethernet/Makefile    |    2 +-
 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 2455 ++++++++++++++++++++++++
 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h |  304 +++
 3 files changed, 2760 insertions(+), 1 deletion(-)
 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h

diff --git a/drivers/staging/fsl-dpaa2/ethernet/Makefile b/drivers/staging/fsl-dpaa2/ethernet/Makefile
index 83b62644a44b..4897d39a1c21 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/Makefile
+++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
@@ -4,4 +4,4 @@
 
 obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
 
-fsl-dpaa2-eth-objs    := dpni.o
+fsl-dpaa2-eth-objs    := dpaa2-eth.o dpni.o
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
new file mode 100644
index 000000000000..abd700e57aeb
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
@@ -0,0 +1,2455 @@
+/* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/etherdevice.h>
+#include <linux/of_net.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/kthread.h>
+
+#include "../../fsl-mc/include/mc.h"
+#include "../../fsl-mc/include/mc-sys.h"
+#include "dpaa2-eth.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
+
+static void validate_rx_csum(struct dpaa2_eth_priv *priv,
+			     u32 fd_status,
+			     struct sk_buff *skb)
+{
+	skb_checksum_none_assert(skb);
+
+	/* HW checksum validation is disabled, nothing to do here */
+	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
+		return;
+
+	/* Read checksum validation bits */
+	if (!((fd_status & DPAA2_FAS_L3CV) &&
+	      (fd_status & DPAA2_FAS_L4CV)))
+		return;
+
+	/* Inform the stack there's no need to compute L3/L4 csum anymore */
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/* Free a received FD.
+ * Not to be used for Tx conf FDs or on any other paths.
+ */
+static void free_rx_fd(struct dpaa2_eth_priv *priv,
+		       const struct dpaa2_fd *fd,
+		       void *vaddr)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	dma_addr_t addr = dpaa2_fd_get_addr(fd);
+	u8 fd_format = dpaa2_fd_get_format(fd);
+	struct dpaa2_sg_entry *sgt;
+	void *sg_vaddr;
+	int i;
+
+	/* If single buffer frame, just free the data buffer */
+	if (fd_format == dpaa2_fd_single)
+		goto free_buf;
+	else if (fd_format != dpaa2_fd_sg)
+		/* We don't support any other format */
+		return;
+
+	/* For S/G frames, we first need to free all SG entries */
+	sgt = vaddr + dpaa2_fd_get_offset(fd);
+	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
+		addr = dpaa2_sg_get_addr(&sgt[i]);
+		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+				 DMA_FROM_DEVICE);
+
+		sg_vaddr = phys_to_virt(addr);
+		skb_free_frag(sg_vaddr);
+
+		if (dpaa2_sg_is_final(&sgt[i]))
+			break;
+	}
+
+free_buf:
+	skb_free_frag(vaddr);
+}
+
+/* Build a linear skb based on a single-buffer frame descriptor */
+static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
+					struct dpaa2_eth_channel *ch,
+					const struct dpaa2_fd *fd,
+					void *fd_vaddr)
+{
+	struct sk_buff *skb = NULL;
+	u16 fd_offset = dpaa2_fd_get_offset(fd);
+	u32 fd_length = dpaa2_fd_get_len(fd);
+
+	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE +
+			SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+	if (unlikely(!skb))
+		return NULL;
+
+	skb_reserve(skb, fd_offset);
+	skb_put(skb, fd_length);
+
+	ch->buf_count--;
+
+	return skb;
+}
+
+/* Build a non-linear (fragmented) skb based on an S/G table */
+static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
+				      struct dpaa2_eth_channel *ch,
+				      struct dpaa2_sg_entry *sgt)
+{
+	struct sk_buff *skb = NULL;
+	struct device *dev = priv->net_dev->dev.parent;
+	void *sg_vaddr;
+	dma_addr_t sg_addr;
+	u16 sg_offset;
+	u32 sg_length;
+	struct page *page, *head_page;
+	int page_offset;
+	int i;
+
+	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
+		struct dpaa2_sg_entry *sge = &sgt[i];
+
+		/* NOTE: We only support SG entries in dpaa2_sg_single format,
+		 * but this is the only format we may receive from HW anyway
+		 */
+
+		/* Get the address and length from the S/G entry */
+		sg_addr = dpaa2_sg_get_addr(sge);
+		dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
+				 DMA_FROM_DEVICE);
+
+		sg_vaddr = phys_to_virt(sg_addr);
+		sg_length = dpaa2_sg_get_len(sge);
+
+		if (i == 0) {
+			/* We build the skb around the first data buffer */
+			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE +
+				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+			if (unlikely(!skb))
+				return NULL;
+
+			sg_offset = dpaa2_sg_get_offset(sge);
+			skb_reserve(skb, sg_offset);
+			skb_put(skb, sg_length);
+		} else {
+			/* Rest of the data buffers are stored as skb frags */
+			page = virt_to_page(sg_vaddr);
+			head_page = virt_to_head_page(sg_vaddr);
+
+			/* Offset in page (which may be compound).
+			 * Data in subsequent SG entries is stored from the
+			 * beginning of the buffer, so we don't need to add the
+			 * sg_offset.
+			 */
+			page_offset = ((unsigned long)sg_vaddr &
+				(PAGE_SIZE - 1)) +
+				(page_address(page) - page_address(head_page));
+
+			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
+					sg_length, DPAA2_ETH_RX_BUF_SIZE);
+		}
+
+		if (dpaa2_sg_is_final(sge))
+			break;
+	}
+
+	/* Count all data buffers + SG table buffer */
+	ch->buf_count -= i + 2;
+
+	return skb;
+}
+
+/* Main Rx frame processing routine */
+static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+			 struct dpaa2_eth_channel *ch,
+			 const struct dpaa2_fd *fd,
+			 struct napi_struct *napi)
+{
+	dma_addr_t addr = dpaa2_fd_get_addr(fd);
+	u8 fd_format = dpaa2_fd_get_format(fd);
+	void *vaddr;
+	struct sk_buff *skb;
+	struct rtnl_link_stats64 *percpu_stats;
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpaa2_fas *fas;
+	u32 status = 0;
+
+	dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
+	vaddr = phys_to_virt(addr);
+
+	prefetch(vaddr + priv->buf_layout.private_data_size);
+	prefetch(vaddr + dpaa2_fd_get_offset(fd));
+
+	percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+	if (fd_format == dpaa2_fd_single) {
+		skb = build_linear_skb(priv, ch, fd, vaddr);
+	} else if (fd_format == dpaa2_fd_sg) {
+		struct dpaa2_sg_entry *sgt =
+				vaddr + dpaa2_fd_get_offset(fd);
+		skb = build_frag_skb(priv, ch, sgt);
+		skb_free_frag(vaddr);
+	} else {
+		/* We don't support any other format */
+		goto err_frame_format;
+	}
+
+	if (unlikely(!skb))
+		goto err_build_skb;
+
+	prefetch(skb->data);
+
+	/* Check if we need to validate the L4 csum */
+	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
+		fas = (struct dpaa2_fas *)
+				(vaddr + priv->buf_layout.private_data_size);
+		status = le32_to_cpu(fas->status);
+		validate_rx_csum(priv, status, skb);
+	}
+
+	skb->protocol = eth_type_trans(skb, priv->net_dev);
+
+	percpu_stats->rx_packets++;
+	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+
+	if (priv->net_dev->features & NETIF_F_GRO)
+		napi_gro_receive(napi, skb);
+	else
+		netif_receive_skb(skb);
+
+	return;
+
+err_build_skb:
+	free_rx_fd(priv, fd, vaddr);
+err_frame_format:
+	percpu_stats->rx_dropped++;
+}
+
+/* Consume all frames pull-dequeued into the store. This is the simplest way to
+ * make sure we don't accidentally issue another volatile dequeue which would
+ * overwrite (leak) frames already in the store.
+ *
+ * Observance of NAPI budget is not our concern, leaving that to the caller.
+ */
+static int consume_frames(struct dpaa2_eth_channel *ch)
+{
+	struct dpaa2_eth_priv *priv = ch->priv;
+	struct dpaa2_eth_fq *fq;
+	struct dpaa2_dq *dq;
+	const struct dpaa2_fd *fd;
+	int cleaned = 0;
+	int is_last;
+
+	do {
+		dq = dpaa2_io_store_next(ch->store, &is_last);
+		if (unlikely(!dq)) {
+			/* If we're here, we *must* have placed a
+			 * volatile dequeue command, so keep reading through
+			 * the store until we get some sort of valid response
+			 * token (either a valid frame or an "empty dequeue")
+			 */
+			continue;
+		}
+
+		fd = dpaa2_dq_fd(dq);
+		fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
+
+		fq->consume(priv, ch, fd, &ch->napi);
+		cleaned++;
+	} while (!is_last);
+
+	return cleaned;
+}
+
+/* Create a frame descriptor based on a fragmented skb */
+static int build_sg_fd(struct dpaa2_eth_priv *priv,
+		       struct sk_buff *skb,
+		       struct dpaa2_fd *fd)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	void *sgt_buf = NULL;
+	void *hwa;
+	dma_addr_t addr;
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+	struct dpaa2_sg_entry *sgt;
+	int i, err;
+	int sgt_buf_size;
+	struct scatterlist *scl, *crt_scl;
+	int num_sg;
+	int num_dma_bufs;
+	struct dpaa2_eth_swa *swa;
+
+	/* Create and map scatterlist.
+	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
+	 * to go beyond nr_frags+1.
+	 * Note: We don't support chained scatterlists
+	 */
+	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
+		return -EINVAL;
+
+	scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
+	if (unlikely(!scl))
+		return -ENOMEM;
+
+	sg_init_table(scl, nr_frags + 1);
+	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
+	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE);
+	if (unlikely(!num_dma_bufs)) {
+		err = -ENOMEM;
+		goto dma_map_sg_failed;
+	}
+
+	/* Prepare the HW SGT structure */
+	sgt_buf_size = priv->tx_data_offset +
+		       sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
+	sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
+	if (unlikely(!sgt_buf)) {
+		err = -ENOMEM;
+		goto sgt_buf_alloc_failed;
+	}
+	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
+
+	/* PTA from egress side is passed as is to the confirmation side so
+	 * we need to clear some fields here in order to find consistent values
+	 * on TX confirmation. We are clearing FAS (Frame Annotation Status)
+	 * field from the hardware annotation area
+	 */
+	hwa = sgt_buf + priv->buf_layout.private_data_size;
+	memset(hwa + DPAA2_FAS_OFFSET, 0, DPAA2_FAS_SIZE);
+
+	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+	/* Fill in the HW SGT structure.
+	 *
+	 * sgt_buf is zeroed out, so the following fields are implicit
+	 * in all sgt entries:
+	 *   - offset is 0
+	 *   - format is 'dpaa2_sg_single'
+	 */
+	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
+		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
+		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
+	}
+	dpaa2_sg_set_final(&sgt[i - 1], true);
+
+	/* Store the skb backpointer in the SGT buffer.
+	 * Fit the scatterlist and the number of buffers alongside the
+	 * skb backpointer in the software annotation area. We'll need
+	 * all of them on Tx Conf.
+	 */
+	swa = (struct dpaa2_eth_swa *)sgt_buf;
+	swa->skb = skb;
+	swa->scl = scl;
+	swa->num_sg = num_sg;
+	swa->num_dma_bufs = num_dma_bufs;
+
+	/* Separately map the SGT buffer */
+	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, addr))) {
+		err = -ENOMEM;
+		goto dma_map_single_failed;
+	}
+	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+	dpaa2_fd_set_addr(fd, addr);
+	dpaa2_fd_set_len(fd, skb->len);
+	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
+			  DPAA2_FD_CTRL_PTV1);
+
+	return 0;
+
+dma_map_single_failed:
+	kfree(sgt_buf);
+sgt_buf_alloc_failed:
+	dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
+dma_map_sg_failed:
+	kfree(scl);
+	return err;
+}
+
+/* Create a frame descriptor based on a linear skb */
+static int build_single_fd(struct dpaa2_eth_priv *priv,
+			   struct sk_buff *skb,
+			   struct dpaa2_fd *fd)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	u8 *buffer_start;
+	void *hwa;
+	struct sk_buff **skbh;
+	dma_addr_t addr;
+
+	buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
+				 DPAA2_ETH_TX_BUF_ALIGN,
+				 DPAA2_ETH_TX_BUF_ALIGN);
+
+	/* PTA from egress side is passed as is to the confirmation side so
+	 * we need to clear some fields here in order to find consistent values
+	 * on TX confirmation. We are clearing FAS (Frame Annotation Status)
+	 * field from the hardware annotation area
+	 */
+	hwa = buffer_start + priv->buf_layout.private_data_size;
+	memset(hwa + DPAA2_FAS_OFFSET, 0, DPAA2_FAS_SIZE);
+
+	/* Store a backpointer to the skb at the beginning of the buffer
+	 * (in the private data area) such that we can release it
+	 * on Tx confirm
+	 */
+	skbh = (struct sk_buff **)buffer_start;
+	*skbh = skb;
+
+	addr = dma_map_single(dev, buffer_start,
+			      skb_tail_pointer(skb) - buffer_start,
+			      DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, addr)))
+		return -ENOMEM;
+
+	dpaa2_fd_set_addr(fd, addr);
+	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
+	dpaa2_fd_set_len(fd, skb->len);
+	dpaa2_fd_set_format(fd, dpaa2_fd_single);
+	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
+			  DPAA2_FD_CTRL_PTV1);
+
+	return 0;
+}
+
+/* FD freeing routine on the Tx path
+ *
+ * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
+ * back-pointed to is also freed.
+ * This can be called either from dpaa2_eth_tx_conf() or on the error path of
+ * dpaa2_eth_tx().
+ * Optionally, return the frame annotation status word (FAS), which needs
+ * to be checked if we're on the confirmation path.
+ */
+static void free_tx_fd(const struct dpaa2_eth_priv *priv,
+		       const struct dpaa2_fd *fd,
+		       u32 *status)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	dma_addr_t fd_addr;
+	struct sk_buff **skbh, *skb;
+	unsigned char *buffer_start;
+	int unmap_size;
+	struct scatterlist *scl;
+	int num_sg, num_dma_bufs;
+	struct dpaa2_eth_swa *swa;
+	u8 fd_format = dpaa2_fd_get_format(fd);
+	struct dpaa2_fas *fas;
+
+	fd_addr = dpaa2_fd_get_addr(fd);
+	skbh = phys_to_virt(fd_addr);
+
+	if (fd_format == dpaa2_fd_single) {
+		skb = *skbh;
+		buffer_start = (unsigned char *)skbh;
+		/* Accessing the skb buffer is safe before dma unmap, because
+		 * we didn't map the actual skb shell.
+		 */
+		dma_unmap_single(dev, fd_addr,
+				 skb_tail_pointer(skb) - buffer_start,
+				 DMA_TO_DEVICE);
+	} else if (fd_format == dpaa2_fd_sg) {
+		swa = (struct dpaa2_eth_swa *)skbh;
+		skb = swa->skb;
+		scl = swa->scl;
+		num_sg = swa->num_sg;
+		num_dma_bufs = swa->num_dma_bufs;
+
+		/* Unmap the scatterlist */
+		dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
+		kfree(scl);
+
+		/* Unmap the SGT buffer */
+		unmap_size = priv->tx_data_offset +
+		       sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
+		dma_unmap_single(dev, fd_addr, unmap_size, DMA_TO_DEVICE);
+	} else {
+		/* Unsupported format, mark it as errored and give up */
+		if (status)
+			*status = ~0;
+		return;
+	}
+
+	/* Read the status from the Frame Annotation after we unmap the first
+	 * buffer but before we free it. The caller function is responsible
+	 * for checking the status value.
+	 */
+	if (status && (dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
+		fas = (struct dpaa2_fas *)
+			((void *)skbh + priv->buf_layout.private_data_size);
+		*status = le32_to_cpu(fas->status);
+	}
+
+	/* Free SGT buffer kmalloc'ed on tx */
+	if (fd_format != dpaa2_fd_single)
+		kfree(skbh);
+
+	/* Move on with skb release */
+	dev_kfree_skb(skb);
+}
+
+static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct dpaa2_fd fd;
+	struct rtnl_link_stats64 *percpu_stats;
+	struct dpaa2_eth_fq *fq;
+	u16 queue_mapping;
+	int err, i;
+
+	percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+	if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
+		struct sk_buff *ns;
+
+		ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
+		if (unlikely(!ns)) {
+			percpu_stats->tx_dropped++;
+			goto err_alloc_headroom;
+		}
+		dev_kfree_skb(skb);
+		skb = ns;
+	}
+
+	/* We'll be holding a back-reference to the skb until Tx Confirmation;
+	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
+	 */
+	skb = skb_unshare(skb, GFP_ATOMIC);
+	if (unlikely(!skb)) {
+		/* skb_unshare() has already freed the skb */
+		percpu_stats->tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	/* Setup the FD fields */
+	memset(&fd, 0, sizeof(fd));
+
+	if (skb_is_nonlinear(skb))
+		err = build_sg_fd(priv, skb, &fd);
+	else
+		err = build_single_fd(priv, skb, &fd);
+	if (unlikely(err)) {
+		percpu_stats->tx_dropped++;
+		goto err_build_fd;
+	}
+
+	/* TxConf FQ selection primarily based on cpu affinity; this is
+	 * non-migratable context, so it's safe to call smp_processor_id().
+	 */
+	queue_mapping = smp_processor_id() % dpaa2_eth_queue_count(priv);
+	fq = &priv->fq[queue_mapping];
+	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
+		err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
+						  fq->tx_qdbin, &fd);
+		if (err != -EBUSY)
+			break;
+	}
+	if (unlikely(err < 0)) {
+		percpu_stats->tx_errors++;
+		/* Clean up everything, including freeing the skb */
+		free_tx_fd(priv, &fd, NULL);
+	} else {
+		percpu_stats->tx_packets++;
+		percpu_stats->tx_bytes += skb->len;
+	}
+
+	return NETDEV_TX_OK;
+
+err_build_fd:
+err_alloc_headroom:
+	dev_kfree_skb(skb);
+
+	return NETDEV_TX_OK;
+}
+
+/* Tx confirmation frame processing routine */
+static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
+			      struct dpaa2_eth_channel *ch,
+			      const struct dpaa2_fd *fd,
+			      struct napi_struct *napi __always_unused)
+{
+	struct rtnl_link_stats64 *percpu_stats;
+	u32 status = 0;
+
+	free_tx_fd(priv, fd, &status);
+
+	if (unlikely(status & DPAA2_ETH_TXCONF_ERR_MASK)) {
+		percpu_stats = this_cpu_ptr(priv->percpu_stats);
+		/* Tx-conf logically pertains to the egress path. */
+		percpu_stats->tx_errors++;
+	}
+}
+
+static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
+{
+	int err;
+
+	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+			       DPNI_OFF_RX_L3_CSUM, enable);
+	if (err) {
+		netdev_err(priv->net_dev,
+			   "dpni_set_offload(RX_L3_CSUM) failed\n");
+		return err;
+	}
+
+	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+			       DPNI_OFF_RX_L4_CSUM, enable);
+	if (err) {
+		netdev_err(priv->net_dev,
+			   "dpni_set_offload(RX_L4_CSUM) failed\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
+{
+	int err;
+
+	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+			       DPNI_OFF_TX_L3_CSUM, enable);
+	if (err) {
+		netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
+		return err;
+	}
+
+	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+			       DPNI_OFF_TX_L4_CSUM, enable);
+	if (err) {
+		netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
+		return err;
+	}
+
+	return 0;
+}
+
+/* Perform a single release command to add buffers
+ * to the specified buffer pool
+ */
+static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+	void *buf;
+	dma_addr_t addr;
+	int i;
+
+	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
+		/* Allocate buffer visible to WRIOP + skb shared info +
+		 * alignment padding
+		 */
+		buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE);
+		if (unlikely(!buf))
+			goto err_alloc;
+
+		buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN);
+
+		addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
+				      DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(dev, addr)))
+			goto err_map;
+
+		buf_array[i] = addr;
+	}
+
+release_bufs:
+	/* In case the portal is busy, retry until successful.
+	 * The buffer release function would only fail if the QBMan portal
+	 * was busy, which implies portal contention (i.e. more CPUs than
+	 * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
+	 * there is little we can realistically do, short of giving up -
+	 * in which case we'd risk depleting the buffer pool and never again
+	 * receiving the Rx interrupt which would kick-start the refill logic.
+	 * So just keep retrying, at the risk of being moved to ksoftirqd.
+	 */
+	while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
+		cpu_relax();
+	return i;
+
+err_map:
+	skb_free_frag(buf);
+err_alloc:
+	if (i)
+		goto release_bufs;
+
+	return 0;
+}
+
+static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
+{
+	int i, j;
+	int new_count;
+
+	/* This is the lazy seeding of Rx buffer pools.
+	 * add_bufs() is also used on the Rx hotpath and calls
+	 * napi_alloc_frag(). The trouble with that is that it in turn ends up
+	 * calling this_cpu_ptr(), which mandates execution in atomic context.
+	 * Rather than splitting up the code, do a one-off preempt disable.
+	 */
+	preempt_disable();
+	for (j = 0; j < priv->num_channels; j++) {
+		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
+		     i += DPAA2_ETH_BUFS_PER_CMD) {
+			new_count = add_bufs(priv, bpid);
+			priv->channel[j]->buf_count += new_count;
+
+			if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
+				preempt_enable();
+				return -ENOMEM;
+			}
+		}
+	}
+	preempt_enable();
+
+	return 0;
+}
+
+/**
+ * Drain the specified number of buffers from the DPNI's private buffer pool.
+ * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
+ */
+static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+	void *vaddr;
+	int ret, i;
+
+	do {
+		ret = dpaa2_io_service_acquire(NULL, priv->dpbp_attrs.bpid,
+					       buf_array, count);
+		if (ret < 0) {
+			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
+			return;
+		}
+		for (i = 0; i < ret; i++) {
+			/* Same logic as on regular Rx path */
+			dma_unmap_single(dev, buf_array[i],
+					 DPAA2_ETH_RX_BUF_SIZE,
+					 DMA_FROM_DEVICE);
+			vaddr = phys_to_virt(buf_array[i]);
+			skb_free_frag(vaddr);
+		}
+	} while (ret);
+}
+
+static void drain_pool(struct dpaa2_eth_priv *priv)
+{
+	int i;
+
+	drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
+	drain_bufs(priv, 1);
+
+	for (i = 0; i < priv->num_channels; i++)
+		priv->channel[i]->buf_count = 0;
+}
+
+/* Function is called from softirq context only, so we don't need to guard
+ * the access to percpu count
+ */
+static int refill_pool(struct dpaa2_eth_priv *priv,
+		       struct dpaa2_eth_channel *ch,
+		       u16 bpid)
+{
+	int new_count;
+
+	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
+		return 0;
+
+	do {
+		new_count = add_bufs(priv, bpid);
+		if (unlikely(!new_count)) {
+			/* Out of memory; abort for now, we'll try later on */
+			break;
+		}
+		ch->buf_count += new_count;
+	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
+
+	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int pull_channel(struct dpaa2_eth_channel *ch)
+{
+	int err;
+
+	/* Retry while portal is busy */
+	do {
+		err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
+		cpu_relax();
+	} while (err == -EBUSY);
+
+	return err;
+}
+
+/* NAPI poll routine
+ *
+ * Frames are dequeued from the QMan channel associated with this NAPI context.
+ * Rx, Tx confirmation and (if configured) Rx error frames all count
+ * towards the NAPI budget.
+ */
+static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
+{
+	struct dpaa2_eth_channel *ch;
+	int cleaned = 0, store_cleaned;
+	struct dpaa2_eth_priv *priv;
+	int err;
+
+	ch = container_of(napi, struct dpaa2_eth_channel, napi);
+	priv = ch->priv;
+
+	while (cleaned < budget) {
+		err = pull_channel(ch);
+		if (unlikely(err))
+			break;
+
+		/* Refill pool if appropriate */
+		refill_pool(priv, ch, priv->dpbp_attrs.bpid);
+
+		store_cleaned = consume_frames(ch);
+		cleaned += store_cleaned;
+
+		/* If we have enough budget left for a full store,
+		 * try a new pull dequeue, otherwise we're done here
+		 */
+		if (store_cleaned == 0 ||
+		    cleaned > budget - DPAA2_ETH_STORE_SIZE)
+			break;
+	}
+
+	if (cleaned < budget) {
+		napi_complete_done(napi, cleaned);
+		/* Re-enable data available notifications */
+		do {
+			err = dpaa2_io_service_rearm(NULL, &ch->nctx);
+			cpu_relax();
+		} while (err == -EBUSY);
+	}
+
+	return cleaned;
+}
+
+static void enable_ch_napi(struct dpaa2_eth_priv *priv)
+{
+	struct dpaa2_eth_channel *ch;
+	int i;
+
+	for (i = 0; i < priv->num_channels; i++) {
+		ch = priv->channel[i];
+		napi_enable(&ch->napi);
+	}
+}
+
+static void disable_ch_napi(struct dpaa2_eth_priv *priv)
+{
+	struct dpaa2_eth_channel *ch;
+	int i;
+
+	for (i = 0; i < priv->num_channels; i++) {
+		ch = priv->channel[i];
+		napi_disable(&ch->napi);
+	}
+}
+
+static int link_state_update(struct dpaa2_eth_priv *priv)
+{
+	struct dpni_link_state state;
+	int err;
+
+	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+	if (unlikely(err)) {
+		netdev_err(priv->net_dev,
+			   "dpni_get_link_state() failed\n");
+		return err;
+	}
+
+	/* Check link state; speed / duplex changes are not treated yet */
+	if (priv->link_state.up == state.up)
+		return 0;
+
+	priv->link_state = state;
+	if (state.up) {
+		netif_carrier_on(priv->net_dev);
+		netif_tx_start_all_queues(priv->net_dev);
+	} else {
+		netif_tx_stop_all_queues(priv->net_dev);
+		netif_carrier_off(priv->net_dev);
+	}
+
+	netdev_info(priv->net_dev, "Link Event: state %s\n",
+		    state.up ? "up" : "down");
+
+	return 0;
+}
+
+static int dpaa2_eth_open(struct net_device *net_dev)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	int err;
+
+	err = seed_pool(priv, priv->dpbp_attrs.bpid);
+	if (err) {
+		/* Not much to do; the buffer pool, though not filled up,
+		 * may still contain some buffers which would enable us
+		 * to limp on.
+		 */
+		netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
+			   priv->dpbp_dev->obj_desc.id, priv->dpbp_attrs.bpid);
+	}
+
+	/* We'll only start the txqs when the link is actually ready; make sure
+	 * we don't race against the link up notification, which may come
+	 * immediately after dpni_enable();
+	 */
+	netif_tx_stop_all_queues(net_dev);
+	enable_ch_napi(priv);
+	/* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
+	 * return true and cause 'ip link show' to report the LOWER_UP flag,
+	 * even though the link notification wasn't even received.
+	 */
+	netif_carrier_off(net_dev);
+
+	err = dpni_enable(priv->mc_io, 0, priv->mc_token);
+	if (err < 0) {
+		netdev_err(net_dev, "dpni_enable() failed\n");
+		goto enable_err;
+	}
+
+	/* If the DPMAC object has already processed the link up interrupt,
+	 * we have to learn the link state ourselves.
+	 */
+	err = link_state_update(priv);
+	if (err < 0) {
+		netdev_err(net_dev, "Can't update link state\n");
+		goto link_state_err;
+	}
+
+	return 0;
+
+link_state_err:
+enable_err:
+	disable_ch_napi(priv);
+	drain_pool(priv);
+	return err;
+}
+
+/* The DPIO store must be empty when we call this,
+ * at the end of every NAPI cycle.
+ */
+static u32 drain_channel(struct dpaa2_eth_priv *priv,
+			 struct dpaa2_eth_channel *ch)
+{
+	u32 drained = 0, total = 0;
+
+	do {
+		pull_channel(ch);
+		drained = consume_frames(ch);
+		total += drained;
+	} while (drained);
+
+	return total;
+}
+
+static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
+{
+	struct dpaa2_eth_channel *ch;
+	int i;
+	u32 drained = 0;
+
+	for (i = 0; i < priv->num_channels; i++) {
+		ch = priv->channel[i];
+		drained += drain_channel(priv, ch);
+	}
+
+	return drained;
+}
+
+static int dpaa2_eth_stop(struct net_device *net_dev)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	int dpni_enabled;
+	int retries = 10;
+	u32 drained;
+
+	netif_tx_stop_all_queues(net_dev);
+	netif_carrier_off(net_dev);
+
+	/* Loop while dpni_disable() attempts to drain the egress FQs
+	 * and confirm them back to us.
+	 */
+	do {
+		dpni_disable(priv->mc_io, 0, priv->mc_token);
+		dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
+		if (dpni_enabled)
+			/* Allow the hardware some slack */
+			msleep(100);
+	} while (dpni_enabled && --retries);
+	if (!retries) {
+		netdev_warn(net_dev, "Retry count exceeded while disabling DPNI\n");
+		/* Must go on and disable NAPI nonetheless, so we don't crash at
+		 * the next "ifconfig up"
+		 */
+	}
+
+	/* Wait for NAPI to complete on every core and disable it.
+	 * In particular, this will also prevent NAPI from being rescheduled if
+	 * a new CDAN is serviced, effectively discarding the CDAN. We therefore
+	 * don't even need to disarm the channels, except perhaps for the case
+	 * of a huge coalescing value.
+	 */
+	disable_ch_napi(priv);
+
+	/* Manually drain the Rx and TxConf queues */
+	drained = drain_ingress_frames(priv);
+	if (drained)
+		netdev_dbg(net_dev, "Drained %d frames.\n", drained);
+
+	/* Empty the buffer pool */
+	drain_pool(priv);
+
+	return 0;
+}
+
+static int dpaa2_eth_init(struct net_device *net_dev)
+{
+	u64 supported = 0;
+	u64 not_supported = 0;
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	u32 options = priv->dpni_attrs.options;
+
+	/* Capabilities listing */
+	supported |= IFF_LIVE_ADDR_CHANGE;
+
+	if (options & DPNI_OPT_NO_MAC_FILTER)
+		not_supported |= IFF_UNICAST_FLT;
+	else
+		supported |= IFF_UNICAST_FLT;
+
+	net_dev->priv_flags |= supported;
+	net_dev->priv_flags &= ~not_supported;
+
+	/* Features */
+	net_dev->features = NETIF_F_RXCSUM |
+			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+			    NETIF_F_SG | NETIF_F_HIGHDMA |
+			    NETIF_F_LLTX;
+	net_dev->hw_features = net_dev->features;
+
+	return 0;
+}
+
+static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct device *dev = net_dev->dev.parent;
+	int err;
+
+	err = eth_mac_addr(net_dev, addr);
+	if (err < 0) {
+		dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
+		return err;
+	}
+
+	err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
+					net_dev->dev_addr);
+	if (err) {
+		dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+/** Fill in counters maintained by the GPP driver. These may be different from
+ * the hardware counters obtained by ethtool.
+ */
+void dpaa2_eth_get_stats(struct net_device *net_dev,
+			 struct rtnl_link_stats64 *stats)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct rtnl_link_stats64 *percpu_stats;
+	u64 *cpustats;
+	u64 *netstats = (u64 *)stats;
+	int i, j;
+	int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
+
+	for_each_possible_cpu(i) {
+		percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
+		cpustats = (u64 *)percpu_stats;
+		for (j = 0; j < num; j++)
+			netstats[j] += cpustats[j];
+	}
+}
+
+static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	int err;
+
+	/* Set the maximum Rx frame length to match the transmit side;
+	 * account for L2 headers when computing the MFL
+	 */
+	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
+					(u16)DPAA2_ETH_L2_MAX_FRM(mtu));
+	if (err) {
+		netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
+		return err;
+	}
+
+	net_dev->mtu = mtu;
+	return 0;
+}
+
+/* Copy mac unicast addresses from @net_dev to @priv.
+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
+ */
+static void add_uc_hw_addr(const struct net_device *net_dev,
+			   struct dpaa2_eth_priv *priv)
+{
+	struct netdev_hw_addr *ha;
+	int err;
+
+	netdev_for_each_uc_addr(ha, net_dev) {
+		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
+					ha->addr);
+		if (err)
+			netdev_warn(priv->net_dev,
+				    "Could not add ucast MAC %pM to the filtering table (err %d)\n",
+				    ha->addr, err);
+	}
+}
+
+/* Copy mac multicast addresses from @net_dev to @priv
+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
+ */
+static void add_mc_hw_addr(const struct net_device *net_dev,
+			   struct dpaa2_eth_priv *priv)
+{
+	struct netdev_hw_addr *ha;
+	int err;
+
+	netdev_for_each_mc_addr(ha, net_dev) {
+		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
+					ha->addr);
+		if (err)
+			netdev_warn(priv->net_dev,
+				    "Could not add mcast MAC %pM to the filtering table (err %d)\n",
+				    ha->addr, err);
+	}
+}
+
+static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	int uc_count = netdev_uc_count(net_dev);
+	int mc_count = netdev_mc_count(net_dev);
+	u8 max_mac = priv->dpni_attrs.mac_filter_entries;
+	u32 options = priv->dpni_attrs.options;
+	u16 mc_token = priv->mc_token;
+	struct fsl_mc_io *mc_io = priv->mc_io;
+	int err;
+
+	/* Basic sanity checks; these probably indicate a misconfiguration */
+	if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
+		netdev_info(net_dev,
+			    "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
+			    max_mac);
+
+	/* Force promiscuous if the uc or mc counts exceed our capabilities. */
+	if (uc_count > max_mac) {
+		netdev_info(net_dev,
+			    "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
+			    uc_count, max_mac);
+		goto force_promisc;
+	}
+	if (mc_count + uc_count > max_mac) {
+		netdev_info(net_dev,
+			    "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
+			    uc_count + mc_count, max_mac);
+		goto force_mc_promisc;
+	}
+
+	/* Adjust promisc settings due to flag combinations */
+	if (net_dev->flags & IFF_PROMISC)
+		goto force_promisc;
+	if (net_dev->flags & IFF_ALLMULTI) {
+		/* First, rebuild unicast filtering table. This should be done
+		 * in promisc mode, in order to avoid frame loss while we
+		 * progressively add entries to the table.
+		 * We don't know whether we had been in promisc already, and
+		 * making an MC call to find out is expensive; so set uc promisc
+		 * nonetheless.
+		 */
+		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
+		if (err)
+			netdev_warn(net_dev, "Can't set uc promisc\n");
+
+		/* Actual uc table reconstruction. */
+		err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
+		if (err)
+			netdev_warn(net_dev, "Can't clear uc filters\n");
+		add_uc_hw_addr(net_dev, priv);
+
+		/* Finally, clear uc promisc and set mc promisc as requested. */
+		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
+		if (err)
+			netdev_warn(net_dev, "Can't clear uc promisc\n");
+		goto force_mc_promisc;
+	}
+
+	/* Neither unicast, nor multicast promisc will be on... eventually.
+	 * For now, rebuild mac filtering tables while forcing both of them on.
+	 */
+	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
+	if (err)
+		netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
+	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
+	if (err)
+		netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
+
+	/* Actual mac filtering tables reconstruction */
+	err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
+	if (err)
+		netdev_warn(net_dev, "Can't clear mac filters\n");
+	add_mc_hw_addr(net_dev, priv);
+	add_uc_hw_addr(net_dev, priv);
+
+	/* Now we can clear both ucast and mcast promisc, without the risk
+	 * of dropping legitimate frames anymore.
+	 */
+	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
+	if (err)
+		netdev_warn(net_dev, "Can't clear ucast promisc\n");
+	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
+	if (err)
+		netdev_warn(net_dev, "Can't clear mcast promisc\n");
+
+	return;
+
+force_promisc:
+	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
+	if (err)
+		netdev_warn(net_dev, "Can't set ucast promisc\n");
+force_mc_promisc:
+	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
+	if (err)
+		netdev_warn(net_dev, "Can't set mcast promisc\n");
+}
+
+static int dpaa2_eth_set_features(struct net_device *net_dev,
+				  netdev_features_t features)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	netdev_features_t changed = features ^ net_dev->features;
+	bool enable;
+	int err;
+
+	if (changed & NETIF_F_RXCSUM) {
+		enable = !!(features & NETIF_F_RXCSUM);
+		err = set_rx_csum(priv, enable);
+		if (err)
+			return err;
+	}
+
+	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
+		enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+		err = set_tx_csum(priv, enable);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static const struct net_device_ops dpaa2_eth_ops = {
+	.ndo_open = dpaa2_eth_open,
+	.ndo_start_xmit = dpaa2_eth_tx,
+	.ndo_stop = dpaa2_eth_stop,
+	.ndo_init = dpaa2_eth_init,
+	.ndo_set_mac_address = dpaa2_eth_set_addr,
+	.ndo_get_stats64 = dpaa2_eth_get_stats,
+	.ndo_change_mtu = dpaa2_eth_change_mtu,
+	.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
+	.ndo_set_features = dpaa2_eth_set_features,
+};
+
+static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
+{
+	struct dpaa2_eth_channel *ch;
+
+	ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
+	napi_schedule_irqoff(&ch->napi);
+}
+
+/* Allocate and configure a DPCON object */
+static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
+{
+	struct fsl_mc_device *dpcon;
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpcon_attr attrs;
+	int err;
+
+	err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
+				     FSL_MC_POOL_DPCON, &dpcon);
+	if (err) {
+		dev_info(dev, "Not enough DPCONs, will go on as-is\n");
+		return NULL;
+	}
+
+	err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
+	if (err) {
+		dev_err(dev, "dpcon_open() failed\n");
+		goto err_open;
+	}
+
+	err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
+	if (err) {
+		dev_err(dev, "dpcon_reset() failed\n");
+		goto err_reset;
+	}
+
+	err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
+	if (err) {
+		dev_err(dev, "dpcon_get_attributes() failed\n");
+		goto err_get_attr;
+	}
+
+	err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
+	if (err) {
+		dev_err(dev, "dpcon_enable() failed\n");
+		goto err_enable;
+	}
+
+	return dpcon;
+
+err_enable:
+err_get_attr:
+err_reset:
+	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
+err_open:
+	fsl_mc_object_free(dpcon);
+
+	return NULL;
+}
+
+static void free_dpcon(struct dpaa2_eth_priv *priv,
+		       struct fsl_mc_device *dpcon)
+{
+	dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
+	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
+	fsl_mc_object_free(dpcon);
+}
+
+static struct dpaa2_eth_channel *
+alloc_channel(struct dpaa2_eth_priv *priv)
+{
+	struct dpaa2_eth_channel *channel;
+	struct dpcon_attr attr;
+	struct device *dev = priv->net_dev->dev.parent;
+	int err;
+
+	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+	if (!channel)
+		return NULL;
+
+	channel->dpcon = setup_dpcon(priv);
+	if (!channel->dpcon)
+		goto err_setup;
+
+	err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
+				   &attr);
+	if (err) {
+		dev_err(dev, "dpcon_get_attributes() failed\n");
+		goto err_get_attr;
+	}
+
+	channel->dpcon_id = attr.id;
+	channel->ch_id = attr.qbman_ch_id;
+	channel->priv = priv;
+
+	return channel;
+
+err_get_attr:
+	free_dpcon(priv, channel->dpcon);
+err_setup:
+	kfree(channel);
+	return NULL;
+}
+
+static void free_channel(struct dpaa2_eth_priv *priv,
+			 struct dpaa2_eth_channel *channel)
+{
+	free_dpcon(priv, channel->dpcon);
+	kfree(channel);
+}
+
+/* DPIO setup: allocate and configure QBMan channels, set up core affinity
+ * and register data availability notifications
+ */
+static int setup_dpio(struct dpaa2_eth_priv *priv)
+{
+	struct dpaa2_io_notification_ctx *nctx;
+	struct dpaa2_eth_channel *channel;
+	struct dpcon_notification_cfg dpcon_notif_cfg;
+	struct device *dev = priv->net_dev->dev.parent;
+	int i, err;
+
+	/* We want the ability to spread ingress traffic (RX, TX conf) to as
+	 * many cores as possible, so we need one channel for each core
+	 * (unless there are fewer queues than cores, in which case the extra
+	 * channels would be wasted).
+	 * Allocate one channel per core and register it to the core's
+	 * affine DPIO. If not enough channels are available for all cores
+	 * or if some cores don't have an affine DPIO, there will be no
+	 * ingress frame processing on those cores.
+	 */
+	cpumask_clear(&priv->dpio_cpumask);
+	for_each_online_cpu(i) {
+		/* Try to allocate a channel */
+		channel = alloc_channel(priv);
+		if (!channel) {
+			dev_info(dev,
+				 "No affine channel for cpu %d and above\n", i);
+			goto err_alloc_ch;
+		}
+
+		priv->channel[priv->num_channels] = channel;
+
+		nctx = &channel->nctx;
+		nctx->is_cdan = 1;
+		nctx->cb = cdan_cb;
+		nctx->id = channel->ch_id;
+		nctx->desired_cpu = i;
+
+		/* Register the new context */
+		err = dpaa2_io_service_register(NULL, nctx);
+		if (err) {
+			dev_info(dev, "No affine DPIO for cpu %d\n", i);
+			/* If no affine DPIO for this core, there's probably
+			 * none available for next cores either.
+			 */
+			goto err_service_reg;
+		}
+
+		/* Register DPCON notification with MC */
+		dpcon_notif_cfg.dpio_id = nctx->dpio_id;
+		dpcon_notif_cfg.priority = 0;
+		dpcon_notif_cfg.user_ctx = nctx->qman64;
+		err = dpcon_set_notification(priv->mc_io, 0,
+					     channel->dpcon->mc_handle,
+					     &dpcon_notif_cfg);
+		if (err) {
+			dev_err(dev, "dpcon_set_notification() failed\n");
+			goto err_set_cdan;
+		}
+
+		/* If we managed to allocate a channel and also found an affine
+		 * DPIO for this core, add it to the final mask
+		 */
+		cpumask_set_cpu(i, &priv->dpio_cpumask);
+		priv->num_channels++;
+
+		/* Stop if we already have enough channels to accommodate all
+		 * RX and TX conf queues
+		 */
+		if (priv->num_channels == dpaa2_eth_queue_count(priv))
+			break;
+	}
+
+	return 0;
+
+err_set_cdan:
+	dpaa2_io_service_deregister(NULL, nctx);
+err_service_reg:
+	free_channel(priv, channel);
+err_alloc_ch:
+	if (cpumask_empty(&priv->dpio_cpumask)) {
+		dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
+		return -ENODEV;
+	}
+
+	dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
+		 cpumask_pr_args(&priv->dpio_cpumask));
+
+	return 0;
+}
+
+static void free_dpio(struct dpaa2_eth_priv *priv)
+{
+	int i;
+	struct dpaa2_eth_channel *ch;
+
+	/* deregister CDAN notifications and free channels */
+	for (i = 0; i < priv->num_channels; i++) {
+		ch = priv->channel[i];
+		dpaa2_io_service_deregister(NULL, &ch->nctx);
+		free_channel(priv, ch);
+	}
+}
+
+static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
+						    int cpu)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	int i;
+
+	for (i = 0; i < priv->num_channels; i++)
+		if (priv->channel[i]->nctx.desired_cpu == cpu)
+			return priv->channel[i];
+
+	/* We should never get here. Issue a warning and return
+	 * the first channel, because it's still better than nothing
+	 */
+	dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
+
+	return priv->channel[0];
+}
+
+static void set_fq_affinity(struct dpaa2_eth_priv *priv)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpaa2_eth_fq *fq;
+	int rx_cpu, txc_cpu;
+	int i;
+
+	/* For each FQ, pick one channel/CPU to deliver frames to.
+	 * This may well change at runtime, either through irqbalance or
+	 * through direct user intervention.
+	 */
+	rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
+
+	for (i = 0; i < priv->num_fqs; i++) {
+		fq = &priv->fq[i];
+		switch (fq->type) {
+		case DPAA2_RX_FQ:
+			fq->target_cpu = rx_cpu;
+			rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
+			if (rx_cpu >= nr_cpu_ids)
+				rx_cpu = cpumask_first(&priv->dpio_cpumask);
+			break;
+		case DPAA2_TX_CONF_FQ:
+			fq->target_cpu = txc_cpu;
+			txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
+			if (txc_cpu >= nr_cpu_ids)
+				txc_cpu = cpumask_first(&priv->dpio_cpumask);
+			break;
+		default:
+			dev_err(dev, "Unknown FQ type: %d\n", fq->type);
+		}
+		fq->channel = get_affine_channel(priv, fq->target_cpu);
+	}
+}
+
+static void setup_fqs(struct dpaa2_eth_priv *priv)
+{
+	int i;
+
+	/* We have one TxConf FQ per Tx flow.
+	 * The number of Tx and Rx queues is the same.
+	 * Tx queues come first in the fq array.
+	 */
+	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
+		priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
+		priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
+		priv->fq[priv->num_fqs++].flowid = (u16)i;
+	}
+
+	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
+		priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
+		priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
+		priv->fq[priv->num_fqs++].flowid = (u16)i;
+	}
+
+	/* For each FQ, decide on which core to process incoming frames */
+	set_fq_affinity(priv);
+}
+
+/* Allocate and configure one buffer pool for each interface */
+static int setup_dpbp(struct dpaa2_eth_priv *priv)
+{
+	int err;
+	struct fsl_mc_device *dpbp_dev;
+	struct device *dev = priv->net_dev->dev.parent;
+
+	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
+				     &dpbp_dev);
+	if (err) {
+		dev_err(dev, "DPBP device allocation failed\n");
+		return err;
+	}
+
+	priv->dpbp_dev = dpbp_dev;
+
+	err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
+			&dpbp_dev->mc_handle);
+	if (err) {
+		dev_err(dev, "dpbp_open() failed\n");
+		goto err_open;
+	}
+
+	err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
+	if (err) {
+		dev_err(dev, "dpbp_enable() failed\n");
+		goto err_enable;
+	}
+
+	err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
+				  &priv->dpbp_attrs);
+	if (err) {
+		dev_err(dev, "dpbp_get_attributes() failed\n");
+		goto err_get_attr;
+	}
+
+	return 0;
+
+err_get_attr:
+	dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
+err_enable:
+	dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
+err_open:
+	fsl_mc_object_free(dpbp_dev);
+
+	return err;
+}
+
+static void free_dpbp(struct dpaa2_eth_priv *priv)
+{
+	drain_pool(priv);
+	dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
+	dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
+	fsl_mc_object_free(priv->dpbp_dev);
+}
+
+/* Configure the DPNI object this interface is associated with */
+static int setup_dpni(struct fsl_mc_device *ls_dev)
+{
+	struct device *dev = &ls_dev->dev;
+	struct dpaa2_eth_priv *priv;
+	struct net_device *net_dev;
+	int err;
+
+	net_dev = dev_get_drvdata(dev);
+	priv = netdev_priv(net_dev);
+
+	priv->dpni_id = ls_dev->obj_desc.id;
+
+	/* get a handle for the DPNI object */
+	err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token);
+	if (err) {
+		dev_err(dev, "dpni_open() failed\n");
+		goto err_open;
+	}
+
+	ls_dev->mc_io = priv->mc_io;
+	ls_dev->mc_handle = priv->mc_token;
+
+	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
+	if (err) {
+		dev_err(dev, "dpni_reset() failed\n");
+		goto err_reset;
+	}
+
+	err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
+				  &priv->dpni_attrs);
+	if (err) {
+		dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
+		goto err_get_attr;
+	}
+
+	/* Configure buffer layouts */
+	/* rx buffer */
+	priv->buf_layout.pass_parser_result = true;
+	priv->buf_layout.pass_frame_status = true;
+	priv->buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
+	priv->buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN;
+	priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
+				   DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+				   DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
+				   DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
+	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+				     DPNI_QUEUE_RX, &priv->buf_layout);
+	if (err) {
+		dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
+		goto err_buf_layout;
+	}
+
+	/* tx buffer */
+	priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+				   DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
+	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+				     DPNI_QUEUE_TX, &priv->buf_layout);
+	if (err) {
+		dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
+		goto err_buf_layout;
+	}
+
+	/* tx-confirm buffer */
+	priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+				     DPNI_QUEUE_TX_CONFIRM, &priv->buf_layout);
+	if (err) {
+		dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
+		goto err_buf_layout;
+	}
+
+	/* Now that we've set our tx buffer layout, retrieve the minimum
+	 * required tx data offset.
+	 */
+	err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
+				      &priv->tx_data_offset);
+	if (err) {
+		dev_err(dev, "dpni_get_tx_data_offset() failed\n");
+		goto err_data_offset;
+	}
+
+	if ((priv->tx_data_offset % 64) != 0)
+		dev_warn(dev, "Tx data offset (%d) not a multiple of 64B",
+			 priv->tx_data_offset);
+
+	/* Accommodate software annotation space (SWA) */
+	priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;
+
+	return 0;
+
+err_data_offset:
+err_buf_layout:
+err_get_attr:
+err_reset:
+	dpni_close(priv->mc_io, 0, priv->mc_token);
+err_open:
+	return err;
+}
+
+static void free_dpni(struct dpaa2_eth_priv *priv)
+{
+	int err;
+
+	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
+	if (err)
+		netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
+			    err);
+
+	dpni_close(priv->mc_io, 0, priv->mc_token);
+}
+
+static int setup_rx_flow(struct dpaa2_eth_priv *priv,
+			 struct dpaa2_eth_fq *fq)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpni_queue queue;
+	struct dpni_queue_id qid;
+	struct dpni_taildrop td;
+	int err;
+
+	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+			     DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
+	if (err) {
+		dev_err(dev, "dpni_get_queue(RX) failed\n");
+		return err;
+	}
+
+	fq->fqid = qid.fqid;
+
+	queue.destination.id = fq->channel->dpcon_id;
+	queue.destination.type = DPNI_DEST_DPCON;
+	queue.destination.priority = 1;
+	queue.user_context = (u64)fq;
+	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+			     DPNI_QUEUE_RX, 0, fq->flowid,
+			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
+			     &queue);
+	if (err) {
+		dev_err(dev, "dpni_set_queue(RX) failed\n");
+		return err;
+	}
+
+	td.enable = 1;
+	td.threshold = DPAA2_ETH_TAILDROP_THRESH;
+	err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
+				DPNI_QUEUE_RX, 0, fq->flowid, &td);
+	if (err) {
+		dev_err(dev, "dpni_set_taildrop() failed\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static int setup_tx_flow(struct dpaa2_eth_priv *priv,
+			 struct dpaa2_eth_fq *fq)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpni_queue queue;
+	struct dpni_queue_id qid;
+	int err;
+
+	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+			     DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
+	if (err) {
+		dev_err(dev, "dpni_get_queue(TX) failed\n");
+		return err;
+	}
+
+	fq->tx_qdbin = qid.qdbin;
+
+	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
+			     &queue, &qid);
+	if (err) {
+		dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
+		return err;
+	}
+
+	fq->fqid = qid.fqid;
+
+	queue.destination.id = fq->channel->dpcon_id;
+	queue.destination.type = DPNI_DEST_DPCON;
+	queue.destination.priority = 0;
+	queue.user_context = (u64)fq;
+	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
+			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
+			     &queue);
+	if (err) {
+		dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
+		return err;
+	}
+
+	return 0;
+}
+
+/* Hash key is a 5-tuple: IPsrc, IPdst, IPnextproto, L4src, L4dst */
+static const struct dpaa2_eth_hash_fields hash_fields[] = {
+	{
+		/* IP header */
+		.rxnfc_field = RXH_IP_SRC,
+		.cls_prot = NET_PROT_IP,
+		.cls_field = NH_FLD_IP_SRC,
+		.size = 4,
+	}, {
+		.rxnfc_field = RXH_IP_DST,
+		.cls_prot = NET_PROT_IP,
+		.cls_field = NH_FLD_IP_DST,
+		.size = 4,
+	}, {
+		.rxnfc_field = RXH_L3_PROTO,
+		.cls_prot = NET_PROT_IP,
+		.cls_field = NH_FLD_IP_PROTO,
+		.size = 1,
+	}, {
+		/* Using UDP ports, this is functionally equivalent to raw
+		 * byte pairs from L4 header.
+		 */
+		.rxnfc_field = RXH_L4_B_0_1,
+		.cls_prot = NET_PROT_UDP,
+		.cls_field = NH_FLD_UDP_PORT_SRC,
+		.size = 2,
+	}, {
+		.rxnfc_field = RXH_L4_B_2_3,
+		.cls_prot = NET_PROT_UDP,
+		.cls_field = NH_FLD_UDP_PORT_DST,
+		.size = 2,
+	},
+};
+
+/* Set RX hash options
+ * flags is a combination of RXH_ bits
+ */
+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
+{
+	struct device *dev = net_dev->dev.parent;
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct dpkg_profile_cfg cls_cfg;
+	struct dpni_rx_tc_dist_cfg dist_cfg;
+	u8 *dma_mem;
+	int i;
+	int err = 0;
+
+	if (!dpaa2_eth_hash_enabled(priv)) {
+		dev_err(dev, "Hashing support is not enabled\n");
+		return -EOPNOTSUPP;
+	}
+
+	memset(&cls_cfg, 0, sizeof(cls_cfg));
+
+	for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
+		struct dpkg_extract *key =
+			&cls_cfg.extracts[cls_cfg.num_extracts];
+
+		if (!(flags & hash_fields[i].rxnfc_field))
+			continue;
+
+		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
+			dev_err(dev, "error adding key extraction rule, too many rules?\n");
+			return -E2BIG;
+		}
+
+		key->type = DPKG_EXTRACT_FROM_HDR;
+		key->extract.from_hdr.prot = hash_fields[i].cls_prot;
+		key->extract.from_hdr.type = DPKG_FULL_FIELD;
+		key->extract.from_hdr.field = hash_fields[i].cls_field;
+		cls_cfg.num_extracts++;
+	}
+
+	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL);
+	if (!dma_mem)
+		return -ENOMEM;
+
+	err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
+	if (err) {
+		dev_err(dev, "dpni_prepare_key_cfg error %d", err);
+		goto err_prep_key;
+	}
+
+	memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+	/* Prepare for setting the rx dist */
+	dist_cfg.key_cfg_iova = dma_map_single(net_dev->dev.parent, dma_mem,
+					       DPAA2_CLASSIFIER_DMA_SIZE,
+					       DMA_TO_DEVICE);
+	if (dma_mapping_error(net_dev->dev.parent, dist_cfg.key_cfg_iova)) {
+		dev_err(dev, "DMA mapping failed\n");
+		err = -ENOMEM;
+		goto err_dma_map;
+	}
+
+	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+
+	err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
+	dma_unmap_single(net_dev->dev.parent, dist_cfg.key_cfg_iova,
+			 DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
+	if (err)
+		dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);
+
+err_dma_map:
+err_prep_key:
+	kfree(dma_mem);
+	return err;
+}
+
+/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
+ * frame queues and channels
+ */
+static int bind_dpni(struct dpaa2_eth_priv *priv)
+{
+	struct net_device *net_dev = priv->net_dev;
+	struct device *dev = net_dev->dev.parent;
+	struct dpni_pools_cfg pools_params;
+	struct dpni_error_cfg err_cfg;
+	int err = 0;
+	int i;
+
+	pools_params.num_dpbp = 1;
+	pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
+	pools_params.pools[0].backup_pool = 0;
+	pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
+	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
+	if (err) {
+		dev_err(dev, "dpni_set_pools() failed\n");
+		return err;
+	}
+
+	/* have the interface implicitly distribute traffic based on supported
+	 * header fields
+	 */
+	err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED);
+	if (err)
+		netdev_err(net_dev, "Failed to configure hashing\n");
+
+	/* Configure handling of error frames */
+	err_cfg.errors = DPAA2_ETH_RX_ERR_MASK;
+	err_cfg.set_frame_annotation = 1;
+	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
+	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
+				       &err_cfg);
+	if (err) {
+		dev_err(dev, "dpni_set_errors_behavior failed\n");
+		return err;
+	}
+
+	/* Configure Rx and Tx conf queues to generate CDANs */
+	for (i = 0; i < priv->num_fqs; i++) {
+		switch (priv->fq[i].type) {
+		case DPAA2_RX_FQ:
+			err = setup_rx_flow(priv, &priv->fq[i]);
+			break;
+		case DPAA2_TX_CONF_FQ:
+			err = setup_tx_flow(priv, &priv->fq[i]);
+			break;
+		default:
+			dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
+			return -EINVAL;
+		}
+		if (err)
+			return err;
+	}
+
+	err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
+			    DPNI_QUEUE_TX, &priv->tx_qdid);
+	if (err) {
+		dev_err(dev, "dpni_get_qdid() failed\n");
+		return err;
+	}
+
+	return 0;
+}
+
+/* Allocate rings for storing incoming frame descriptors */
+static int alloc_rings(struct dpaa2_eth_priv *priv)
+{
+	struct net_device *net_dev = priv->net_dev;
+	struct device *dev = net_dev->dev.parent;
+	int i;
+
+	for (i = 0; i < priv->num_channels; i++) {
+		priv->channel[i]->store =
+			dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
+		if (!priv->channel[i]->store) {
+			netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
+			goto err_ring;
+		}
+	}
+
+	return 0;
+
+err_ring:
+	for (i = 0; i < priv->num_channels; i++) {
+		if (!priv->channel[i]->store)
+			break;
+		dpaa2_io_store_destroy(priv->channel[i]->store);
+	}
+
+	return -ENOMEM;
+}
+
+static void free_rings(struct dpaa2_eth_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->num_channels; i++)
+		dpaa2_io_store_destroy(priv->channel[i]->store);
+}
+
+static int netdev_init(struct net_device *net_dev)
+{
+	int err;
+	struct device *dev = net_dev->dev.parent;
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
+	u8 bcast_addr[ETH_ALEN];
+
+	net_dev->netdev_ops = &dpaa2_eth_ops;
+
+	/* Get firmware address, if any */
+	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
+	if (err) {
+		dev_err(dev, "dpni_get_port_mac_addr() failed\n");
+		return err;
+	}
+
+	/* Get the DPNI's primary MAC address, if any */
+	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
+					dpni_mac_addr);
+	if (err) {
+		dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)\n", err);
+		return err;
+	}
+
+	/* First check if firmware has any address configured by bootloader */
+	if (!is_zero_ether_addr(mac_addr)) {
+		/* If the DPMAC addr != DPNI addr, update it */
+		if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
+			err = dpni_set_primary_mac_addr(priv->mc_io, 0,
+							priv->mc_token,
+							mac_addr);
+			if (err) {
+				dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
+				return err;
+			}
+		}
+		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+	} else if (is_zero_ether_addr(dpni_mac_addr)) {
+		/* Fills in net_dev->dev_addr, as required by
+		 * register_netdevice()
+		 */
+		eth_hw_addr_random(net_dev);
+		/* Make the user aware, without cluttering the boot log */
+		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
+		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
+						net_dev->dev_addr);
+		if (err) {
+			dev_err(dev, "dpni_set_primary_mac_addr(): %d\n", err);
+			return err;
+		}
+		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
+		 * practical purposes, this will be our "permanent" MAC address,
+		 * at least until the next reboot. This also allows
+		 * register_netdevice() to properly fill in net_dev->perm_addr.
+		 */
+		net_dev->addr_assign_type = NET_ADDR_PERM;
+	} else {
+		/* NET_ADDR_PERM is default, all we have to do is
+		 * fill in the device addr.
+		 */
+		memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
+	}
+
+	/* Explicitly add the broadcast address to the MAC filtering table;
+	 * the MC won't do that for us.
+	 */
+	eth_broadcast_addr(bcast_addr);
+	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
+	if (err) {
+		dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err);
+		/* Not a fatal error; egress traffic will still work without it */
+	}
+
+	/* Reserve enough space to align buffer as per hardware requirement;
+	 * NOTE: priv->tx_data_offset MUST be initialized at this point.
+	 */
+	net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);
+
+	/* Set MTU limits */
+	net_dev->min_mtu = 68;
+	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
+
+	/* Our .ndo_init will be called as part of register_netdev() */
+	err = register_netdev(net_dev);
+	if (err < 0) {
+		dev_err(dev, "register_netdev() failed\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static int poll_link_state(void *arg)
+{
+	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
+	int err;
+
+	while (!kthread_should_stop()) {
+		err = link_state_update(priv);
+		if (unlikely(err))
+			return err;
+
+		msleep(DPAA2_ETH_LINK_STATE_REFRESH);
+	}
+
+	return 0;
+}
+
+static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
+{
+	u32 status, clear = 0;
+	struct device *dev = (struct device *)arg;
+	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
+	struct net_device *net_dev = dev_get_drvdata(dev);
+	int err;
+
+	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
+				  DPNI_IRQ_INDEX, &status);
+	if (unlikely(err)) {
+		netdev_err(net_dev, "Can't get irq status (err %d)", err);
+		clear = 0xffffffff;
+		goto out;
+	}
+
+	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
+		clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
+		link_state_update(netdev_priv(net_dev));
+	}
+
+out:
+	dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
+			      DPNI_IRQ_INDEX, clear);
+	return IRQ_HANDLED;
+}
+
+static int setup_irqs(struct fsl_mc_device *ls_dev)
+{
+	int err = 0;
+	struct fsl_mc_device_irq *irq;
+
+	err = fsl_mc_allocate_irqs(ls_dev);
+	if (err) {
+		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
+		return err;
+	}
+
+	irq = ls_dev->irqs[0];
+	err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
+					dpni_irq0_handler,
+					dpni_irq0_handler_thread,
+					IRQF_NO_SUSPEND | IRQF_ONESHOT,
+					dev_name(&ls_dev->dev), &ls_dev->dev);
+	if (err < 0) {
+		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d", err);
+		goto free_mc_irq;
+	}
+
+	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
+				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
+	if (err < 0) {
+		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d", err);
+		goto free_irq;
+	}
+
+	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
+				  DPNI_IRQ_INDEX, 1);
+	if (err < 0) {
+		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d", err);
+		goto free_irq;
+	}
+
+	return 0;
+
+free_irq:
+	devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
+free_mc_irq:
+	fsl_mc_free_irqs(ls_dev);
+
+	return err;
+}
+
+static void add_ch_napi(struct dpaa2_eth_priv *priv)
+{
+	int i;
+	struct dpaa2_eth_channel *ch;
+
+	for (i = 0; i < priv->num_channels; i++) {
+		ch = priv->channel[i];
+		/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
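+		/* (NAPI_POLL_WEIGHT is 64, a multiple of the 16-entry store) */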
+		netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
+			       NAPI_POLL_WEIGHT);
+	}
+}
+
+static void del_ch_napi(struct dpaa2_eth_priv *priv)
+{
+	int i;
+	struct dpaa2_eth_channel *ch;
+
+	for (i = 0; i < priv->num_channels; i++) {
+		ch = priv->channel[i];
+		netif_napi_del(&ch->napi);
+	}
+}
+
+static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
+{
+	struct device *dev;
+	struct net_device *net_dev = NULL;
+	struct dpaa2_eth_priv *priv = NULL;
+	int err = 0;
+
+	dev = &dpni_dev->dev;
+
+	/* Net device */
+	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
+	if (!net_dev) {
+		dev_err(dev, "alloc_etherdev_mq() failed\n");
+		return -ENOMEM;
+	}
+
+	SET_NETDEV_DEV(net_dev, dev);
+	dev_set_drvdata(dev, net_dev);
+
+	priv = netdev_priv(net_dev);
+	priv->net_dev = net_dev;
+
+	/* Obtain a MC portal */
+	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
+				     &priv->mc_io);
+	if (err) {
+		dev_err(dev, "MC portal allocation failed\n");
+		goto err_portal_alloc;
+	}
+
+	/* MC objects initialization and configuration */
+	err = setup_dpni(dpni_dev);
+	if (err)
+		goto err_dpni_setup;
+
+	err = setup_dpio(priv);
+	if (err)
+		goto err_dpio_setup;
+
+	setup_fqs(priv);
+
+	err = setup_dpbp(priv);
+	if (err)
+		goto err_dpbp_setup;
+
+	err = bind_dpni(priv);
+	if (err)
+		goto err_bind;
+
+	/* Add a NAPI context for each channel */
+	add_ch_napi(priv);
+
+	/* Percpu statistics */
+	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
+	if (!priv->percpu_stats) {
+		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
+		err = -ENOMEM;
+		goto err_alloc_percpu_stats;
+	}
+
+	err = netdev_init(net_dev);
+	if (err)
+		goto err_netdev_init;
+
+	/* Configure checksum offload based on current interface flags */
+	err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
+	if (err)
+		goto err_csum;
+
+	err = set_tx_csum(priv, !!(net_dev->features &
+				   (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
+	if (err)
+		goto err_csum;
+
+	err = alloc_rings(priv);
+	if (err)
+		goto err_alloc_rings;
+
+	err = setup_irqs(dpni_dev);
+	if (err) {
+		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
+		priv->poll_thread = kthread_run(poll_link_state, priv,
+						"%s_poll_link", net_dev->name);
+		if (IS_ERR(priv->poll_thread)) {
+			netdev_err(net_dev, "Error starting polling thread\n");
+			err = PTR_ERR(priv->poll_thread);
+			goto err_poll_thread;
+		}
+		priv->do_link_poll = true;
+	}
+
+	dev_info(dev, "Probed interface %s\n", net_dev->name);
+	return 0;
+
+err_poll_thread:
+	free_rings(priv);
+err_alloc_rings:
+err_csum:
+	unregister_netdev(net_dev);
+err_netdev_init:
+	free_percpu(priv->percpu_stats);
+err_alloc_percpu_stats:
+	del_ch_napi(priv);
+err_bind:
+	free_dpbp(priv);
+err_dpbp_setup:
+	free_dpio(priv);
+err_dpio_setup:
+	free_dpni(priv);
+err_dpni_setup:
+	fsl_mc_portal_free(priv->mc_io);
+err_portal_alloc:
+	dev_set_drvdata(dev, NULL);
+	free_netdev(net_dev);
+
+	return err;
+}
+
+static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
+{
+	struct device *dev;
+	struct net_device *net_dev;
+	struct dpaa2_eth_priv *priv;
+
+	dev = &ls_dev->dev;
+	net_dev = dev_get_drvdata(dev);
+	priv = netdev_priv(net_dev);
+
+	unregister_netdev(net_dev);
+	dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
+
+	if (priv->do_link_poll)
+		kthread_stop(priv->poll_thread);
+	else
+		fsl_mc_free_irqs(ls_dev);
+
+	free_rings(priv);
+	free_percpu(priv->percpu_stats);
+
+	del_ch_napi(priv);
+	free_dpbp(priv);
+	free_dpio(priv);
+	free_dpni(priv);
+
+	fsl_mc_portal_free(priv->mc_io);
+
+	dev_set_drvdata(dev, NULL);
+	free_netdev(net_dev);
+
+	return 0;
+}
+
+static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
+	{
+		.vendor = FSL_MC_VENDOR_FREESCALE,
+		.obj_type = "dpni",
+	},
+	{ .vendor = 0x0 }
+};
+MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
+
+static struct fsl_mc_driver dpaa2_eth_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = dpaa2_eth_probe,
+	.remove = dpaa2_eth_remove,
+	.match_id_table = dpaa2_eth_match_id_table
+};
+
+module_fsl_mc_driver(dpaa2_eth_driver);
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
new file mode 100644
index 000000000000..7b194a47df2f
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
@@ -0,0 +1,304 @@
+/* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA2_ETH_H
+#define __DPAA2_ETH_H
+
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+
+#include "../../fsl-mc/include/dpaa2-io.h"
+#include "../../fsl-mc/include/dpaa2-fd.h"
+#include "../../fsl-mc/include/dpbp.h"
+#include "../../fsl-mc/include/dpcon.h"
+#include "dpni.h"
+#include "dpni-cmd.h"
+
+#define DPAA2_ETH_STORE_SIZE		16
+
+/* Maximum number of scatter-gather entries in an ingress frame,
+ * considering the maximum receive frame size is 64K
+ */
+#define DPAA2_ETH_MAX_SG_ENTRIES	((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE)
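+/* 32 entries with the 2048-byte Rx buffers defined below */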
+
+/* Maximum acceptable MTU value. It is in direct relation with the hardware
+ * enforced Max Frame Length (currently 10k).
+ */
+#define DPAA2_ETH_MFL			(10 * 1024)
+#define DPAA2_ETH_MAX_MTU		(DPAA2_ETH_MFL - VLAN_ETH_HLEN)
+/* Convert L3 MTU to L2 MFL */
+#define DPAA2_ETH_L2_MAX_FRM(mtu)	((mtu) + VLAN_ETH_HLEN)
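+/* e.g. 10240 - 18 (VLAN_ETH_HLEN) = 10222 bytes max MTU */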
+
+/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
+ * frames in the Rx queues (length of the current frame is not
+ * taken into account when making the taildrop decision)
+ */
+#define DPAA2_ETH_TAILDROP_THRESH	(64 * 1024)
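+/* 64KB leaves room for roughly six maximum-length (10KB) frames per queue */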
+
+/* Buffer quota per queue. Must be large enough such that for minimum sized
+ * frames taildrop kicks in before the bpool gets depleted, so we compute
+ * how many 64B frames fit inside the taildrop threshold and add a margin
+ * to accommodate the buffer refill delay.
+ */
+#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE	(DPAA2_ETH_TAILDROP_THRESH / 64)
+#define DPAA2_ETH_NUM_BUFS		(DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
+#define DPAA2_ETH_REFILL_THRESH		DPAA2_ETH_MAX_FRAMES_PER_QUEUE
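+/* With the 64KB threshold above: 1024 frames per queue, i.e. 1280 buffers
+ * including the 256-buffer refill margin
+ */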
+
+/* Maximum number of buffers that can be acquired/released through a single
+ * QBMan command
+ */
+#define DPAA2_ETH_BUFS_PER_CMD		7
+
+/* Hardware requires alignment for ingress/egress buffer addresses
+ * and ingress buffer lengths.
+ */
+#define DPAA2_ETH_RX_BUF_SIZE		2048
+#define DPAA2_ETH_TX_BUF_ALIGN		64
+#define DPAA2_ETH_RX_BUF_ALIGN		256
+#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
+	((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN)
+
+/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress
+ * buffers large enough to allow building an skb around them and also account
+ * for alignment restrictions
+ */
+#define DPAA2_ETH_BUF_RAW_SIZE \
+	(DPAA2_ETH_RX_BUF_SIZE + \
+	SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
+	DPAA2_ETH_RX_BUF_ALIGN)
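+/* Rough estimate, assuming a 64-bit build where skb_shared_info is about
+ * 320 bytes: the raw buffer size comes to roughly 2.5KB
+ */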
+
+/* We are accommodating an skb backpointer and some S/G info
+ * in the frame's software annotation. The hardware
+ * options are either 0 or 64, so we choose the latter.
+ */
+#define DPAA2_ETH_SWA_SIZE		64
+
+/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
+struct dpaa2_eth_swa {
+	struct sk_buff *skb;
+	struct scatterlist *scl;
+	int num_sg;
+	int num_dma_bufs;
+};
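+/* On a 64-bit build this is two pointers plus two ints (24 bytes),
+ * comfortably below the 64-byte limit
+ */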
+
+/* Annotation valid bits in FD FRC */
+#define DPAA2_FD_FRC_FASV		0x8000
+#define DPAA2_FD_FRC_FAEADV		0x4000
+#define DPAA2_FD_FRC_FAPRV		0x2000
+#define DPAA2_FD_FRC_FAIADV		0x1000
+#define DPAA2_FD_FRC_FASWOV		0x0800
+#define DPAA2_FD_FRC_FAICFDV		0x0400
+
+/* Annotation bits in FD CTRL */
+#define DPAA2_FD_CTRL_ASAL		0x00020000	/* ASAL = 128 */
+#define DPAA2_FD_CTRL_PTA		0x00800000
+#define DPAA2_FD_CTRL_PTV1		0x00400000
+
+/* Frame annotation status */
+struct dpaa2_fas {
+	u8 reserved;
+	u8 ppid;
+	__le16 ifpid;
+	__le32 status;
+} __packed;
+
+/* Frame annotation status word is located in the first 8 bytes
+ * of the buffer's hardware annotation area
+ */
+#define DPAA2_FAS_OFFSET		0
+#define DPAA2_FAS_SIZE			(sizeof(struct dpaa2_fas))
+
+/* Error and status bits in the frame annotation status word */
+/* Debug frame, otherwise supposed to be discarded */
+#define DPAA2_FAS_DISC			0x80000000
+/* MACSEC frame */
+#define DPAA2_FAS_MS			0x40000000
+#define DPAA2_FAS_PTP			0x08000000
+/* Ethernet multicast frame */
+#define DPAA2_FAS_MC			0x04000000
+/* Ethernet broadcast frame */
+#define DPAA2_FAS_BC			0x02000000
+#define DPAA2_FAS_KSE			0x00040000
+#define DPAA2_FAS_EOFHE			0x00020000
+#define DPAA2_FAS_MNLE			0x00010000
+#define DPAA2_FAS_TIDE			0x00008000
+#define DPAA2_FAS_PIEE			0x00004000
+/* Frame length error */
+#define DPAA2_FAS_FLE			0x00002000
+/* Frame physical error */
+#define DPAA2_FAS_FPE			0x00001000
+#define DPAA2_FAS_PTE			0x00000080
+#define DPAA2_FAS_ISP			0x00000040
+#define DPAA2_FAS_PHE			0x00000020
+#define DPAA2_FAS_BLE			0x00000010
+/* L3 csum validation performed */
+#define DPAA2_FAS_L3CV			0x00000008
+/* L3 csum error */
+#define DPAA2_FAS_L3CE			0x00000004
+/* L4 csum validation performed */
+#define DPAA2_FAS_L4CV			0x00000002
+/* L4 csum error */
+#define DPAA2_FAS_L4CE			0x00000001
+/* Possible errors on the ingress path */
+#define DPAA2_ETH_RX_ERR_MASK		(DPAA2_FAS_KSE		| \
+					 DPAA2_FAS_EOFHE	| \
+					 DPAA2_FAS_MNLE		| \
+					 DPAA2_FAS_TIDE		| \
+					 DPAA2_FAS_PIEE		| \
+					 DPAA2_FAS_FLE		| \
+					 DPAA2_FAS_FPE		| \
+					 DPAA2_FAS_PTE		| \
+					 DPAA2_FAS_ISP		| \
+					 DPAA2_FAS_PHE		| \
+					 DPAA2_FAS_BLE		| \
+					 DPAA2_FAS_L3CE		| \
+					 DPAA2_FAS_L4CE)
+/* Tx errors */
+#define DPAA2_ETH_TXCONF_ERR_MASK	(DPAA2_FAS_KSE		| \
+					 DPAA2_FAS_EOFHE	| \
+					 DPAA2_FAS_MNLE		| \
+					 DPAA2_FAS_TIDE)
+
+/* Time in milliseconds between link state updates */
+#define DPAA2_ETH_LINK_STATE_REFRESH	1000
+
+/* Number of times to retry a frame enqueue before giving up.
+ * Value determined empirically, in order to minimize the number
+ * of frames dropped on Tx
+ */
+#define DPAA2_ETH_ENQUEUE_RETRIES	10
+
+/* Maximum number of queues associated with a DPNI */
+#define DPAA2_ETH_MAX_RX_QUEUES		16
+#define DPAA2_ETH_MAX_TX_QUEUES		NR_CPUS
+#define DPAA2_ETH_MAX_QUEUES		(DPAA2_ETH_MAX_RX_QUEUES + \
+					DPAA2_ETH_MAX_TX_QUEUES)
+
+#define DPAA2_ETH_MAX_DPCONS		NR_CPUS
+
+enum dpaa2_eth_fq_type {
+	DPAA2_RX_FQ = 0,
+	DPAA2_TX_CONF_FQ,
+};
+
+struct dpaa2_eth_priv;
+
+struct dpaa2_eth_fq {
+	u32 fqid;
+	u32 tx_qdbin;
+	u16 flowid;
+	int target_cpu;
+	struct dpaa2_eth_channel *channel;
+	enum dpaa2_eth_fq_type type;
+
+	void (*consume)(struct dpaa2_eth_priv *,
+			struct dpaa2_eth_channel *,
+			const struct dpaa2_fd *,
+			struct napi_struct *);
+};
+
+struct dpaa2_eth_channel {
+	struct dpaa2_io_notification_ctx nctx;
+	struct fsl_mc_device *dpcon;
+	int dpcon_id;
+	int ch_id;
+	int dpio_id;
+	struct napi_struct napi;
+	struct dpaa2_io_store *store;
+	struct dpaa2_eth_priv *priv;
+	int buf_count;
+};
+
+struct dpaa2_eth_hash_fields {
+	u64 rxnfc_field;
+	enum net_prot cls_prot;
+	int cls_field;
+	int size;
+};
+
+/* Driver private data */
+struct dpaa2_eth_priv {
+	struct net_device *net_dev;
+
+	u8 num_fqs;
+	struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
+
+	u8 num_channels;
+	struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
+
+	int dpni_id;
+	struct dpni_attr dpni_attrs;
+	/* Insofar as the MC is concerned, we're using one layout on all 3 types
+	 * of buffers (Rx, Tx, Tx-Conf).
+	 */
+	struct dpni_buffer_layout buf_layout;
+	u16 tx_data_offset;
+
+	struct fsl_mc_device *dpbp_dev;
+	struct dpbp_attr dpbp_attrs;
+
+	u16 tx_qdid;
+	struct fsl_mc_io *mc_io;
+	/* Cores which have an affine DPIO/DPCON.
+	 * This is the cpu set on which Rx and Tx conf frames are processed
+	 */
+	struct cpumask dpio_cpumask;
+
+	/* Standard statistics */
+	struct rtnl_link_stats64 __percpu *percpu_stats;
+
+	u16 mc_token;
+
+	struct dpni_link_state link_state;
+	bool do_link_poll;
+	struct task_struct *poll_thread;
+};
+
+/* default Rx hash options, set during probing */
+#define DPAA2_RXH_SUPPORTED	(RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
+				| RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \
+				| RXH_L4_B_2_3)
+
+#define dpaa2_eth_hash_enabled(priv)	\
+	((priv)->dpni_attrs.num_queues > 1)
+
+/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
+#define DPAA2_CLASSIFIER_DMA_SIZE 256
+
+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
+
+static inline int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
+{
+	return priv->dpni_attrs.num_queues;
+}
+
+#endif	/* __DPAA2_ETH_H */
-- 
2.11.0
