Message-ID: <1425912115-27398-2-git-send-email-Emilian.Medve@Freescale.com>
Date:	Mon, 9 Mar 2015 09:41:46 -0500
From:	Emil Medve <Emilian.Medve@...escale.com>
To:	<scottwood@...escale.com>, <linuxppc-dev@...ts.ozlabs.org>,
	<linux-kernel@...r.kernel.org>
CC:	Geoff Thorpe <Geoff.Thorpe@...escale.com>
Subject: [RFC v3 01/10] fsl_bman: Add drivers for the Freescale DPAA BMan

From: Geoff Thorpe <Geoff.Thorpe@...escale.com>

Signed-off-by: Geoff Thorpe <Geoff.Thorpe@...escale.com>
---
 drivers/soc/fsl/Kconfig        |   52 ++
 drivers/soc/fsl/Makefile       |    9 +-
 drivers/soc/fsl/bman.c         |  610 ++++++++++++++++++++++++
 drivers/soc/fsl/bman.h         |  523 ++++++++++++++++++++
 drivers/soc/fsl/bman_api.c     | 1032 ++++++++++++++++++++++++++++++++++++++++
 drivers/soc/fsl/bman_portal.c  |  329 +++++++++++++
 drivers/soc/fsl/bman_priv.h    |  148 ++++++
 drivers/soc/fsl/dpaa_alloc.c   |  403 ++++++++++++++++
 drivers/soc/fsl/dpaa_sys.h     |  234 +++++++++
 drivers/soc/fsl/qbman_driver.c |   40 ++
 include/soc/fsl/bman.h         |  510 ++++++++++++++++++++
 11 files changed, 3889 insertions(+), 1 deletion(-)
 create mode 100644 drivers/soc/fsl/bman.c
 create mode 100644 drivers/soc/fsl/bman.h
 create mode 100644 drivers/soc/fsl/bman_api.c
 create mode 100644 drivers/soc/fsl/bman_portal.c
 create mode 100644 drivers/soc/fsl/bman_priv.h
 create mode 100644 drivers/soc/fsl/dpaa_alloc.c
 create mode 100644 drivers/soc/fsl/dpaa_sys.h
 create mode 100644 drivers/soc/fsl/qbman_driver.c
 create mode 100644 include/soc/fsl/bman.h

diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index 38c08ae..b048a75 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -1 +1,53 @@
+menuconfig FSL_DPA
+	tristate "Freescale DPAA Buffer management"
+	depends on HAS_FSL_QBMAN
+	default y
+
+if FSL_DPA
+
+config FSL_DPA_CHECKING
+	bool "additional driver checking"
+	default n
+	---help---
+	  Compiles in additional checks to sanity-check the drivers and any
+	  use of them by other code. Not recommended for performance.
+
+config FSL_DPA_CAN_WAIT
+	bool
+	default y
+
+config FSL_DPA_CAN_WAIT_SYNC
+	bool
+	default y
+
+config FSL_DPA_PIRQ_FAST
+	bool
+	default y
+
+config FSL_DPA_PIRQ_SLOW
+	bool
+	default y
+
+config FSL_DPA_PORTAL_SHARE
+	bool
+	default y
+
+config FSL_BMAN
+	bool "Freescale Buffer Manager (BMan) support"
+	default y
+
+if FSL_BMAN
+
+config FSL_BMAN_CONFIG
+	bool "BMan device management"
+	default y
+	---help---
+	  If this Linux image is running natively, you need this option. If
+	  this Linux image is running as a guest OS under the hypervisor, only
+	  one guest OS ("the control plane") needs this option.
+
+endif # FSL_BMAN
+
+endif # FSL_DPA
+
 source "drivers/soc/fsl/fman/Kconfig"
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
index 20ab5d0..9599ac5 100644
--- a/drivers/soc/fsl/Makefile
+++ b/drivers/soc/fsl/Makefile
@@ -1 +1,8 @@
-obj-$(CONFIG_FSL_FMAN)	+= fman/
+# Common
+obj-$(CONFIG_FSL_DPA)				+= dpaa_alloc.o
+obj-$(CONFIG_HAS_FSL_QBMAN)			+= qbman_driver.o
+
+obj-$(CONFIG_FSL_BMAN)				+= bman_api.o
+obj-$(CONFIG_FSL_BMAN_CONFIG)			+= bman.o bman_portal.o
+
+obj-$(CONFIG_FSL_FMAN)				+= fman/
diff --git a/drivers/soc/fsl/bman.c b/drivers/soc/fsl/bman.c
new file mode 100644
index 0000000..b91e767
--- /dev/null
+++ b/drivers/soc/fsl/bman.c
@@ -0,0 +1,610 @@
+/* Copyright (c) 2009 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/cacheflush.h>
+#include "bman_priv.h"
+
+/* Last updated for v00.79 of the BG (block guide) */
+
+struct bman;
+
+/* Register offsets */
+#define REG_POOL_SWDET(n)	(0x0000 + ((n) * 0x04))
+#define REG_POOL_HWDET(n)	(0x0100 + ((n) * 0x04))
+#define REG_POOL_SWDXT(n)	(0x0200 + ((n) * 0x04))
+#define REG_POOL_HWDXT(n)	(0x0300 + ((n) * 0x04))
+#define REG_POOL_CONTENT(n)	(0x0600 + ((n) * 0x04))
+#define REG_FBPR_FPC		0x0800
+#define REG_ECSR		0x0a00
+#define REG_ECIR		0x0a04
+#define REG_EADR		0x0a08
+#define REG_EDATA(n)		(0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n)		(0x0a80 + ((n) * 0x04))
+#define REG_IP_REV_1		0x0bf8
+#define REG_IP_REV_2		0x0bfc
+#define REG_FBPR_BARE		0x0c00
+#define REG_FBPR_BAR		0x0c04
+#define REG_FBPR_AR		0x0c10
+#define REG_SRCIDR		0x0d04
+#define REG_LIODNR		0x0d08
+#define REG_ERR_ISR		0x0e00	/* + "enum bm_isr_reg" */
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define BM_EIRQ_IVCI	0x00000010	/* Invalid Command Verb */
+#define BM_EIRQ_FLWI	0x00000008	/* FBPR Low Watermark */
+#define BM_EIRQ_MBEI	0x00000004	/* Multi-bit ECC Error */
+#define BM_EIRQ_SBEI	0x00000002	/* Single-bit ECC Error */
+#define BM_EIRQ_BSCN	0x00000001	/* Pool State Change Notification */
+
+/* BMAN_ECIR valid error bit */
+#define PORTAL_ECSR_ERR	(BM_EIRQ_IVCI)
+
+union bman_ecir {
+	u32 ecir_raw;
+	struct {
+		u32 __reserved1:4;
+		u32 portal_num:4;
+		u32 __reserved2:12;
+		u32 numb:4;
+		u32 __reserved3:2;
+		u32 pid:6;
+	} __packed info;
+};
+
+union bman_eadr {
+	u32 eadr_raw;
+	struct {
+		u32 __reserved1:5;
+		u32 memid:3;
+		u32 __reserved2:14;
+		u32 eadr:10;
+	} __packed info;
+};
+
+struct bman_hwerr_txt {
+	u32 mask;
+	const char *txt;
+};
+
+#define BMAN_HWE_TXT(a, b) { .mask = BM_EIRQ_##a, .txt = b }
+
+static const struct bman_hwerr_txt bman_hwerr_txts[] = {
+	BMAN_HWE_TXT(IVCI, "Invalid Command Verb"),
+	BMAN_HWE_TXT(FLWI, "FBPR Low Watermark"),
+	BMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"),
+	BMAN_HWE_TXT(SBEI, "Single-bit ECC Error"),
+	BMAN_HWE_TXT(BSCN, "Pool State Change Notification"),
+};
+#define BMAN_HWE_COUNT ARRAY_SIZE(bman_hwerr_txts)
+
+struct bman_error_info_mdata {
+	u16 addr_mask;
+	u16 bits;
+	const char *txt;
+};
+
+#define BMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c}
+static const struct bman_error_info_mdata error_mdata[] = {
+	BMAN_ERR_MDATA(0x03FF, 192, "Stockpile memory"),
+	BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 1"),
+	BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 2"),
+};
+#define BMAN_ERR_MDATA_COUNT ARRAY_SIZE(error_mdata)
+
+/* TODO: make this a Kconfig option */
+#define BMAN_ERRS_TO_UNENABLE (BM_EIRQ_FLWI)
+
+/**
+ * bm_err_isr_<reg>_<verb> - Manipulate global interrupt registers
+ * @v: for accessors that write values, this is the 32-bit value
+ *
+ * Manipulates BMAN_ERR_ISR, BMAN_ERR_IER, BMAN_ERR_ISDR, BMAN_ERR_IIR. All
+ * manipulations except bm_err_isr_[un]inhibit() use 32-bit masks composed of
+ * the BM_EIRQ_*** definitions. Note that "bm_err_isr_enable_write" means
+ * "write the enable register" rather than "enable the write register"!
+ */
+#define bm_err_isr_status_read(bm)	\
+		__bm_err_isr_read(bm, bm_isr_status)
+#define bm_err_isr_status_clear(bm, m)	\
+		__bm_err_isr_write(bm, bm_isr_status, m)
+#define bm_err_isr_enable_read(bm)	\
+		__bm_err_isr_read(bm, bm_isr_enable)
+#define bm_err_isr_enable_write(bm, v)	\
+		__bm_err_isr_write(bm, bm_isr_enable, v)
+#define bm_err_isr_disable_read(bm)	\
+		__bm_err_isr_read(bm, bm_isr_disable)
+#define bm_err_isr_disable_write(bm, v)	\
+		__bm_err_isr_write(bm, bm_isr_disable, v)
+#define bm_err_isr_inhibit(bm)		\
+		__bm_err_isr_write(bm, bm_isr_inhibit, 1)
+#define bm_err_isr_uninhibit(bm)	\
+		__bm_err_isr_write(bm, bm_isr_inhibit, 0)
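+
+/*
+ * Illustrative usage (a sketch based only on the macros above, not part of
+ * the original submission): enable just the ECC and low-watermark sources,
+ * then acknowledge anything already pending:
+ *
+ *	bm_err_isr_enable_write(bm, BM_EIRQ_MBEI | BM_EIRQ_SBEI | BM_EIRQ_FLWI);
+ *	bm_err_isr_status_clear(bm, bm_err_isr_status_read(bm));
+ */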
+
+/*
+ * TODO: unimplemented registers
+ *
+ * BMAN_POOLk_SDCNT, BMAN_POOLk_HDCNT, BMAN_FULT,
+ * BMAN_VLDPL, BMAN_EECC, BMAN_SBET, BMAN_EINJ
+ */
+
+/* Encapsulate "struct bman *" as a cast of the register space address. */
+
+static struct bman *bm_create(void *regs)
+{
+	return (struct bman *)regs;
+}
+
+static inline u32 __bm_in(struct bman *bm, u32 offset)
+{
+	return in_be32((void *)bm + offset);
+}
+static inline void __bm_out(struct bman *bm, u32 offset, u32 val)
+{
+	out_be32((void *)bm + offset, val);
+}
+#define bm_in(reg)		__bm_in(bm, REG_##reg)
+#define bm_out(reg, val)	__bm_out(bm, REG_##reg, val)
+
+static u32 __bm_err_isr_read(struct bman *bm, enum bm_isr_reg n)
+{
+	return __bm_in(bm, REG_ERR_ISR + (n << 2));
+}
+
+static void __bm_err_isr_write(struct bman *bm, enum bm_isr_reg n, u32 val)
+{
+	__bm_out(bm, REG_ERR_ISR + (n << 2), val);
+}
+
+static void bm_get_version(struct bman *bm, u16 *id, u8 *major, u8 *minor)
+{
+	u32 v = bm_in(IP_REV_1);
+	*id = (v >> 16);
+	*major = (v >> 8) & 0xff;
+	*minor = v & 0xff;
+}
+
+static u32 __generate_thresh(u32 val, int roundup)
+{
+	u32 e = 0;	/* exponent; 'val' ends up holding the coefficient */
+	int oddbit = 0;
+	while (val > 0xff) {
+		oddbit = val & 1;
+		val >>= 1;
+		e++;
+		if (roundup && oddbit)
+			val++;
+	}
+	DPA_ASSERT(e < 0x10);
+	return val | (e << 8);
+}
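+
+/*
+ * Worked example (illustrative): __generate_thresh(4660, 0) halves the value
+ * five times to fit 8 bits, giving coefficient 145 and exponent 5, i.e.
+ * 0x0591 (145 << 5 = 4640, just under 4660); with roundup, the odd bits
+ * shifted out bump the coefficient to 146, i.e. 0x0592 (146 << 5 = 4672).
+ */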
+
+static void bm_set_pool(struct bman *bm, u8 pool, u32 swdet, u32 swdxt,
+			u32 hwdet, u32 hwdxt)
+{
+	DPA_ASSERT(pool < bman_pool_max);
+	bm_out(POOL_SWDET(pool), __generate_thresh(swdet, 0));
+	bm_out(POOL_SWDXT(pool), __generate_thresh(swdxt, 1));
+	bm_out(POOL_HWDET(pool), __generate_thresh(hwdet, 0));
+	bm_out(POOL_HWDXT(pool), __generate_thresh(hwdxt, 1));
+}
+
+static void bm_set_memory(struct bman *bm, u64 ba, int prio, u32 size)
+{
+	u32 exp = ilog2(size);
+	/* choke if size isn't within range */
+	DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
+			is_power_of_2(size));
+	/* choke if '[e]ba' has lower-alignment than 'size' */
+	DPA_ASSERT(!(ba & (size - 1)));
+	bm_out(FBPR_BARE, upper_32_bits(ba));
+	bm_out(FBPR_BAR, lower_32_bits(ba));
+	bm_out(FBPR_AR, (prio ? 0x40000000 : 0) | (exp - 1));
+}
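+
+/*
+ * Worked example (illustrative): a 1 MB FBPR region has exp = ilog2(size) =
+ * 20, so FBPR_AR is programmed with exp - 1 = 19 (0x13), OR'd with
+ * 0x40000000 when 'prio' is non-zero.
+ */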
+
+/*****************/
+/* Config driver */
+/*****************/
+
+/* We support only one of these. */
+static struct bman *bm;
+static struct device_node *bm_node;
+
+/* And this state belongs to 'bm'. It is set during fsl_bman_init(), but used
+ * during bman_init_ccsr(). */
+static dma_addr_t fbpr_a;
+static size_t fbpr_sz;
+
+static int bman_fbpr(struct reserved_mem *rmem)
+{
+	fbpr_a = rmem->base;
+	fbpr_sz = rmem->size;
+
+	WARN_ON(!(fbpr_a && fbpr_sz));
+
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
+
+static int __init fsl_bman_init(struct device_node *node)
+{
+	struct resource res;
+	u32 __iomem *regs;
+	int ret;
+	u16 id;
+	u8 major, minor;
+
+	ret = of_address_to_resource(node, 0, &res);
+	if (ret) {
+		pr_err("Can't get %s property 'reg'\n",
+				node->full_name);
+		return ret;
+	}
+
+	/* Global configuration */
+	regs = ioremap(res.start, resource_size(&res));
+	bm = bm_create(regs);
+	BUG_ON(!bm);
+	bm_node = node;
+	bm_get_version(bm, &id, &major, &minor);
+	pr_info("BMan ver:%04x,%02x,%02x\n", id, major, minor);
+	if ((major == 1) && (minor == 0)) {
+		bman_ip_rev = BMAN_REV10;
+		bman_pool_max = 64;
+	} else if ((major == 2) && (minor == 0)) {
+		bman_ip_rev = BMAN_REV20;
+		bman_pool_max = 8;
+	} else if ((major == 2) && (minor == 1)) {
+		bman_ip_rev = BMAN_REV21;
+		bman_pool_max = 64;
+	} else {
+		pr_warn("unknown BMan version, defaulting to rev1.0\n");
+	}
+
+	return 0;
+}
+
+int bman_have_ccsr(void)
+{
+	return bm ? 1 : 0;
+}
+
+int bm_pool_set(u32 bpid, const u32 *thresholds)
+{
+	if (!bm)
+		return -ENODEV;
+	bm_set_pool(bm, bpid, thresholds[0], thresholds[1],
+		thresholds[2], thresholds[3]);
+	return 0;
+}
+EXPORT_SYMBOL(bm_pool_set);
+
+__init void bman_init_early(void)
+{
+	struct device_node *dn;
+	int ret;
+
+	for_each_compatible_node(dn, NULL, "fsl,bman") {
+		if (bm)
+			pr_err("%s: only one 'fsl,bman' allowed\n",
+				dn->full_name);
+		else {
+			if (!of_device_is_available(dn))
+				continue;
+
+			ret = fsl_bman_init(dn);
+			BUG_ON(ret);
+		}
+	}
+}
+
+static void log_edata_bits(u32 bit_count)
+{
+	u32 i, j, mask = 0xffffffff;
+
+	pr_warn("BMan ErrInt, EDATA:\n");
+	i = bit_count / 32;
+	if (bit_count % 32) {
+		i++;
+		mask = ~(mask << bit_count % 32);
+	}
+	j = 16 - i;
+	pr_warn("  0x%08x\n", bm_in(EDATA(j)) & mask);
+	j++;
+	for (; j < 16; j++)
+		pr_warn("  0x%08x\n", bm_in(EDATA(j)));
+}
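+
+/*
+ * Worked example (illustrative): for the 192-bit stockpile entry above,
+ * bit_count / 32 = 6 with no remainder, so the mask stays 0xffffffff and
+ * the dump covers EDATA(10) through EDATA(15).
+ */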
+
+static void log_additional_error_info(u32 isr_val, u32 ecsr_val)
+{
+	union bman_ecir ecir_val;
+	union bman_eadr eadr_val;
+
+	ecir_val.ecir_raw = bm_in(ECIR);
+	/* Is portal info valid */
+	if (ecsr_val & PORTAL_ECSR_ERR) {
+		pr_warn("BMan ErrInt: SWP id %d, numb %d, pid %d\n",
+			ecir_val.info.portal_num, ecir_val.info.numb,
+			ecir_val.info.pid);
+	}
+	if (ecsr_val & (BM_EIRQ_SBEI|BM_EIRQ_MBEI)) {
+		eadr_val.eadr_raw = bm_in(EADR);
+		pr_warn("BMan ErrInt: EADR Memory: %s, 0x%x\n",
+			error_mdata[eadr_val.info.memid].txt,
+			error_mdata[eadr_val.info.memid].addr_mask
+				& eadr_val.info.eadr);
+		log_edata_bits(error_mdata[eadr_val.info.memid].bits);
+	}
+}
+
+/* BMan interrupt handler */
+static irqreturn_t bman_isr(int irq, void *ptr)
+{
+	u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+
+	ier_val = bm_err_isr_enable_read(bm);
+	isr_val = bm_err_isr_status_read(bm);
+	ecsr_val = bm_in(ECSR);
+	isr_mask = isr_val & ier_val;
+
+	if (!isr_mask)
+		return IRQ_NONE;
+	for (i = 0; i < BMAN_HWE_COUNT; i++) {
+		if (bman_hwerr_txts[i].mask & isr_mask) {
+			pr_warn("BMan ErrInt: %s\n", bman_hwerr_txts[i].txt);
+			if (bman_hwerr_txts[i].mask & ecsr_val) {
+				log_additional_error_info(isr_mask, ecsr_val);
+				/* Re-arm error capture registers */
+				bm_out(ECSR, ecsr_val);
+			}
+			if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_UNENABLE) {
+				pr_devel("BMan un-enabling error 0x%x\n",
+					bman_hwerr_txts[i].mask);
+				ier_val &= ~bman_hwerr_txts[i].mask;
+				bm_err_isr_enable_write(bm, ier_val);
+			}
+		}
+	}
+	bm_err_isr_status_clear(bm, isr_val);
+	return IRQ_HANDLED;
+}
+
+static int __bind_irq(void)
+{
+	int ret, err_irq;
+
+	err_irq = of_irq_to_resource(bm_node, 0, NULL);
+	if (err_irq == NO_IRQ) {
+		pr_err("Can't get %s property '%s'\n", bm_node->full_name,
+			"interrupts");
+		return -ENODEV;
+	}
+	ret = request_irq(err_irq, bman_isr, IRQF_SHARED, "bman-err", bm_node);
+	if (ret) {
+		pr_err("request_irq() failed %d for '%s'\n", ret,
+			bm_node->full_name);
+		return -ENODEV;
+	}
+	/* Disable Buffer Pool State Change */
+	bm_err_isr_disable_write(bm, BM_EIRQ_BSCN);
+	/* Write-to-clear any stale bits (e.g. starvation being asserted prior
+	 * to resource allocation during driver init). */
+	bm_err_isr_status_clear(bm, 0xffffffff);
+	/* Enable Error Interrupts */
+	bm_err_isr_enable_write(bm, 0xffffffff);
+	return 0;
+}
+
+int bman_init_ccsr(struct device_node *node)
+{
+	int ret;
+	if (!bman_have_ccsr())
+		return 0;
+	if (node != bm_node)
+		return -EINVAL;
+	/* FBPR memory */
+	bm_set_memory(bm, fbpr_a, 0, fbpr_sz);
+	ret = __bind_irq();
+	if (ret)
+		return ret;
+	return 0;
+}
+
+u32 bm_pool_free_buffers(u32 bpid)
+{
+	return bm_in(POOL_CONTENT(bpid));
+}
+
+#ifdef CONFIG_SYSFS
+
+#define DRV_NAME "fsl-bman"
+
+static ssize_t show_fbpr_fpc(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(FBPR_FPC));
+}
+
+static ssize_t show_pool_count(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	u32 data;
+	int i;
+
+	if (!sscanf(dev_attr->attr.name, "%d", &i))
+		return -EINVAL;
+	data = bm_in(POOL_CONTENT(i));
+	return snprintf(buf, PAGE_SIZE, "%d\n", data);
+}
+
+static ssize_t show_err_isr(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%08x\n", bm_in(ERR_ISR));
+}
+
+static ssize_t show_sbec(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	int i;
+
+	if (!sscanf(dev_attr->attr.name, "sbec_%d", &i))
+		return -EINVAL;
+	return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(SBEC(i)));
+}
+
+static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL);
+static DEVICE_ATTR(fbpr_fpc, S_IRUSR, show_fbpr_fpc, NULL);
+
+/* Didn't use DEVICE_ATTR as 64 of them would be required.
+ * They are initialized when needed. */
+static char *name_attrs_pool_count; /* "xx" + null-terminator */
+static struct device_attribute *dev_attr_buffer_pool_count;
+
+static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL);
+
+static struct attribute *bman_dev_attributes[] = {
+	&dev_attr_fbpr_fpc.attr,
+	&dev_attr_err_isr.attr,
+	NULL
+};
+
+static struct attribute *bman_dev_ecr_attributes[] = {
+	&dev_attr_sbec_0.attr,
+	&dev_attr_sbec_1.attr,
+	NULL
+};
+
+static struct attribute **bman_dev_pool_count_attributes;
+
+/* root level */
+static const struct attribute_group bman_dev_attr_grp = {
+	.name = NULL,
+	.attrs = bman_dev_attributes
+};
+static const struct attribute_group bman_dev_ecr_grp = {
+	.name = "error_capture",
+	.attrs = bman_dev_ecr_attributes
+};
+static struct attribute_group bman_dev_pool_count_grp = {
+	.name = "pool_count",
+};
+
+static int of_fsl_bman_remove(struct platform_device *ofdev)
+{
+	sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
+	return 0;
+}
+
+static int of_fsl_bman_probe(struct platform_device *ofdev)
+{
+	int ret, i;
+	struct device *dev = &ofdev->dev;
+
+	ret = sysfs_create_group(&dev->kobj, &bman_dev_attr_grp);
+	if (ret)
+		goto done;
+	ret = sysfs_create_group(&dev->kobj, &bman_dev_ecr_grp);
+	if (ret)
+		goto del_group_0;
+
+	name_attrs_pool_count = devm_kmalloc(dev,
+		sizeof(char) * bman_pool_max * 3, GFP_KERNEL);
+	if (!name_attrs_pool_count) {
+		dev_err(dev, "Can't alloc name_attrs_pool_count\n");
+		ret = -ENOMEM;
+		goto del_group_1;
+	}
+
+	dev_attr_buffer_pool_count = devm_kmalloc(dev,
+		sizeof(struct device_attribute) * bman_pool_max, GFP_KERNEL);
+	if (!dev_attr_buffer_pool_count) {
+		dev_err(dev, "Can't alloc dev_attr_buffer_pool_count\n");
+		ret = -ENOMEM;
+		goto del_group_1;
+	}
+
+	bman_dev_pool_count_attributes = devm_kmalloc(dev,
+		sizeof(struct attribute *) * (bman_pool_max + 1), GFP_KERNEL);
+	if (!bman_dev_pool_count_attributes) {
+		dev_err(dev, "Can't alloc bman_dev_pool_count_attributes\n");
+		ret = -ENOMEM;
+		goto del_group_1;
+	}
+
+	for (i = 0; i < bman_pool_max; i++) {
+		ret = scnprintf((name_attrs_pool_count + i * 3), 3, "%d", i);
+		if (!ret) {
+			ret = -EINVAL;
+			goto del_group_1;
+		}
+		dev_attr_buffer_pool_count[i].attr.name =
+			(name_attrs_pool_count + i * 3);
+		dev_attr_buffer_pool_count[i].attr.mode = S_IRUSR;
+		dev_attr_buffer_pool_count[i].show = show_pool_count;
+		bman_dev_pool_count_attributes[i] =
+			&dev_attr_buffer_pool_count[i].attr;
+	}
+	bman_dev_pool_count_attributes[bman_pool_max] = NULL;
+
+	bman_dev_pool_count_grp.attrs = bman_dev_pool_count_attributes;
+
+	ret = sysfs_create_group(&dev->kobj, &bman_dev_pool_count_grp);
+	if (ret)
+		goto del_group_1;
+
+	goto done;
+
+del_group_1:
+	sysfs_remove_group(&dev->kobj, &bman_dev_ecr_grp);
+del_group_0:
+	sysfs_remove_group(&dev->kobj, &bman_dev_attr_grp);
+done:
+	if (ret)
+		dev_err(dev, "Cannot create dev attributes ret=%d\n", ret);
+	return ret;
+}
+
+static const struct of_device_id of_fsl_bman_ids[] = {
+	{
+		.compatible = "fsl,bman",
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, of_fsl_bman_ids);
+
+static struct platform_driver of_fsl_bman_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = of_fsl_bman_ids,
+	},
+	.probe = of_fsl_bman_probe,
+	.remove = of_fsl_bman_remove,
+};
+
+module_platform_driver(of_fsl_bman_driver);
+
+#endif /* CONFIG_SYSFS */
diff --git a/drivers/soc/fsl/bman.h b/drivers/soc/fsl/bman.h
new file mode 100644
index 0000000..025b76b
--- /dev/null
+++ b/drivers/soc/fsl/bman.h
@@ -0,0 +1,523 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+/***************************/
+/* Portal register assists */
+/***************************/
+
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH	0x0000
+#define BM_REG_RCR_CI_CINH	0x0004
+#define BM_REG_RCR_ITR		0x0008
+#define BM_REG_CFG		0x0100
+#define BM_REG_SCN(n)		(0x0200 + ((n) << 2))
+#define BM_REG_ISR		0x0e00
+#define BM_REG_IIR		0x0e0c
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR		0x0000
+#define BM_CL_RR0		0x0100
+#define BM_CL_RR1		0x0140
+#define BM_CL_RCR		0x1000
+#define BM_CL_RCR_PI_CENA	0x3000
+#define BM_CL_RCR_CI_CENA	0x3100
+
+/* BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses via lwsync(), hwsync(), and
+ * data-dependencies. Use of barrier()s or other order-preserving primitives
+ * simply degrades performance. Hence the use of the __raw_*() interfaces,
+ * which simply ensure that the compiler treats the portal registers as
+ * volatile (ie. non-coherent). */
+
+/* Cache-inhibited register access. */
+#define __bm_in(bm, o)		__raw_readl((bm)->addr_ci + (o))
+#define __bm_out(bm, o, val)	__raw_writel((val), (bm)->addr_ci + (o))
+#define bm_in(reg)		__bm_in(&portal->addr, BM_REG_##reg)
+#define bm_out(reg, val)	__bm_out(&portal->addr, BM_REG_##reg, val)
+
+/* Cache-enabled (index) register access */
+#define __bm_cl_touch_ro(bm, o) dcbt_ro((bm)->addr_ce + (o))
+#define __bm_cl_touch_rw(bm, o) dcbt_rw((bm)->addr_ce + (o))
+#define __bm_cl_in(bm, o)	__raw_readl((bm)->addr_ce + (o))
+#define __bm_cl_out(bm, o, val) \
+	do { \
+		u32 *__tmpclout = (bm)->addr_ce + (o); \
+		__raw_writel((val), __tmpclout); \
+		dcbf(__tmpclout); \
+	} while (0)
+#define __bm_cl_invalidate(bm, o) dcbi((bm)->addr_ce + (o))
+#define bm_cl_touch_ro(reg) __bm_cl_touch_ro(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_touch_rw(reg) __bm_cl_touch_rw(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_in(reg)	    __bm_cl_in(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_out(reg, val) __bm_cl_out(&portal->addr, BM_CL_##reg##_CENA, val)
+#define bm_cl_invalidate(reg)\
+	__bm_cl_invalidate(&portal->addr, BM_CL_##reg##_CENA)
+
+/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
+ * analysis, look at using the "extra" bit in the ring index registers to avoid
+ * cyclic issues. */
+static inline u8 bm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+	/* 'first' is included, 'last' is excluded */
+	if (first <= last)
+		return last - first;
+	return ringsize + last - first;
+}
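+
+/*
+ * Worked example (illustrative): with ringsize 8, first = 6 and last = 2
+ * wrap around the ring, giving 8 + 2 - 6 = 4 in-flight entries.
+ */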
+
+/* Portal modes.
+ *   Enum types:
+ *     pmode == production mode
+ *     cmode == consumption mode
+ *   Enum values use 3 letter codes. First letter matches the portal mode,
+ *   remaining two letters indicate:
+ *     ci == cache-inhibited portal register
+ *     ce == cache-enabled portal register
+ *     vb == in-band valid-bit (cache-enabled)
+ */
+enum bm_rcr_pmode {		/* matches BCSP_CFG::RPM */
+	bm_rcr_pci = 0,		/* PI index, cache-inhibited */
+	bm_rcr_pce = 1,		/* PI index, cache-enabled */
+	bm_rcr_pvb = 2		/* valid-bit */
+};
+enum bm_rcr_cmode {		/* s/w-only */
+	bm_rcr_cci,		/* CI index, cache-inhibited */
+	bm_rcr_cce		/* CI index, cache-enabled */
+};
+
+
+/* ------------------------- */
+/* --- Portal structures --- */
+
+#define BM_RCR_SIZE		8
+
+struct bm_rcr {
+	struct bm_rcr_entry *ring, *cursor;
+	u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	u32 busy;
+	enum bm_rcr_pmode pmode;
+	enum bm_rcr_cmode cmode;
+#endif
+};
+
+struct bm_mc {
+	struct bm_mc_command *cr;
+	struct bm_mc_result *rr;
+	u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	enum {
+		/* Can only be _mc_start()ed */
+		mc_idle,
+		/* Can only be _mc_commit()ed or _mc_abort()ed */
+		mc_user,
+		/* Can only be _mc_retry()ed */
+		mc_hw
+	} state;
+#endif
+};
+
+struct bm_addr {
+	void __iomem *addr_ce;	/* cache-enabled */
+	void __iomem *addr_ci;	/* cache-inhibited */
+};
+
+struct bm_portal {
+	struct bm_addr addr;
+	struct bm_rcr rcr;
+	struct bm_mc mc;
+	struct bm_portal_config config;
+} ____cacheline_aligned;
+
+
+/* --------------- */
+/* --- RCR API --- */
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+#define RCR_CARRYCLEAR(p) \
+	(void *)((unsigned long)(p) & (~(unsigned long)(BM_RCR_SIZE << 6)))
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static inline u8 RCR_PTR2IDX(struct bm_rcr_entry *e)
+{
+	return ((uintptr_t)e >> 6) & (BM_RCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void RCR_INC(struct bm_rcr *rcr)
+{
+	/* NB: this is odd-looking, but experiments show that it generates
+	 * fast code with essentially no branching overheads. We increment to
+	 * the next RCR pointer and handle overflow and 'vbit'. */
+	struct bm_rcr_entry *partial = rcr->cursor + 1;
+	rcr->cursor = RCR_CARRYCLEAR(partial);
+	if (partial != rcr->cursor)
+		rcr->vbit ^= BM_RCR_VERB_VBIT;
+}
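+
+/*
+ * Illustrative arithmetic: with BM_RCR_SIZE 8 and 64-byte ring entries the
+ * ring spans 512 bytes, so stepping past the last entry sets bit 9 (8 << 6)
+ * of the cursor address; RCR_CARRYCLEAR() masks that bit off, wrapping the
+ * cursor back to the ring base, and that wrap is what toggles 'vbit'.
+ */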
+
+static inline int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
+		__maybe_unused enum bm_rcr_cmode cmode)
+{
+	/* This use of 'register', as well as all other occurrences, is because
+	 * it has been observed to generate much faster code with gcc than is
+	 * otherwise the case. */
+	register struct bm_rcr *rcr = &portal->rcr;
+	u32 cfg;
+	u8 pi;
+
+	rcr->ring = portal->addr.addr_ce + BM_CL_RCR;
+	rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+	pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+	rcr->cursor = rcr->ring + pi;
+	rcr->vbit = (bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ? BM_RCR_VERB_VBIT : 0;
+	rcr->available = BM_RCR_SIZE - 1
+		- bm_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+	rcr->ithresh = bm_in(RCR_ITR);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	rcr->busy = 0;
+	rcr->pmode = pmode;
+	rcr->cmode = cmode;
+#endif
+	cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */
+	bm_out(CFG, cfg);
+	return 0;
+}
+
+static inline void bm_rcr_finish(struct bm_portal *portal)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+	u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+	u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+	DPA_ASSERT(!rcr->busy);
+	if (pi != RCR_PTR2IDX(rcr->cursor))
+		pr_crit("losing uncommited RCR entries\n");
+	if (ci != rcr->ci)
+		pr_crit("missing existing RCR completions\n");
+	if (rcr->ci != RCR_PTR2IDX(rcr->cursor))
+		pr_crit("RCR destroyed unquiesced\n");
+}
+
+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+	DPA_ASSERT(!rcr->busy);
+	if (!rcr->available)
+		return NULL;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	rcr->busy = 1;
+#endif
+	dcbz_64(rcr->cursor);
+	return rcr->cursor;
+}
+
+static inline void bm_rcr_abort(struct bm_portal *portal)
+{
+	__maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+	DPA_ASSERT(rcr->busy);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	rcr->busy = 0;
+#endif
+}
+
+static inline struct bm_rcr_entry *bm_rcr_pend_and_next(
+					struct bm_portal *portal, u8 myverb)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+	DPA_ASSERT(rcr->busy);
+	DPA_ASSERT(rcr->pmode != bm_rcr_pvb);
+	if (rcr->available == 1)
+		return NULL;
+	rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+	dcbf_64(rcr->cursor);
+	RCR_INC(rcr);
+	rcr->available--;
+	dcbz_64(rcr->cursor);
+	return rcr->cursor;
+}
+
+static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+	DPA_ASSERT(rcr->busy);
+	DPA_ASSERT(rcr->pmode == bm_rcr_pci);
+	rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+	RCR_INC(rcr);
+	rcr->available--;
+	hwsync();
+	bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor));
+#ifdef CONFIG_FSL_DPA_CHECKING
+	rcr->busy = 0;
+#endif
+}
+
+static inline void bm_rcr_pce_prefetch(struct bm_portal *portal)
+{
+	__maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+	DPA_ASSERT(rcr->pmode == bm_rcr_pce);
+	bm_cl_invalidate(RCR_PI);
+	bm_cl_touch_rw(RCR_PI);
+}
+
+static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+	DPA_ASSERT(rcr->busy);
+	DPA_ASSERT(rcr->pmode == bm_rcr_pce);
+	rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+	RCR_INC(rcr);
+	rcr->available--;
+	lwsync();
+	bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor));
+#ifdef CONFIG_FSL_DPA_CHECKING
+	rcr->busy = 0;
+#endif
+}
+
+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+	struct bm_rcr_entry *rcursor;
+	DPA_ASSERT(rcr->busy);
+	DPA_ASSERT(rcr->pmode == bm_rcr_pvb);
+	lwsync();
+	rcursor = rcr->cursor;
+	rcursor->__dont_write_directly__verb = myverb | rcr->vbit;
+	dcbf_64(rcursor);
+	RCR_INC(rcr);
+	rcr->available--;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	rcr->busy = 0;
+#endif
+}
+
+static inline u8 bm_rcr_cci_update(struct bm_portal *portal)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+	u8 diff, old_ci = rcr->ci;
+	DPA_ASSERT(rcr->cmode == bm_rcr_cci);
+	rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+	diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+	rcr->available += diff;
+	return diff;
+}
+
+static inline void bm_rcr_cce_prefetch(struct bm_portal *portal)
+{
+	__maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+	DPA_ASSERT(rcr->cmode == bm_rcr_cce);
+	bm_cl_touch_ro(RCR_CI);
+}
+
+static inline u8 bm_rcr_cce_update(struct bm_portal *portal)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+	u8 diff, old_ci = rcr->ci;
+	DPA_ASSERT(rcr->cmode == bm_rcr_cce);
+	rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1);
+	bm_cl_invalidate(RCR_CI);
+	diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+	rcr->available += diff;
+	return diff;
+}
+
+static inline u8 bm_rcr_get_ithresh(struct bm_portal *portal)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+	return rcr->ithresh;
+}
+
+static inline void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+	rcr->ithresh = ithresh;
+	bm_out(RCR_ITR, ithresh);
+}
+
+static inline u8 bm_rcr_get_avail(struct bm_portal *portal)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+	return rcr->available;
+}
+
+static inline u8 bm_rcr_get_fill(struct bm_portal *portal)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+	return BM_RCR_SIZE - 1 - rcr->available;
+}
+
+
+/* ------------------------------ */
+/* --- Management command API --- */
+
+static inline int bm_mc_init(struct bm_portal *portal)
+{
+	register struct bm_mc *mc = &portal->mc;
+	mc->cr = portal->addr.addr_ce + BM_CL_CR;
+	mc->rr = portal->addr.addr_ce + BM_CL_RR0;
+	mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
+			BM_MCC_VERB_VBIT) ? 0 : 1;
+	mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mc->state = mc_idle;
+#endif
+	return 0;
+}
+
+static inline void bm_mc_finish(struct bm_portal *portal)
+{
+	__maybe_unused register struct bm_mc *mc = &portal->mc;
+	DPA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (mc->state != mc_idle)
+		pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
+{
+	register struct bm_mc *mc = &portal->mc;
+	DPA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mc->state = mc_user;
+#endif
+	dcbz_64(mc->cr);
+	return mc->cr;
+}
+
+static inline void bm_mc_abort(struct bm_portal *portal)
+{
+	__maybe_unused register struct bm_mc *mc = &portal->mc;
+	DPA_ASSERT(mc->state == mc_user);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mc->state = mc_idle;
+#endif
+}
+
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+	register struct bm_mc *mc = &portal->mc;
+	struct bm_mc_result *rr = mc->rr + mc->rridx;
+	DPA_ASSERT(mc->state == mc_user);
+	lwsync();
+	mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
+	dcbf(mc->cr);
+	dcbit_ro(rr);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mc->state = mc_hw;
+#endif
+}
+
+static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal)
+{
+	register struct bm_mc *mc = &portal->mc;
+	struct bm_mc_result *rr = mc->rr + mc->rridx;
+	DPA_ASSERT(mc->state == mc_hw);
+	/* The inactive response register's verb byte always returns zero until
+	 * its command is submitted and completed. This includes the valid-bit,
+	 * in case you were wondering... */
+	if (!__raw_readb(&rr->verb)) {
+		dcbit_ro(rr);
+		return NULL;
+	}
+	mc->rridx ^= 1;
+	mc->vbit ^= BM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mc->state = mc_idle;
+#endif
+	return rr;
+}
+
+
+/* ------------------------------------- */
+/* --- Portal interrupt register API --- */
+
+static inline int bm_isr_init(__always_unused struct bm_portal *portal)
+{
+	return 0;
+}
+
+static inline void bm_isr_finish(__always_unused struct bm_portal *portal)
+{
+}
+
+#define SCN_REG(bpid) BM_REG_SCN((bpid) / 32)
+#define SCN_BIT(bpid) (0x80000000 >> ((bpid) & 31))
+static inline void bm_isr_bscn_mask(struct bm_portal *portal, u8 bpid,
+					int enable)
+{
+	u32 val;
+	DPA_ASSERT(bpid < bman_pool_max);
+	/* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */
+	val = __bm_in(&portal->addr, SCN_REG(bpid));
+	if (enable)
+		val |= SCN_BIT(bpid);
+	else
+		val &= ~SCN_BIT(bpid);
+	__bm_out(&portal->addr, SCN_REG(bpid), val);
+}
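+
+/*
+ * Worked example (illustrative): bpid 40 maps to SCN_REG(40) = BM_REG_SCN(1)
+ * = 0x0204 and SCN_BIT(40) = 0x80000000 >> 8 = 0x00800000, i.e. the ninth
+ * most-significant bit of the second SCN word.
+ */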
+
+static inline u32 __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n)
+{
+	return __bm_in(&portal->addr, BM_REG_ISR + (n << 2));
+}
+
+static inline void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n,
+					u32 val)
+{
+	__bm_out(&portal->addr, BM_REG_ISR + (n << 2), val);
+}
+
+/* Buffer Pool Cleanup */
+static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid)
+{
+	struct bm_mc_command *bm_cmd;
+	struct bm_mc_result *bm_res;
+
+	int aq_count = 0;
+	bool stop = false;
+	while (!stop) {
+		/* Acquire buffers until empty */
+		bm_cmd = bm_mc_start(p);
+		bm_cmd->acquire.bpid = bpid;
+		bm_mc_commit(p, BM_MCC_VERB_CMD_ACQUIRE | 1);
+		while (!(bm_res = bm_mc_result(p)))
+			cpu_relax();
+		if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
+			/* Pool is empty */
+			/* TBD: Should we do a few extra iterations in
+			   case some other blocks keep buffers 'on deck',
+			   which may also be problematic? */
+			stop = true;
+		} else
+			++aq_count;
+	}
+	return 0;
+}
diff --git a/drivers/soc/fsl/bman_api.c b/drivers/soc/fsl/bman_api.c
new file mode 100644
index 0000000..3211a4c
--- /dev/null
+++ b/drivers/soc/fsl/bman_api.c
@@ -0,0 +1,1032 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman.h"
+
+/* Compilation constants */
+#define RCR_THRESH	2	/* reread h/w CI when running out of space */
+#define IRQNAME		"BMan portal %d"
+#define MAX_IRQNAME	16	/* big enough for "BMan portal %d" */
+
+struct bman_portal {
+	struct bm_portal p;
+	/* 2-element array. pools[0] is mask, pools[1] is snapshot. */
+	struct bman_depletion *pools;
+	int thresh_set;
+	unsigned long irq_sources;
+	u32 slowpoll;	/* only used when interrupts are off */
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+	struct bman_pool *rcri_owned; /* only 1 release WAIT_SYNC at a time */
+#endif
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+	raw_spinlock_t sharing_lock; /* only used if is_shared */
+	int is_shared;
+	struct bman_portal *sharing_redirect;
+#endif
+	/* When the cpu-affine portal is activated, this is non-NULL */
+	const struct bm_portal_config *config;
+	/* 64-entry hash-table of pool objects that are tracking depletion
+	 * entry/exit (ie. BMAN_POOL_FLAG_DEPLETION). This isn't fast-path, so
+	 * we're not fussy about cache-misses and so forth - whereas the above
+	 * members should all fit in one cacheline.
+	 * BTW, with 64 entries in the hash table and 64 buffer pools to track,
+	 * you'll never guess the hash-function ... */
+	struct bman_pool *cb[64];
+	char irqname[MAX_IRQNAME];
+	/* Track if the portal was alloced by the driver */
+	u8 alloced;
+};
+
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+#define PORTAL_IRQ_LOCK(p, irqflags) \
+	do { \
+		if ((p)->is_shared) \
+			raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \
+		else \
+			local_irq_save(irqflags); \
+	} while (0)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) \
+	do { \
+		if ((p)->is_shared) \
+			raw_spin_unlock_irqrestore(&(p)->sharing_lock, \
+						   irqflags); \
+		else \
+			local_irq_restore(irqflags); \
+	} while (0)
+#else
+#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags)
+#endif
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
+static inline struct bman_portal *get_raw_affine_portal(void)
+{
+	return &get_cpu_var(bman_affine_portal);
+}
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+static inline struct bman_portal *get_affine_portal(void)
+{
+	struct bman_portal *p = get_raw_affine_portal();
+	if (p->sharing_redirect)
+		return p->sharing_redirect;
+	return p;
+}
+#else
+#define get_affine_portal() get_raw_affine_portal()
+#endif
+static inline void put_affine_portal(void)
+{
+	put_cpu_var(bman_affine_portal);
+}
+static inline struct bman_portal *get_poll_portal(void)
+{
+	return this_cpu_ptr(&bman_affine_portal);
+}
+#define put_poll_portal()
+
+/* GOTCHA: this object type refers to a pool; it isn't *the* pool. There may be
+ * more than one such object per BMan buffer pool, eg. if different users of the
+ * pool are operating via different portals. */
+struct bman_pool {
+	struct bman_pool_params params;
+	/* Used for hash-table admin when using depletion notifications. */
+	struct bman_portal *portal;
+	struct bman_pool *next;
+	/* stockpile state - NULL unless BMAN_POOL_FLAG_STOCKPILE is set */
+	struct bm_buffer *sp;
+	unsigned int sp_fill;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	atomic_t in_use;
+#endif
+};
+
+/* (De)Registration of depletion notification callbacks */
+static void depletion_link(struct bman_portal *portal, struct bman_pool *pool)
+{
+	__maybe_unused unsigned long irqflags;
+	pool->portal = portal;
+	PORTAL_IRQ_LOCK(portal, irqflags);
+	pool->next = portal->cb[pool->params.bpid];
+	portal->cb[pool->params.bpid] = pool;
+	if (!pool->next)
+		/* First object for that bpid on this portal, enable the BSCN
+		 * mask bit. */
+		bm_isr_bscn_mask(&portal->p, pool->params.bpid, 1);
+	PORTAL_IRQ_UNLOCK(portal, irqflags);
+}
+static void depletion_unlink(struct bman_pool *pool)
+{
+	struct bman_pool *it, *last = NULL;
+	struct bman_pool **base = &pool->portal->cb[pool->params.bpid];
+	__maybe_unused unsigned long irqflags;
+	PORTAL_IRQ_LOCK(pool->portal, irqflags);
+	it = *base;	/* <-- gotcha, don't do this prior to the irq_save */
+	while (it != pool) {
+		last = it;
+		it = it->next;
+	}
+	if (!last)
+		*base = pool->next;
+	else
+		last->next = pool->next;
+	if (!last && !pool->next) {
+		/* Last object for that bpid on this portal, disable the BSCN
+		 * mask bit. */
+		bm_isr_bscn_mask(&pool->portal->p, pool->params.bpid, 0);
+		/* And "forget" that we last saw this pool as depleted */
+		bman_depletion_unset(&pool->portal->pools[1],
+					pool->params.bpid);
+	}
+	PORTAL_IRQ_UNLOCK(pool->portal, irqflags);
+}
+
+/* When the application's core loop calls bman_poll(), we need to balance how
+ * often we incur the overheads of the slow-path poll. We use two decrementer
+ * sources: the idle decrementer constant is used when the last slow-poll
+ * detected no work to do, and the busy decrementer constant when the last
+ * slow-poll had work to do. */
+#define SLOW_POLL_IDLE 1000
+#define SLOW_POLL_BUSY 10
+static u32 __poll_portal_slow(struct bman_portal *p, u32 is);
+
+/* Portal interrupt handler */
+static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
+{
+	struct bman_portal *p = ptr;
+	u32 clear = p->irq_sources;
+	u32 is = bm_isr_status_read(&p->p) & p->irq_sources;
+	clear |= __poll_portal_slow(p, is);
+	bm_isr_status_clear(&p->p, clear);
+	return IRQ_HANDLED;
+}
+
+
+struct bman_portal *bman_create_portal(
+				       struct bman_portal *portal,
+				       const struct bm_portal_config *config)
+{
+	struct bm_portal *__p;
+	const struct bman_depletion *pools = &config->public_cfg.mask;
+	int ret;
+	u8 bpid = 0;
+
+	if (!portal) {
+		portal = kmalloc(sizeof(*portal), GFP_KERNEL);
+		if (!portal)
+			return portal;
+		portal->alloced = 1;
+	} else
+		portal->alloced = 0;
+
+	__p = &portal->p;
+
+	/* prep the low-level portal struct with the mapped addresses from the
+	 * config; everything that follows depends on it, and "config" is kept
+	 * mostly for (de)reference... */
+	__p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
+	__p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
+	if (bm_rcr_init(__p, bm_rcr_pvb, bm_rcr_cce)) {
+		pr_err("BMan RCR initialisation failed\n");
+		goto fail_rcr;
+	}
+	if (bm_mc_init(__p)) {
+		pr_err("BMan MC initialisation failed\n");
+		goto fail_mc;
+	}
+	if (bm_isr_init(__p)) {
+		pr_err("BMan ISR initialisation failed\n");
+		goto fail_isr;
+	}
+	portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL);
+	if (!portal->pools)
+		goto fail_pools;
+	portal->pools[0] = *pools;
+	bman_depletion_init(portal->pools + 1);
+	while (bpid < bman_pool_max) {
+		/* Default to all BPIDs disabled, we enable as required at
+		 * run-time. */
+		bm_isr_bscn_mask(__p, bpid, 0);
+		bpid++;
+	}
+	portal->slowpoll = 0;
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+	portal->rcri_owned = NULL;
+#endif
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+	raw_spin_lock_init(&portal->sharing_lock);
+	portal->is_shared = config->public_cfg.is_shared;
+	portal->sharing_redirect = NULL;
+#endif
+	memset(&portal->cb, 0, sizeof(portal->cb));
+	/* Write-to-clear any stale interrupt status bits */
+	bm_isr_disable_write(__p, 0xffffffff);
+	portal->irq_sources = 0;
+	bm_isr_enable_write(__p, portal->irq_sources);
+	bm_isr_status_clear(__p, 0xffffffff);
+	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu);
+	if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname,
+			portal)) {
+		pr_err("request_irq() failed\n");
+		goto fail_irq;
+	}
+	if ((config->public_cfg.cpu != -1) &&
+			irq_can_set_affinity(config->public_cfg.irq) &&
+			irq_set_affinity(config->public_cfg.irq,
+				cpumask_of(config->public_cfg.cpu))) {
+		pr_err("irq_set_affinity() failed\n");
+		goto fail_affinity;
+	}
+
+	/* Need RCR to be empty before continuing */
+	ret = bm_rcr_get_fill(__p);
+	if (ret) {
+		pr_err("BMan RCR unclean\n");
+		goto fail_rcr_empty;
+	}
+	/* Success */
+	portal->config = config;
+
+	bm_isr_disable_write(__p, 0);
+	bm_isr_uninhibit(__p);
+	return portal;
+fail_rcr_empty:
+fail_affinity:
+	free_irq(config->public_cfg.irq, portal);
+fail_irq:
+	kfree(portal->pools);
+fail_pools:
+	bm_isr_finish(__p);
+fail_isr:
+	bm_mc_finish(__p);
+fail_mc:
+	bm_rcr_finish(__p);
+fail_rcr:
+	if (portal->alloced)
+		kfree(portal);
+	return NULL;
+}
+
+struct bman_portal *bman_create_affine_portal(
+			const struct bm_portal_config *config)
+{
+	struct bman_portal *portal;
+
+	portal = &per_cpu(bman_affine_portal, config->public_cfg.cpu);
+	portal = bman_create_portal(portal, config);
+	if (portal) {
+		spin_lock(&affine_mask_lock);
+		cpumask_set_cpu(config->public_cfg.cpu, &affine_mask);
+		spin_unlock(&affine_mask_lock);
+	}
+	return portal;
+}
+
+
+struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
+								int cpu)
+{
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+	struct bman_portal *p;
+	p = &per_cpu(bman_affine_portal, cpu);
+	BUG_ON(p->config);
+	BUG_ON(p->is_shared);
+	BUG_ON(!redirect->config->public_cfg.is_shared);
+	p->irq_sources = 0;
+	p->sharing_redirect = redirect;
+	put_affine_portal();
+	return p;
+#else
+	BUG();
+	return NULL;
+#endif
+}
+
+void bman_destroy_portal(struct bman_portal *bm)
+{
+	const struct bm_portal_config *pcfg;
+	pcfg = bm->config;
+	bm_rcr_cce_update(&bm->p);
+	bm_rcr_cce_update(&bm->p);
+
+	free_irq(pcfg->public_cfg.irq, bm);
+
+	kfree(bm->pools);
+	bm_isr_finish(&bm->p);
+	bm_mc_finish(&bm->p);
+	bm_rcr_finish(&bm->p);
+	bm->config = NULL;
+	if (bm->alloced)
+		kfree(bm);
+}
+
+const struct bm_portal_config *bman_destroy_affine_portal(void)
+{
+	struct bman_portal *bm = get_raw_affine_portal();
+	const struct bm_portal_config *pcfg;
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+	if (bm->sharing_redirect) {
+		bm->sharing_redirect = NULL;
+		put_affine_portal();
+		return NULL;
+	}
+	bm->is_shared = 0;
+#endif
+	pcfg = bm->config;
+	bman_destroy_portal(bm);
+	spin_lock(&affine_mask_lock);
+	cpumask_clear_cpu(pcfg->public_cfg.cpu, &affine_mask);
+	spin_unlock(&affine_mask_lock);
+	put_affine_portal();
+	return pcfg;
+}
+
+/* When release logic waits on available RCR space, we need a global waitqueue
+ * in the case of "affine" use (as the waits wake on different cpus which means
+ * different portals - so we can't wait on any per-portal waitqueue). */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static u32 __poll_portal_slow(struct bman_portal *p, u32 is)
+{
+	struct bman_depletion tmp;
+	u32 ret = is;
+
+	/* There is a gotcha to be aware of. If we do the query before clearing
+	 * the status register, we may miss state changes that occur between the
+	 * two. If we write to clear the status register before the query, the
+	 * cache-enabled query command may overtake the status register write
+	 * unless we use a heavyweight sync (which we don't want). Instead, we
+	 * write-to-clear the status register then *read it back* before doing
+	 * the query, hence the odd while loop with the 'is' accumulation. */
+	if (is & BM_PIRQ_BSCN) {
+		struct bm_mc_result *mcr;
+		__maybe_unused unsigned long irqflags;
+		unsigned int i, j;
+		u32 __is;
+		bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
+		while ((__is = bm_isr_status_read(&p->p)) & BM_PIRQ_BSCN) {
+			is |= __is;
+			bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
+		}
+		is &= ~BM_PIRQ_BSCN;
+		PORTAL_IRQ_LOCK(p, irqflags);
+		bm_mc_start(&p->p);
+		bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
+		while (!(mcr = bm_mc_result(&p->p)))
+			cpu_relax();
+		tmp = mcr->query.ds.state;
+		PORTAL_IRQ_UNLOCK(p, irqflags);
+		for (i = 0; i < 2; i++) {
+			int idx = i * 32;
+			/* tmp is a mask of currently-depleted pools.
+			 * pools[0] is mask of those we care about.
+			 * pools[1] is our previous view (we only want to
+			 * be told about changes). */
+			tmp.__state[i] &= p->pools[0].__state[i];
+			if (tmp.__state[i] == p->pools[1].__state[i])
+				/* fast-path, nothing to see, move along */
+				continue;
+			for (j = 0; j <= 31; j++, idx++) {
+				struct bman_pool *pool = p->cb[idx];
+				int b4 = bman_depletion_get(&p->pools[1], idx);
+				int af = bman_depletion_get(&tmp, idx);
+				if (b4 == af)
+					continue;
+				while (pool) {
+					pool->params.cb(p, pool,
+						pool->params.cb_ctx, af);
+					pool = pool->next;
+				}
+			}
+		}
+		p->pools[1] = tmp;
+	}
+
+	if (is & BM_PIRQ_RCRI) {
+		__maybe_unused unsigned long irqflags;
+		PORTAL_IRQ_LOCK(p, irqflags);
+		bm_rcr_cce_update(&p->p);
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+		/* If waiting for sync, we only cancel the interrupt threshold
+		 * when the ring utilisation hits zero. */
+		if (p->rcri_owned) {
+			if (!bm_rcr_get_fill(&p->p)) {
+				p->rcri_owned = NULL;
+				bm_rcr_set_ithresh(&p->p, 0);
+			}
+		} else
+#endif
+		bm_rcr_set_ithresh(&p->p, 0);
+		PORTAL_IRQ_UNLOCK(p, irqflags);
+		wake_up(&affine_queue);
+		bm_isr_status_clear(&p->p, BM_PIRQ_RCRI);
+		is &= ~BM_PIRQ_RCRI;
+	}
+
+	/* There should be no status register bits left undefined */
+	DPA_ASSERT(!is);
+	return ret;
+}
+
+const struct bman_portal_config *bman_get_portal_config(void)
+{
+	struct bman_portal *p = get_affine_portal();
+	const struct bman_portal_config *ret = &p->config->public_cfg;
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(bman_get_portal_config);
+
+u32 bman_irqsource_get(void)
+{
+	struct bman_portal *p = get_raw_affine_portal();
+	u32 ret = p->irq_sources & BM_PIRQ_VISIBLE;
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(bman_irqsource_get);
+
+int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits)
+{
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+	if (p->sharing_redirect)
+		return -EINVAL;
+	else
+#endif
+	{
+		__maybe_unused unsigned long irqflags;
+		PORTAL_IRQ_LOCK(p, irqflags);
+		set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
+		bm_isr_enable_write(&p->p, p->irq_sources);
+		PORTAL_IRQ_UNLOCK(p, irqflags);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(bman_p_irqsource_add);
+
+int bman_irqsource_add(__maybe_unused u32 bits)
+{
+	struct bman_portal *p = get_raw_affine_portal();
+	int ret = 0;
+	ret = bman_p_irqsource_add(p, bits);
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(bman_irqsource_add);
+
+int bman_irqsource_remove(u32 bits)
+{
+	struct bman_portal *p = get_raw_affine_portal();
+	__maybe_unused unsigned long irqflags;
+	u32 ier;
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+	if (p->sharing_redirect) {
+		put_affine_portal();
+		return -EINVAL;
+	}
+#endif
+	/* Our interrupt handler only processes+clears status register bits that
+	 * are in p->irq_sources. As we're trimming that mask, if one of them
+	 * were to assert in the status register just before we remove it from
+	 * the enable register, there would be an interrupt-storm when we
+	 * release the IRQ lock. So we wait for the enable register update to
+	 * take effect in h/w (by reading it back) and then clear all other bits
+	 * in the status register. Ie. we clear them from ISR once it's certain
+	 * IER won't allow them to reassert. */
+	PORTAL_IRQ_LOCK(p, irqflags);
+	bits &= BM_PIRQ_VISIBLE;
+	clear_bits(bits, &p->irq_sources);
+	bm_isr_enable_write(&p->p, p->irq_sources);
+	ier = bm_isr_enable_read(&p->p);
+	/* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+	 * data-dependency, ie. to protect against re-ordering. */
+	bm_isr_status_clear(&p->p, ~ier);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return 0;
+}
+EXPORT_SYMBOL(bman_irqsource_remove);
+
+const cpumask_t *bman_affine_cpus(void)
+{
+	return &affine_mask;
+}
+EXPORT_SYMBOL(bman_affine_cpus);
+
+u32 bman_poll_slow(void)
+{
+	struct bman_portal *p = get_poll_portal();
+	u32 ret;
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+	if (unlikely(p->sharing_redirect))
+		ret = (u32)-1;
+	else
+#endif
+	{
+		u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;
+		ret = __poll_portal_slow(p, is);
+		bm_isr_status_clear(&p->p, ret);
+	}
+	put_poll_portal();
+	return ret;
+}
+EXPORT_SYMBOL(bman_poll_slow);
+
+/* Legacy wrapper */
+void bman_poll(void)
+{
+	struct bman_portal *p = get_poll_portal();
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+	if (unlikely(p->sharing_redirect))
+		goto done;
+#endif
+	if (!(p->slowpoll--)) {
+		u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;
+		u32 active = __poll_portal_slow(p, is);
+		if (active)
+			p->slowpoll = SLOW_POLL_BUSY;
+		else
+			p->slowpoll = SLOW_POLL_IDLE;
+	}
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+done:
+#endif
+	put_poll_portal();
+}
+EXPORT_SYMBOL(bman_poll);
+
+static const u32 zero_thresholds[4] = {0, 0, 0, 0};
+
+struct bman_pool *bman_new_pool(const struct bman_pool_params *params)
+{
+	struct bman_pool *pool = NULL;
+	u32 bpid;
+
+	if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) {
+		int ret = bman_alloc_bpid(&bpid);
+		if (ret)
+			return NULL;
+	} else {
+		if (params->bpid >= bman_pool_max)
+			return NULL;
+		bpid = params->bpid;
+	}
+#ifdef CONFIG_FSL_BMAN_CONFIG
+	if (params->flags & BMAN_POOL_FLAG_THRESH) {
+		int ret = bm_pool_set(bpid, params->thresholds);
+		if (ret)
+			goto err;
+	}
+#else
+	if (params->flags & BMAN_POOL_FLAG_THRESH)
+		goto err;
+#endif
+	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		goto err;
+	pool->sp = NULL;
+	pool->sp_fill = 0;
+	pool->params = *params;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	atomic_set(&pool->in_use, 1);
+#endif
+	if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+		pool->params.bpid = bpid;
+	if (params->flags & BMAN_POOL_FLAG_STOCKPILE) {
+		pool->sp = kmalloc(sizeof(struct bm_buffer) * BMAN_STOCKPILE_SZ,
+					GFP_KERNEL);
+		if (!pool->sp)
+			goto err;
+	}
+	if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION) {
+		struct bman_portal *p = get_affine_portal();
+		if (!p->pools || !bman_depletion_get(&p->pools[0], bpid)) {
+			pr_err("Depletion events disabled for bpid %d\n", bpid);
+			put_affine_portal();
+			goto err;
+		}
+		depletion_link(p, pool);
+		put_affine_portal();
+	}
+	return pool;
+err:
+#ifdef CONFIG_FSL_BMAN_CONFIG
+	if (params->flags & BMAN_POOL_FLAG_THRESH)
+		bm_pool_set(bpid, zero_thresholds);
+#endif
+	if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+		bman_release_bpid(bpid);
+	if (pool) {
+		kfree(pool->sp);
+		kfree(pool);
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(bman_new_pool);
+
+void bman_free_pool(struct bman_pool *pool)
+{
+#ifdef CONFIG_FSL_BMAN_CONFIG
+	if (pool->params.flags & BMAN_POOL_FLAG_THRESH)
+		bm_pool_set(pool->params.bpid, zero_thresholds);
+#endif
+	if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION)
+		depletion_unlink(pool);
+	if (pool->params.flags & BMAN_POOL_FLAG_STOCKPILE) {
+		if (pool->sp_fill)
+			pr_err("Stockpile not flushed, has %u in bpid %u.\n",
+				pool->sp_fill, pool->params.bpid);
+		kfree(pool->sp);
+		pool->sp = NULL;
+		pool->params.flags &= ~BMAN_POOL_FLAG_STOCKPILE;
+	}
+	if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+		bman_release_bpid(pool->params.bpid);
+	kfree(pool);
+}
+EXPORT_SYMBOL(bman_free_pool);
+
+const struct bman_pool_params *bman_get_params(const struct bman_pool *pool)
+{
+	return &pool->params;
+}
+EXPORT_SYMBOL(bman_get_params);
+
+static noinline void update_rcr_ci(struct bman_portal *p, u8 avail)
+{
+	if (avail)
+		bm_rcr_cce_prefetch(&p->p);
+	else
+		bm_rcr_cce_update(&p->p);
+}
+
+int bman_rcr_is_empty(void)
+{
+	__maybe_unused unsigned long irqflags;
+	struct bman_portal *p = get_affine_portal();
+	u8 avail;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	update_rcr_ci(p, 0);
+	avail = bm_rcr_get_fill(&p->p);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return avail == 0;
+}
+EXPORT_SYMBOL(bman_rcr_is_empty);
+
+static inline struct bm_rcr_entry *try_rel_start(struct bman_portal **p,
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+					__maybe_unused struct bman_pool *pool,
+#endif
+					__maybe_unused unsigned long *irqflags,
+					__maybe_unused u32 flags)
+{
+	struct bm_rcr_entry *r;
+	u8 avail;
+
+	*p = get_affine_portal();
+	PORTAL_IRQ_LOCK(*p, (*irqflags));
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+	if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
+			(flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
+		if ((*p)->rcri_owned) {
+			PORTAL_IRQ_UNLOCK(*p, (*irqflags));
+			put_affine_portal();
+			return NULL;
+		}
+		(*p)->rcri_owned = pool;
+	}
+#endif
+	avail = bm_rcr_get_avail(&(*p)->p);
+	if (avail < 2)
+		update_rcr_ci(*p, avail);
+	r = bm_rcr_start(&(*p)->p);
+	if (unlikely(!r)) {
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+		if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
+				(flags & BMAN_RELEASE_FLAG_WAIT_SYNC)))
+			(*p)->rcri_owned = NULL;
+#endif
+		PORTAL_IRQ_UNLOCK(*p, (*irqflags));
+		put_affine_portal();
+	}
+	return r;
+}
+
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+static noinline struct bm_rcr_entry *__wait_rel_start(struct bman_portal **p,
+					struct bman_pool *pool,
+					__maybe_unused unsigned long *irqflags,
+					u32 flags)
+{
+	struct bm_rcr_entry *rcr = try_rel_start(p, pool, irqflags, flags);
+	if (!rcr)
+		bm_rcr_set_ithresh(&(*p)->p, 1);
+	return rcr;
+}
+
+static noinline struct bm_rcr_entry *wait_rel_start(struct bman_portal **p,
+					struct bman_pool *pool,
+					__maybe_unused unsigned long *irqflags,
+					u32 flags)
+{
+	struct bm_rcr_entry *rcr;
+#ifndef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+	pool = NULL;
+#endif
+	if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
+		wait_event_interruptible(affine_queue,
+			(rcr = __wait_rel_start(p, pool, irqflags, flags)));
+	else
+		wait_event(affine_queue,
+			(rcr = __wait_rel_start(p, pool, irqflags, flags)));
+	return rcr;
+}
+#endif
+
+/* To facilitate better copying of bufs into the ring without either (a)
+ * copying noise into the first byte (prematurely triggering the command) or
+ * (b) copying small fields very inefficiently using read-modify-write */
+struct overlay_bm_buffer {
+	u32 first;
+	u32 second;
+};
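+/* On the big-endian h/w this driver targets, 'first' overlays the
+ * {__reserved1, bpid, hi} fields of struct bm_buffer and 'second' overlays
+ * 'lo', so __bman_release() below can mask off the verb byte with a single
+ * 32-bit store. */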
+
+static inline int __bman_release(struct bman_pool *pool,
+			const struct bm_buffer *bufs, u8 num, u32 flags)
+{
+	struct bman_portal *p;
+	struct bm_rcr_entry *r;
+	struct overlay_bm_buffer *o_dest;
+	struct overlay_bm_buffer *o_src = (struct overlay_bm_buffer *)&bufs[0];
+	__maybe_unused unsigned long irqflags;
+	u32 i = num - 1;
+
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+	if (flags & BMAN_RELEASE_FLAG_WAIT)
+		r = wait_rel_start(&p, pool, &irqflags, flags);
+	else
+		r = try_rel_start(&p, pool, &irqflags, flags);
+#else
+	r = try_rel_start(&p, &irqflags, flags);
+#endif
+	if (!r)
+		return -EBUSY;
+	/* We can copy all but the first entry, as this can trigger badness
+	 * with the valid-bit. Use the overlay to mask the verb byte. */
+	o_dest = (struct overlay_bm_buffer *)&r->bufs[0];
+	o_dest->first = (o_src->first & 0x0000ffff) |
+		(((u32)pool->params.bpid << 16) & 0x00ff0000);
+	o_dest->second = o_src->second;
+	if (i)
+		copy_words(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));
+	bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
+			(num & BM_RCR_VERB_BUFCOUNT_MASK));
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+	/* if we wish to sync we need to set the threshold after h/w sees the
+	 * new ring entry. As we're mixing cache-enabled and cache-inhibited
+	 * accesses, this requires a heavy-weight sync. */
+	if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
+			(flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
+		hwsync();
+		bm_rcr_set_ithresh(&p->p, 1);
+	}
+#endif
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+	if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
+			(flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
+		if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
+			wait_event_interruptible(affine_queue,
+					(p->rcri_owned != pool));
+		else
+			wait_event(affine_queue, (p->rcri_owned != pool));
+	}
+#endif
+	return 0;
+}
+
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
+			u32 flags)
+{
+	int ret = 0;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (!num || (num > 8))
+		return -EINVAL;
+	if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE)
+		return -EINVAL;
+#endif
+	/* Without stockpile, this API is a pass-through to the h/w operation */
+	if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
+		return __bman_release(pool, bufs, num, flags);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (!atomic_dec_and_test(&pool->in_use)) {
+		pr_crit("Parallel attempts to enter bman_released() detected.");
+		panic("only one instance of bman_released/acquired allowed");
+	}
+#endif
+	/* This needs some explanation. Adding the given buffers may take the
+	 * stockpile over the threshold, but in fact the stockpile may already
+	 * *be* over the threshold if a previous release-to-hw attempt had
+	 * failed. So we have 3 cases to cover;
+	 *   1. we add to the stockpile and don't hit the threshold,
+	 *   2. we add to the stockpile, hit the threshold and release-to-hw,
+	 *   3. we have to release-to-hw before adding to the stockpile
+	 *	(not enough room in the stockpile for case 2).
+	 * Our constraints on thresholds guarantee that in case 3, there must be
+	 * at least 8 bufs already in the stockpile, so all release-to-hw ops
+	 * are for 8 bufs. Despite all this, the API must indicate whether the
+	 * given buffers were taken off the caller's hands, irrespective of
+	 * whether a release-to-hw was attempted. */
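+	/* Illustrative walk-through of case 3 (a sketch, using the default
+	 * BMAN_STOCKPILE_* values): with sp_fill == 13 and num == 4, the bufs
+	 * don't fit (13 + 4 >= BMAN_STOCKPILE_SZ), but 13 + 4 >=
+	 * BMAN_STOCKPILE_HIGH, so 8 bufs go to h/w first (sp_fill -> 5); on
+	 * the next loop iteration the 4 bufs fit (sp_fill -> 9), num drops
+	 * to 0, and the loop exits. */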
+	while (num) {
+		/* Add buffers to stockpile if they fit */
+		if ((pool->sp_fill + num) < BMAN_STOCKPILE_SZ) {
+			copy_words(pool->sp + pool->sp_fill, bufs,
+				sizeof(struct bm_buffer) * num);
+			pool->sp_fill += num;
+			num = 0; /* --> will return success no matter what */
+		}
+		/* Do hw op if hitting the high-water threshold */
+		if ((pool->sp_fill + num) >= BMAN_STOCKPILE_HIGH) {
+			ret = __bman_release(pool,
+				pool->sp + (pool->sp_fill - 8), 8, flags);
+			if (ret) {
+				ret = (num ? ret : 0);
+				goto release_done;
+			}
+			pool->sp_fill -= 8;
+		}
+	}
+release_done:
+#ifdef CONFIG_FSL_DPA_CHECKING
+	atomic_inc(&pool->in_use);
+#endif
+	return ret;
+}
+EXPORT_SYMBOL(bman_release);
+
+static inline int __bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs,
+					u8 num)
+{
+	struct bman_portal *p = get_affine_portal();
+	struct bm_mc_command *mcc;
+	struct bm_mc_result *mcr;
+	__maybe_unused unsigned long irqflags;
+	int ret;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	mcc = bm_mc_start(&p->p);
+	mcc->acquire.bpid = pool->params.bpid;
+	bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
+			(num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
+	while (!(mcr = bm_mc_result(&p->p)))
+		cpu_relax();
+	ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
+	if (bufs)
+		copy_words(&bufs[0], &mcr->acquire.bufs[0],
+				num * sizeof(bufs[0]));
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	if (ret != num)
+		ret = -ENOMEM;
+	return ret;
+}
+
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
+			u32 flags)
+{
+	int ret = 0;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (!num || (num > 8))
+		return -EINVAL;
+	if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE)
+		return -EINVAL;
+#endif
+	/* Without stockpile, this API is a pass-through to the h/w operation */
+	if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
+		return __bman_acquire(pool, bufs, num);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (!atomic_dec_and_test(&pool->in_use)) {
+		pr_crit("Parallel attempts to enter bman_acquire() detected.");
+		panic("only one instance of bman_released/acquired allowed");
+	}
+#endif
+	/* Only need a h/w op if we'll hit the low-water thresh */
+	if (!(flags & BMAN_ACQUIRE_FLAG_STOCKPILE) &&
+			(pool->sp_fill <= (BMAN_STOCKPILE_LOW + num))) {
+		/* refill stockpile with max amount, but if max amount
+		 * isn't available, try amount the user wants */
+		int bufcount = 8;
+		ret = __bman_acquire(pool, pool->sp + pool->sp_fill, bufcount);
+		if (ret < 0 && bufcount != num) {
+			bufcount = num;
+			/* Maybe the buffer pool has fewer than 8 bufs */
+			ret = __bman_acquire(pool, pool->sp + pool->sp_fill,
+						bufcount);
+		}
+		if (ret < 0)
+			goto hw_starved;
+		DPA_ASSERT(ret == bufcount);
+		pool->sp_fill += bufcount;
+	} else {
+hw_starved:
+		if (pool->sp_fill < num) {
+			ret = -ENOMEM;
+			goto acquire_done;
+		}
+	}
+	copy_words(bufs, pool->sp + (pool->sp_fill - num),
+		sizeof(struct bm_buffer) * num);
+	pool->sp_fill -= num;
+	ret = num;
+acquire_done:
+#ifdef CONFIG_FSL_DPA_CHECKING
+	atomic_inc(&pool->in_use);
+#endif
+	return ret;
+}
+EXPORT_SYMBOL(bman_acquire);
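+/* Illustrative usage (a sketch, not part of this patch): a round trip
+ * through a pool created with BMAN_POOL_FLAG_STOCKPILE;
+ *	struct bm_buffer bufs[8];
+ *	int n = bman_acquire(pool, bufs, 8, 0);
+ *	if (n > 0)
+ *		bman_release(pool, bufs, n, 0);
+ * bman_acquire() returns the number of buffers obtained or a negative errno,
+ * and bman_release() returns 0 once the buffers are stockpiled or handed to
+ * h/w. */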
+
+int bman_flush_stockpile(struct bman_pool *pool, u32 flags)
+{
+	u8 num;
+	int ret;
+
+	while (pool->sp_fill) {
+		num = ((pool->sp_fill > 8) ? 8 : pool->sp_fill);
+		ret = __bman_release(pool, pool->sp + (pool->sp_fill - num),
+				     num, flags);
+		if (ret)
+			return ret;
+		pool->sp_fill -= num;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(bman_flush_stockpile);
+
+#ifdef CONFIG_FSL_BMAN_CONFIG
+u32 bman_query_free_buffers(struct bman_pool *pool)
+{
+	return bm_pool_free_buffers(pool->params.bpid);
+}
+EXPORT_SYMBOL(bman_query_free_buffers);
+
+int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds)
+{
+	u32 bpid;
+
+	bpid = bman_get_params(pool)->bpid;
+
+	return bm_pool_set(bpid, thresholds);
+}
+EXPORT_SYMBOL(bman_update_pool_thresholds);
+#endif
+
+int bman_shutdown_pool(u32 bpid)
+{
+	struct bman_portal *p = get_affine_portal();
+	__maybe_unused unsigned long irqflags;
+	int ret;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	ret = bm_shutdown_pool(&p->p, bpid);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(bman_shutdown_pool);
+
+const struct bm_portal_config *bman_get_bm_portal_config(
+						struct bman_portal *portal)
+{
+	return portal->sharing_redirect ? NULL : portal->config;
+}
diff --git a/drivers/soc/fsl/bman_portal.c b/drivers/soc/fsl/bman_portal.c
new file mode 100644
index 0000000..b06339a
--- /dev/null
+++ b/drivers/soc/fsl/bman_portal.c
@@ -0,0 +1,329 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+/*
+ * Global variables for the max portal/pool number supported by this BMan version
+ */
+u16 bman_ip_rev;
+EXPORT_SYMBOL(bman_ip_rev);
+u16 bman_pool_max;
+EXPORT_SYMBOL(bman_pool_max);
+
+/* After initialising cpus that own shared portal configs, we cache the
+ * resulting portals (ie. not just the configs) in this array. Then we
+ * initialise slave cpus that don't have their own portals, redirecting them to
+ * portals from this cache in a round-robin assignment. */
+static struct bman_portal *shared_portals[NR_CPUS] __initdata;
+static int num_shared_portals __initdata;
+static int shared_portals_idx __initdata;
+static struct list_head unused_pcfgs __initdata = LIST_HEAD_INIT(unused_pcfgs);
+
+static void *affine_bportals[NR_CPUS];
+
+static struct bm_portal_config * __init parse_pcfg(struct device_node *node)
+{
+	struct bm_portal_config *pcfg;
+	int irq, ret;
+
+	pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL);
+	if (!pcfg) {
+		pr_err("can't allocate portal config");
+		return NULL;
+	}
+
+	if (of_device_is_compatible(node, "fsl,bman-portal-1.0") ||
+		of_device_is_compatible(node, "fsl,bman-portal-1.0.0")) {
+		bman_ip_rev = BMAN_REV10;
+		bman_pool_max = 64;
+	} else if (of_device_is_compatible(node, "fsl,bman-portal-2.0") ||
+		of_device_is_compatible(node, "fsl,bman-portal-2.0.8")) {
+		bman_ip_rev = BMAN_REV20;
+		bman_pool_max = 8;
+	} else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.0") ||
+		   of_device_is_compatible(node, "fsl,bman-portal-2.1.1") ||
+		   of_device_is_compatible(node, "fsl,bman-portal-2.1.2") ||
+		   of_device_is_compatible(node, "fsl,bman-portal-2.1.3")) {
+		bman_ip_rev = BMAN_REV21;
+		bman_pool_max = 64;
+	}
+
+	ret = of_address_to_resource(node, DPA_PORTAL_CE,
+				&pcfg->addr_phys[DPA_PORTAL_CE]);
+	if (ret) {
+		pr_err("Can't get %s property 'reg::CE'\n", node->full_name);
+		goto err;
+	}
+	ret = of_address_to_resource(node, DPA_PORTAL_CI,
+				&pcfg->addr_phys[DPA_PORTAL_CI]);
+	if (ret) {
+		pr_err("Can't get %s property 'reg::CI'\n", node->full_name);
+		goto err;
+	}
+
+	pcfg->public_cfg.cpu = -1;
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (irq == NO_IRQ) {
+		pr_err("Can't get %s property 'interrupts'\n", node->full_name);
+		goto err;
+	}
+	pcfg->public_cfg.irq = irq;
+	bman_depletion_fill(&pcfg->public_cfg.mask);
+
+	pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot(
+				pcfg->addr_phys[DPA_PORTAL_CE].start,
+				resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]),
+				0);
+	pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot(
+				pcfg->addr_phys[DPA_PORTAL_CI].start,
+				resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]),
+				_PAGE_GUARDED | _PAGE_NO_CACHE);
+	return pcfg;
+err:
+	kfree(pcfg);
+	return NULL;
+}
+
+static struct bm_portal_config * __init get_pcfg(struct list_head *list)
+{
+	struct bm_portal_config *pcfg;
+
+	if (list_empty(list))
+		return NULL;
+	pcfg = list_entry(list->prev, struct bm_portal_config, list);
+	list_del(&pcfg->list);
+
+	return pcfg;
+}
+
+static struct bman_portal * __init init_pcfg(struct bm_portal_config *pcfg)
+{
+	struct bman_portal *p = bman_create_affine_portal(pcfg);
+
+	if (p) {
+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
+		bman_p_irqsource_add(p, BM_PIRQ_RCRI | BM_PIRQ_BSCN);
+#endif
+		pr_info("BMan portal %s initialised, cpu %d\n",
+			pcfg->public_cfg.is_shared ? "(shared) " : "",
+			pcfg->public_cfg.cpu);
+		affine_bportals[pcfg->public_cfg.cpu] = p;
+	} else
+		pr_crit("BMan portal failure on cpu %d\n",
+			pcfg->public_cfg.cpu);
+
+	return p;
+}
+
+static void __init init_slave(int cpu)
+{
+	struct bman_portal *p;
+
+	p = bman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
+	if (!p)
+		pr_err("BMan slave portal failure on cpu %d\n", cpu);
+	else
+		pr_info("BMan portal %s initialised, cpu %d\n", "(slave) ", cpu);
+	if (shared_portals_idx >= num_shared_portals)
+		shared_portals_idx = 0;
+	affine_bportals[cpu] = p;
+}
+
+/* Bootarg "bportals=[...]" has the same syntax as "qportals=", and so the
+ * parsing is in dpaa_sys.h. The syntax is a comma-separated list of indexes
+ * and/or ranges of indexes, with each being optionally prefixed by "s" to
+ * explicitly mark it or them for sharing.
+ *    Eg;
+ *	  bportals=s0,1-3,s4
+ * means that cpus 1,2,3 get "unshared" portals, cpus 0 and 4 get "shared"
+ * portals, and any remaining cpus share the portals that are assigned to cpus 0
+ * or 4, selected in a round-robin fashion. (In this example, cpu 5 would share
+ * cpu 0's portal, cpu 6 would share cpu 4's portal, and cpu 7 would share cpu
+ * 0's portal.) */
+static struct cpumask want_unshared __initdata; /* cpus requested without "s" */
+static struct cpumask want_shared __initdata; /* cpus requested with "s" */
+
+static int __init parse_bportals(char *str)
+{
+	return parse_portals_bootarg(str, &want_shared, &want_unshared,
+				     "bportals");
+}
+__setup("bportals=", parse_bportals);
+
+static void bman_offline_cpu(unsigned int cpu)
+{
+	struct bman_portal *p = (struct bman_portal *)affine_bportals[cpu];
+	const struct bm_portal_config *pcfg;
+
+	if (p) {
+		pcfg = bman_get_bm_portal_config(p);
+		if (pcfg)
+			irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
+	}
+}
+
+/* Initialise the BMan driver. The meat of this function deals with portals. The
+ * following describes the flow of portal-handling, the code "steps" refer to
+ * this description;
+ * 1. Portal configs are parsed from the device-tree into 'unused_pcfgs', with
+ *    ::cpu==-1. Regions and interrupts are mapped (but interrupts are not
+ *    bound).
+ * 2. The "want_shared" and "want_unshared" lists (as filled by the
+ *    "bportals=[...]" bootarg) are processed, allocating portals and assigning
+ *    them to cpus, placing them in the relevant list and setting ::cpu as
+ *    appropriate. If no "bportals" bootarg was present, the default is to try to
+ *    assign portals to all online cpus at the time of driver initialisation.
+ *    Any failure to allocate portals (when parsing the "want" lists or when
+ *    using default behaviour) will be silently tolerated (the "fixup" logic in
+ *    step 3 will determine what happens in this case).
+ * 3. Do fixups relative to cpu_online_mask. If no portals are marked for
+ *    sharing and sharing is required (because not all cpus have been assigned
+ *    portals), then one portal will be marked for sharing. Conversely if no
+ *    sharing is required, any portals marked for sharing will not be shared. It
+ *    may be that sharing occurs when it wasn't expected, if portal allocation
+ *    failed to honour all the requested assignments (including the default
+ *    assignments if no bootarg is present).
+ * 4. Unshared portals are initialised on their respective cpus.
+ * 5. Shared portals are initialised on their respective cpus.
+ * 6. Each remaining cpu is initialised to slave to one of the shared portals,
+ *    which are selected in a round-robin fashion.
+ */
+int __init bman_init(void)
+{
+	struct cpumask slave_cpus;
+	struct cpumask unshared_cpus = *cpu_none_mask;
+	struct cpumask shared_cpus = *cpu_none_mask;
+	LIST_HEAD(unshared_pcfgs);
+	LIST_HEAD(shared_pcfgs);
+	struct device_node *dn;
+	struct bm_portal_config *pcfg;
+	struct bman_portal *p;
+	int cpu;
+	struct cpumask offline_cpus;
+
+	/* Initialise the BMan (CCSR) device */
+	for_each_compatible_node(dn, NULL, "fsl,bman") {
+		if (!bman_init_ccsr(dn))
+			pr_info("BMan err interrupt handler present\n");
+		else
+			pr_err("BMan CCSR setup failed\n");
+	}
+	/* Step 1. See comments at the beginning of the file. */
+	for_each_compatible_node(dn, NULL, "fsl,bman-portal") {
+		if (!of_device_is_available(dn))
+			continue;
+		pcfg = parse_pcfg(dn);
+		if (pcfg)
+			list_add_tail(&pcfg->list, &unused_pcfgs);
+	}
+	/* Step 2. */
+	for_each_possible_cpu(cpu) {
+		if (cpumask_test_cpu(cpu, &want_shared)) {
+			pcfg = get_pcfg(&unused_pcfgs);
+			if (!pcfg)
+				break;
+			pcfg->public_cfg.cpu = cpu;
+			list_add_tail(&pcfg->list, &shared_pcfgs);
+			cpumask_set_cpu(cpu, &shared_cpus);
+		}
+		if (cpumask_test_cpu(cpu, &want_unshared)) {
+			if (cpumask_test_cpu(cpu, &shared_cpus))
+				continue;
+			pcfg = get_pcfg(&unused_pcfgs);
+			if (!pcfg)
+				break;
+			pcfg->public_cfg.cpu = cpu;
+			list_add_tail(&pcfg->list, &unshared_pcfgs);
+			cpumask_set_cpu(cpu, &unshared_cpus);
+		}
+	}
+	if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
+		/* Default, give an unshared portal to each online cpu */
+		for_each_possible_cpu(cpu) {
+			pcfg = get_pcfg(&unused_pcfgs);
+			if (!pcfg)
+				break;
+			pcfg->public_cfg.cpu = cpu;
+			list_add_tail(&pcfg->list, &unshared_pcfgs);
+			cpumask_set_cpu(cpu, &unshared_cpus);
+		}
+	}
+	/* Step 3. */
+	cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
+	cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
+	if (cpumask_empty(&slave_cpus)) {
+		/* No sharing required */
+		if (!list_empty(&shared_pcfgs)) {
+			/* Migrate "shared" to "unshared" */
+			cpumask_or(&unshared_cpus, &unshared_cpus,
+				   &shared_cpus);
+			cpumask_clear(&shared_cpus);
+			list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
+			INIT_LIST_HEAD(&shared_pcfgs);
+		}
+	} else {
+		/* Sharing required */
+		if (list_empty(&shared_pcfgs)) {
+			/* Migrate one "unshared" to "shared" */
+			pcfg = get_pcfg(&unshared_pcfgs);
+			if (!pcfg) {
+				pr_crit("No BMan portals available!\n");
+				return 0;
+			}
+			cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
+			cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
+			list_add_tail(&pcfg->list, &shared_pcfgs);
+		}
+	}
+	/* Step 4. */
+	list_for_each_entry(pcfg, &unshared_pcfgs, list) {
+		pcfg->public_cfg.is_shared = 0;
+		p = init_pcfg(pcfg);
+	}
+	/* Step 5. */
+	list_for_each_entry(pcfg, &shared_pcfgs, list) {
+		pcfg->public_cfg.is_shared = 1;
+		p = init_pcfg(pcfg);
+		if (p)
+			shared_portals[num_shared_portals++] = p;
+	}
+	/* Step 6. */
+	if (!cpumask_empty(&slave_cpus))
+		for_each_cpu(cpu, &slave_cpus)
+			init_slave(cpu);
+	pr_info("BMan portals initialised\n");
+	cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
+	for_each_cpu(cpu, &offline_cpus)
+		bman_offline_cpu(cpu);
+
+	return 0;
+}
diff --git a/drivers/soc/fsl/bman_priv.h b/drivers/soc/fsl/bman_priv.h
new file mode 100644
index 0000000..a88da76
--- /dev/null
+++ b/drivers/soc/fsl/bman_priv.h
@@ -0,0 +1,148 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "dpaa_sys.h"
+#include <soc/fsl/bman.h>
+
+/* Revision info (for errata and feature handling) */
+#define BMAN_REV10 0x0100
+#define BMAN_REV20 0x0200
+#define BMAN_REV21 0x0201
+extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */
+
+/*
+ * Global variables for the max portal/pool number supported by this BMan version
+ */
+extern u16 bman_pool_max;
+
+/* used by CCSR and portal interrupt code */
+enum bm_isr_reg {
+	bm_isr_status = 0,
+	bm_isr_enable = 1,
+	bm_isr_disable = 2,
+	bm_isr_inhibit = 3
+};
+
+struct bm_portal_config {
+	/* Corenet portal addresses;
+	 * [0]==cache-enabled, [1]==cache-inhibited. */
+	__iomem void *addr_virt[2];
+	struct resource addr_phys[2];
+	/* Allow these to be joined in lists */
+	struct list_head list;
+	/* User-visible portal configuration settings */
+	struct bman_portal_config public_cfg;
+};
+
+#ifdef CONFIG_FSL_BMAN_CONFIG
+/* Hooks from bman_portal.c to the CCSR-config code (bman.c) */
+int bman_init_ccsr(struct device_node *node);
+#endif
+
+/* Hooks from bman_portal.c into the high-level API code (bman_api.c) */
+struct bman_portal *bman_create_portal(
+				       struct bman_portal *portal,
+				       const struct bm_portal_config *config);
+struct bman_portal *bman_create_affine_portal(
+			const struct bm_portal_config *config);
+struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
+								int cpu);
+void bman_destroy_portal(struct bman_portal *bm);
+
+const struct bm_portal_config *bman_destroy_affine_portal(void);
+
+/* Pool logic in the portal driver, during initialisation, needs to know if
+ * there's access to CCSR or not (if not, it'll cripple the pool allocator). */
+#ifdef CONFIG_FSL_BMAN_CONFIG
+int bman_have_ccsr(void);
+#else
+#define bman_have_ccsr() 0
+#endif
+
+/* Stockpile build constants. The _LOW value: when bman_acquire() is called and
+ * the stockpile fill-level is <= _LOW, an acquire is attempted from h/w but it
+ * might fail (if the buffer pool is depleted). So this value provides some
+ * "stagger" in that the bman_acquire() function will only fail if lots of bufs
+ * are requested at once or if h/w has been tested a couple of times without
+ * luck. The _HIGH value: when bman_release() is called and the stockpile
+ * fill-level is >= _HIGH, a release is attempted to h/w but it might fail (if
+ * the release ring is full). So this value provides some "stagger" so that
+ * ring-access is retried a couple of times prior to the API returning a
+ * failure. The following *must* be true;
+ *   BMAN_STOCKPILE_HIGH-BMAN_STOCKPILE_LOW > 8
+ *     (to avoid thrashing)
+ *   BMAN_STOCKPILE_SZ >= 16
+ *     (as the release logic expects to either send 8 buffers to hw prior to
+ *     adding the given buffers to the stockpile or add the buffers to the
+ *     stockpile before sending 8 to hw, as the API must be an all-or-nothing
+ *     success/fail.)
+ */
+#define BMAN_STOCKPILE_SZ   16u /* number of bufs in per-pool cache */
+#define BMAN_STOCKPILE_LOW  2u	/* when fill is <= this, acquire from hw */
+#define BMAN_STOCKPILE_HIGH 14u /* when fill is >= this, release to hw */
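+/* Worked example with the values above (a sketch, not normative): an acquire
+ * of 'num' bufs attempts a h/w refill of up to 8 when fill <= 2 + num, and a
+ * release of 'num' bufs pushes 8 to h/w when fill + num >= 14. The gap
+ * 14 - 2 = 12 > 8 keeps the two paths from thrashing, and SZ = 16 leaves room
+ * to stage a full 8-buf release either before or after stockpiling. */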
+
+/*************************************************/
+/*   BMan s/w corenet portal, low-level i/face	 */
+/*************************************************/
+
+/* Used by all portal interrupt registers except 'inhibit'
+ * This mask contains all the "irqsource" bits visible to API users
+ */
+#define BM_PIRQ_VISIBLE	(BM_PIRQ_RCRI | BM_PIRQ_BSCN)
+
+/* These are bm_<reg>_<verb>(). So for example, bm_isr_disable_write() means
+ * "write the disable register" rather than "disable the ability to write". */
+#define bm_isr_status_read(bm)		__bm_isr_read(bm, bm_isr_status)
+#define bm_isr_status_clear(bm, m)	__bm_isr_write(bm, bm_isr_status, m)
+#define bm_isr_enable_read(bm)		__bm_isr_read(bm, bm_isr_enable)
+#define bm_isr_enable_write(bm, v)	__bm_isr_write(bm, bm_isr_enable, v)
+#define bm_isr_disable_read(bm)		__bm_isr_read(bm, bm_isr_disable)
+#define bm_isr_disable_write(bm, v)	__bm_isr_write(bm, bm_isr_disable, v)
+#define bm_isr_inhibit(bm)		__bm_isr_write(bm, bm_isr_inhibit, 1)
+#define bm_isr_uninhibit(bm)		__bm_isr_write(bm, bm_isr_inhibit, 0)
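+/* Illustrative use (a sketch; 'portal' is the low-level handle, as passed
+ * via &p->p in bman_api.c): temporarily mask all portal interrupts;
+ *	bm_isr_inhibit(portal);
+ *	... reconfigure or poll the portal ...
+ *	bm_isr_uninhibit(portal);
+ */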
+
+#ifdef CONFIG_FSL_BMAN_CONFIG
+/* Set depletion thresholds associated with a buffer pool. Requires that the
+ * operating system have access to BMan CCSR (ie. compiled in support and
+ * run-time access courtesy of the device-tree). */
+int bm_pool_set(u32 bpid, const u32 *thresholds);
+#define BM_POOL_THRESH_SW_ENTER 0
+#define BM_POOL_THRESH_SW_EXIT	1
+#define BM_POOL_THRESH_HW_ENTER 2
+#define BM_POOL_THRESH_HW_EXIT	3
+
+/* Read the free buffer count for a given buffer pool */
+u32 bm_pool_free_buffers(u32 bpid);
+#endif /* CONFIG_FSL_BMAN_CONFIG */
+
+/* These are needed by bman_portal.c and qbman_driver.c even when CCSR
+ * support (FSL_BMAN_CONFIG) is compiled out, so they live outside the
+ * ifdef. */
+__init int bman_init(void);
+
+const struct bm_portal_config *bman_get_bm_portal_config(
+						struct bman_portal *portal);
diff --git a/drivers/soc/fsl/dpaa_alloc.c b/drivers/soc/fsl/dpaa_alloc.c
new file mode 100644
index 0000000..577c2bd
--- /dev/null
+++ b/drivers/soc/fsl/dpaa_alloc.c
@@ -0,0 +1,403 @@
+/* Copyright 2009 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "dpaa_sys.h"
+#include <soc/fsl/bman.h>
+
+/* This interface is needed in a few places and though it's not specific to
+ * USDPAA as such, creating a new header for it doesn't make any sense. The
+ * qbman kernel driver implements this interface and uses it as the backend for
+ * both the FQID and BPID allocators. The fsl_usdpaa driver also uses this
+ * interface for tracking per-process allocations handed out to user-space
+ */
+struct dpa_alloc {
+	struct list_head free;
+	spinlock_t lock;
+	struct list_head used;
+};
+
+#define DECLARE_DPA_ALLOC(name) \
+	struct dpa_alloc name = { \
+		.free = { \
+			.prev = &name.free, \
+			.next = &name.free \
+		}, \
+		.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
+		.used = { \
+			 .prev = &name.used, \
+			 .next = &name.used \
+		 } \
+	}
+
+/* The allocator is a (possibly-empty) list of these */
+struct alloc_node {
+	struct list_head list;
+	u32 base;
+	u32 num;
+	/* refcount and is_alloced are only set
+	 * when the node is in the used list */
+	unsigned int refcount;
+	int is_alloced;
+};
+
+#ifdef DPA_ALLOC_DEBUG
+#define DPRINT pr_info
+static void DUMP(struct dpa_alloc *alloc)
+{
+	int off = 0;
+	char buf[256];
+	struct alloc_node *p;
+	pr_info("Free Nodes\n");
+	list_for_each_entry(p, &alloc->free, list) {
+		if (off < 255)
+			off += snprintf(buf + off, 255-off, "{%d,%d}",
+				p->base, p->base + p->num - 1);
+	}
+	pr_info("%s\n", buf);
+
+	off = 0;
+	pr_info("Used Nodes\n");
+	list_for_each_entry(p, &alloc->used, list) {
+		if (off < 255)
+			off += snprintf(buf + off, 255-off, "{%d,%d}",
+				p->base, p->base + p->num - 1);
+	}
+	pr_info("%s\n", buf);
+}
+#else
+#define DPRINT(x...)
+#define DUMP(a)
+#endif
+
+static int dpa_alloc_new(struct dpa_alloc *alloc,
+			 u32 *result, u32 count, u32 align, int partial)
+{
+	struct alloc_node *i = NULL, *next_best = NULL, *used_node = NULL;
+	u32 base, next_best_base = 0, num = 0, next_best_num = 0;
+	struct alloc_node *margin_left, *margin_right;
+
+	*result = (u32)-1;
+	DPRINT("alloc_range(%d,%d,%d)\n", count, align, partial);
+	DUMP(alloc);
+	/* If 'align' is 0, it should behave as though it was 1 */
+	if (!align)
+		align = 1;
+	margin_left = kmalloc(sizeof(*margin_left), GFP_KERNEL);
+	if (!margin_left)
+		goto err;
+	margin_right = kmalloc(sizeof(*margin_right), GFP_KERNEL);
+	if (!margin_right) {
+		kfree(margin_left);
+		goto err;
+	}
+	spin_lock_irq(&alloc->lock);
+	list_for_each_entry(i, &alloc->free, list) {
+		base = (i->base + align - 1) / align;
+		base *= align;
+		if ((base - i->base) >= i->num)
+			/* alignment is impossible, regardless of count */
+			continue;
+		num = i->num - (base - i->base);
+		if (num >= count) {
+			/* this one will do nicely */
+			num = count;
+			goto done;
+		}
+		if (num > next_best_num) {
+			next_best = i;
+			next_best_base = base;
+			next_best_num = num;
+		}
+	}
+	if (partial && next_best) {
+		i = next_best;
+		base = next_best_base;
+		num = next_best_num;
+	} else
+		i = NULL;
+done:
+	if (i) {
+		if (base != i->base) {
+			margin_left->base = i->base;
+			margin_left->num = base - i->base;
+			list_add_tail(&margin_left->list, &i->list);
+		} else
+			kfree(margin_left);
+		if ((base + num) < (i->base + i->num)) {
+			margin_right->base = base + num;
+			margin_right->num = (i->base + i->num) -
+						(base + num);
+			list_add(&margin_right->list, &i->list);
+		} else
+			kfree(margin_right);
+		list_del(&i->list);
+		kfree(i);
+		*result = base;
+	}
+	spin_unlock_irq(&alloc->lock);
+err:
+	DPRINT("returning %d\n", i ? num : -ENOMEM);
+	DUMP(alloc);
+	if (!i)
+		return -ENOMEM;
+
+	/* Add the allocation to the used list with a refcount of 1 */
+	used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
+	if (!used_node)
+		return -ENOMEM;
+	used_node->base = *result;
+	used_node->num = num;
+	used_node->refcount = 1;
+	used_node->is_alloced = 1;
+	list_add_tail(&used_node->list, &alloc->used);
+	return (int)num;
+}
+
+/* Allocate the list node using GFP_ATOMIC, because we *really* want to avoid
+ * forcing error-handling on to users in the deallocation path. */
+static void _dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count)
+{
+	struct alloc_node *i, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
+	BUG_ON(!node);
+	DPRINT("release_range(%d,%d)\n", base_id, count);
+	DUMP(alloc);
+	BUG_ON(!count);
+	spin_lock_irq(&alloc->lock);
+
+	node->base = base_id;
+	node->num = count;
+	list_for_each_entry(i, &alloc->free, list) {
+		if (i->base >= node->base) {
+			/* BUG_ON(any overlapping) */
+			BUG_ON(i->base < (node->base + node->num));
+			list_add_tail(&node->list, &i->list);
+			goto done;
+		}
+	}
+	list_add_tail(&node->list, &alloc->free);
+done:
+	/* Merge to the left */
+	i = list_entry(node->list.prev, struct alloc_node, list);
+	if (node->list.prev != &alloc->free) {
+		BUG_ON((i->base + i->num) > node->base);
+		if ((i->base + i->num) == node->base) {
+			node->base = i->base;
+			node->num += i->num;
+			list_del(&i->list);
+			kfree(i);
+		}
+	}
+	/* Merge to the right */
+	i = list_entry(node->list.next, struct alloc_node, list);
+	if (node->list.next != &alloc->free) {
+		BUG_ON((node->base + node->num) > i->base);
+		if ((node->base + node->num) == i->base) {
+			node->num += i->num;
+			list_del(&i->list);
+			kfree(i);
+		}
+	}
+	spin_unlock_irq(&alloc->lock);
+	DUMP(alloc);
+}
+
+static void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count)
+{
+	struct alloc_node *i = NULL;
+	spin_lock_irq(&alloc->lock);
+
+	/* First find the node in the used list and decrement its ref count */
+	list_for_each_entry(i, &alloc->used, list) {
+		if (i->base == base_id && i->num == count) {
+			--i->refcount;
+			if (i->refcount == 0) {
+				list_del(&i->list);
+				spin_unlock_irq(&alloc->lock);
+				if (i->is_alloced)
+					_dpa_alloc_free(alloc, base_id, count);
+				kfree(i);
+				return;
+			}
+			spin_unlock_irq(&alloc->lock);
+			return;
+		}
+	}
+	/* Couldn't find the allocation */
+	pr_err("Attempt to free ID 0x%x COUNT %d that wasn't alloc'd or reserved\n",
+	       base_id, count);
+	spin_unlock_irq(&alloc->lock);
+}
+
+/* Same as free but no previous allocation checking is needed */
+static void dpa_alloc_seed(struct dpa_alloc *alloc, u32 base_id, u32 count)
+{
+	_dpa_alloc_free(alloc, base_id, count);
+}
+
+/* Like 'new' but specifies the desired range; returns a negative errno if the
+ * entire desired range is not available, or 0 for success
+ */
+static int dpa_alloc_reserve(struct dpa_alloc *alloc, u32 base, u32 num)
+{
+	struct alloc_node *i = NULL, *used_node;
+
+	DPRINT("alloc_reserve(%d,%d)\n", base, num);
+	DUMP(alloc);
+
+	spin_lock_irq(&alloc->lock);
+
+	/* Check for the node in the used list.
+	 * If found, increase its refcount */
+	list_for_each_entry(i, &alloc->used, list) {
+		if ((i->base == base) && (i->num == num)) {
+			++i->refcount;
+			spin_unlock_irq(&alloc->lock);
+			return 0;
+		}
+		if ((base >= i->base) && (base < (i->base + i->num))) {
+			/* This is an attempt to reserve a region that was
+			 * already reserved or alloced with a different
+			 * base or num */
+			pr_err("Cannot reserve %d - %d, it overlaps with existing reservation from %d - %d\n",
+			       base, base + num - 1, i->base,
+			       i->base + i->num - 1);
+			spin_unlock_irq(&alloc->lock);
+			return -EBUSY;
+		}
+	}
+	/* Check to make sure this ID isn't in the free list */
+	list_for_each_entry(i, &alloc->free, list) {
+		if ((base >= i->base) && (base < (i->base + i->num))) {
+			/* yep, the reservation is within this node */
+			pr_err("Cannot reserve %d - %d, it overlaps with"
+			       " free range %d - %d and must be alloced\n",
+			       base, base + num - 1,
+			       i->base, i->base + i->num - 1);
+			spin_unlock_irq(&alloc->lock);
+			return -EBUSY;
+		}
+	}
+	/* Add the allocation to the used list with a refcount of 1 */
+	used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
+	if (!used_node) {
+		spin_unlock_irq(&alloc->lock);
+		return -ENOMEM;
+	}
+	used_node->base = base;
+	used_node->num = num;
+	used_node->refcount = 1;
+	used_node->is_alloced = 0;
+	list_add_tail(&used_node->list, &alloc->used);
+	spin_unlock_irq(&alloc->lock);
+	return 0;
+}
+
+/* This is a sort-of-conditional dpa_alloc_free() routine. Eg. when releasing
+ * FQIDs (probably from user-space), it can filter out those that aren't in the
+ * OOS state (better to leak a h/w resource than to crash). This function
+ * returns the number of invalid IDs that were not released. */
+static u32 release_id_range(struct dpa_alloc *alloc, u32 id, u32 count,
+			     int (*is_valid)(u32 id))
+{
+	int valid_mode = 0;
+	u32 loop = id, total_invalid = 0;
+	while (loop < (id + count)) {
+		int isvalid = is_valid ? is_valid(loop) : 1;
+		if (!valid_mode) {
+			/* We're looking for a valid ID to terminate an invalid
+			 * range */
+			if (isvalid) {
+				/* We finished a range of invalid IDs, a valid
+				 * range is now underway */
+				valid_mode = 1;
+				count -= (loop - id);
+				id = loop;
+			} else
+				total_invalid++;
+		} else {
+			/* We're looking for an invalid ID to terminate a
+			 * valid range */
+			if (!isvalid) {
+				/* Release the range of valid IDs, an invalid
+				 * range is now underway */
+				if (loop > id)
+					dpa_alloc_free(alloc, id, loop - id);
+				valid_mode = 0;
+			}
+		}
+		loop++;
+	}
+	/* Release any unterminated range of valid IDs */
+	if (valid_mode && count)
+		dpa_alloc_free(alloc, id, count);
+	return total_invalid;
+}
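+/* Illustrative example (hypothetical IDs): calling release_id_range() on
+ * [10..13] when only ID 11 fails is_valid() frees the sub-ranges {10} and
+ * {12,13} via dpa_alloc_free() and returns 1, the number of invalid IDs
+ * skipped. */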
+
+/* BMan APIs are front-ends to the common code */
+
+#ifdef CONFIG_FSL_BMAN
+static DECLARE_DPA_ALLOC(bpalloc); /* BPID allocator */
+
+/* BPID allocator front-end */
+
+int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial)
+{
+	return dpa_alloc_new(&bpalloc, result, count, align, partial);
+}
+EXPORT_SYMBOL(bman_alloc_bpid_range);
+
+static int bp_cleanup(u32 bpid)
+{
+	return bman_shutdown_pool(bpid) == 0;
+}
+
+void bman_release_bpid_range(u32 bpid, u32 count)
+{
+	u32 total_invalid = release_id_range(&bpalloc, bpid, count, bp_cleanup);
+	if (total_invalid)
+		pr_err("BPID range [%d..%d] (%d) had %d leaks\n",
+			bpid, bpid + count - 1, count, total_invalid);
+}
+EXPORT_SYMBOL(bman_release_bpid_range);
+
+void bman_seed_bpid_range(u32 bpid, u32 count)
+{
+	dpa_alloc_seed(&bpalloc, bpid, count);
+}
+EXPORT_SYMBOL(bman_seed_bpid_range);
+
+int bman_reserve_bpid_range(u32 bpid, u32 count)
+{
+	return dpa_alloc_reserve(&bpalloc, bpid, count);
+}
+EXPORT_SYMBOL(bman_reserve_bpid_range);
+#endif	/* CONFIG_FSL_BMAN */
diff --git a/drivers/soc/fsl/dpaa_sys.h b/drivers/soc/fsl/dpaa_sys.h
new file mode 100644
index 0000000..58e6edb
--- /dev/null
+++ b/drivers/soc/fsl/dpaa_sys.h
@@ -0,0 +1,234 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DPA_SYS_H
+#define DPA_SYS_H
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/bootmem.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/kthread.h>
+#include <linux/memblock.h>
+#include <linux/completion.h>
+#include <linux/log2.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+#include <linux/device.h>
+#include <linux/smp.h>
+#include <sysdev/fsl_soc.h>
+#include <linux/fsl_hypervisor.h>
+#include <linux/vmalloc.h>
+#include <linux/ctype.h>
+#include <linux/math64.h>
+
+/* When copying aligned words or shorts, try to avoid memcpy() */
+#define CONFIG_TRY_BETTER_MEMCPY
+
+/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
+#define DPA_PORTAL_CE 0
+#define DPA_PORTAL_CI 1
+
+/***********************/
+/* Misc inline assists */
+/***********************/
+
+/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
+ * barriers and that dcb*() won't fall victim to compiler or execution
+ * reordering with respect to other code/instructions that manipulate the same
+ * cacheline. */
+#define hwsync() __asm__ __volatile__ ("sync" : : : "memory")
+#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory")
+#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory")
+#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p))
+#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p))
+#define dcbi(p) dcbf(p)
+#ifdef CONFIG_PPC_E500MC
+#define dcbzl(p) __asm__ __volatile__ ("dcbzl 0,%0" : : "r" (p))
+#define dcbz_64(p) dcbzl(p)
+#define dcbf_64(p) dcbf(p)
+/* Commonly used combo */
+#define dcbit_ro(p) \
+	do { \
+		dcbi(p); \
+		dcbt_ro(p); \
+	} while (0)
+#else
+#define dcbz(p) __asm__ __volatile__ ("dcbz 0,%0" : : "r" (p))
+#define dcbz_64(p) \
+	do { \
+		dcbz((u32)p + 32);	\
+		dcbz(p);	\
+	} while (0)
+#define dcbf_64(p) \
+	do { \
+		dcbf((u32)p + 32); \
+		dcbf(p); \
+	} while (0)
+/* Commonly used combo */
+#define dcbit_ro(p) \
+	do { \
+		dcbi(p); \
+		dcbi((u32)p + 32); \
+		dcbt_ro(p); \
+		dcbt_ro((u32)p + 32); \
+	} while (0)
+#endif /* CONFIG_PPC_E500MC */
+
+static inline u64 mfatb(void)
+{
+	u32 hi, lo, chk;
+	do {
+		hi = mfspr(SPRN_ATBU);
+		lo = mfspr(SPRN_ATBL);
+		chk = mfspr(SPRN_ATBU);
+	} while (unlikely(hi != chk));
+	return ((u64)hi << 32) | (u64)lo;
+}
+
+#ifdef CONFIG_FSL_DPA_CHECKING
+#define DPA_ASSERT(x) \
+	do { \
+		if (!(x)) { \
+			pr_crit("ASSERT: (%s:%d) %s\n", __FILE__, __LINE__, \
+				__stringify_1(x)); \
+			dump_stack(); \
+			panic("assertion failure"); \
+		} \
+	} while (0)
+#else
+#define DPA_ASSERT(x)
+#endif
+
+/* memcpy() stuff - when you know alignments in advance */
+#ifdef CONFIG_TRY_BETTER_MEMCPY
+static inline void copy_words(void *dest, const void *src, size_t sz)
+{
+	u32 *__dest = dest;
+	const u32 *__src = src;
+	size_t __sz = sz >> 2;
+	BUG_ON((unsigned long)dest & 0x3);
+	BUG_ON((unsigned long)src & 0x3);
+	BUG_ON(sz & 0x3);
+	while (__sz--)
+		*(__dest++) = *(__src++);
+}
+static inline void copy_shorts(void *dest, const void *src, size_t sz)
+{
+	u16 *__dest = dest;
+	const u16 *__src = src;
+	size_t __sz = sz >> 1;
+	BUG_ON((unsigned long)dest & 0x1);
+	BUG_ON((unsigned long)src & 0x1);
+	BUG_ON(sz & 0x1);
+	while (__sz--)
+		*(__dest++) = *(__src++);
+}
+static inline void copy_bytes(void *dest, const void *src, size_t sz)
+{
+	u8 *__dest = dest;
+	const u8 *__src = src;
+	while (sz--)
+		*(__dest++) = *(__src++);
+}
+#else
+#define copy_words memcpy
+#define copy_shorts memcpy
+#define copy_bytes memcpy
+#endif
+
+/************/
+/* Bootargs */
+/************/
+
+/* BMan has "bportals=", they use the same syntax
+ * though; a comma-separated list of items, each item being a cpu index and/or a
+ * range of cpu indices, and each item optionally be prefixed by "s" to indicate
+ * that the portal associated with that cpu should be shared. See bman_driver.c
+ * for more specifics. */
+static int __parse_portals_cpu(const char **s, unsigned int *cpu)
+{
+	*cpu = 0;
+	if (!isdigit(**s))
+		return -EINVAL;
+	while (isdigit(**s))
+		*cpu = *cpu * 10 + (*((*s)++) - '0');
+	return 0;
+}
+static inline int parse_portals_bootarg(char *str, struct cpumask *want_shared,
+					struct cpumask *want_unshared,
+					const char *argname)
+{
+	const char *s = str;
+	unsigned int shared, cpu1, cpu2, loop;
+
+keep_going:
+	if (*s == 's') {
+		shared = 1;
+		s++;
+	} else
+		shared = 0;
+	if (__parse_portals_cpu(&s, &cpu1))
+		goto err;
+	if (*s == '-') {
+		s++;
+		if (__parse_portals_cpu(&s, &cpu2))
+			goto err;
+		if (cpu2 < cpu1)
+			goto err;
+	} else
+		cpu2 = cpu1;
+	for (loop = cpu1; loop <= cpu2; loop++)
+		cpumask_set_cpu(loop, shared ? want_shared : want_unshared);
+	if (*s == ',') {
+		s++;
+		goto keep_going;
+	} else if ((*s == '\0') || isspace(*s))
+		return 0;
+err:
+	pr_crit("Malformed %s argument: %s, offset: %lu\n", argname, str,
+		(unsigned long)s - (unsigned long)str);
+	return -EINVAL;
+}
+
+#endif /* DPA_SYS_H */
diff --git a/drivers/soc/fsl/qbman_driver.c b/drivers/soc/fsl/qbman_driver.c
new file mode 100644
index 0000000..88ee7a8
--- /dev/null
+++ b/drivers/soc/fsl/qbman_driver.c
@@ -0,0 +1,40 @@
+/* Copyright 2013 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/time.h>
+#include "bman_priv.h"
+
+static __init int qbman_init(void)
+{
+	return bman_init();
+}
+subsys_initcall(qbman_init);
diff --git a/include/soc/fsl/bman.h b/include/soc/fsl/bman.h
new file mode 100644
index 0000000..e30f36d
--- /dev/null
+++ b/include/soc/fsl/bman.h
@@ -0,0 +1,510 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef FSL_BMAN_H
+#define FSL_BMAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Last updated for v00.79 of the BG */
+
+/* Portal processing (interrupt) sources */
+#define BM_PIRQ_RCRI	0x00000002	/* RCR Ring (below threshold) */
+#define BM_PIRQ_BSCN	0x00000001	/* Buffer depletion State Change */
+
+/* This wrapper represents a bit-array for the depletion state of the 64 BMan
+ * buffer pools. */
+struct bman_depletion {
+	u32 __state[2];
+};
+#define BMAN_DEPLETION_EMPTY { { 0x00000000, 0x00000000 } }
+#define BMAN_DEPLETION_FULL { { 0xffffffff, 0xffffffff } }
+#define __bmdep_word(x) ((x) >> 5)
+#define __bmdep_shift(x) ((x) & 0x1f)
+#define __bmdep_bit(x) (0x80000000 >> __bmdep_shift(x))
+static inline void bman_depletion_init(struct bman_depletion *c)
+{
+	c->__state[0] = c->__state[1] = 0;
+}
+static inline void bman_depletion_fill(struct bman_depletion *c)
+{
+	c->__state[0] = c->__state[1] = ~0;
+}
+static inline int bman_depletion_get(const struct bman_depletion *c, u8 bpid)
+{
+	return c->__state[__bmdep_word(bpid)] & __bmdep_bit(bpid);
+}
+static inline void bman_depletion_set(struct bman_depletion *c, u8 bpid)
+{
+	c->__state[__bmdep_word(bpid)] |= __bmdep_bit(bpid);
+}
+static inline void bman_depletion_unset(struct bman_depletion *c, u8 bpid)
+{
+	c->__state[__bmdep_word(bpid)] &= ~__bmdep_bit(bpid);
+}
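+/* Illustrative use (a sketch with hypothetical bpids): build a mask covering
+ * only the pools of interest;
+ *	struct bman_depletion mask;
+ *	bman_depletion_init(&mask);
+ *	bman_depletion_set(&mask, 3);
+ *	bman_depletion_set(&mask, 7);
+ * after which bman_depletion_get(&mask, 3) and bman_depletion_get(&mask, 7)
+ * are non-zero and all other bpids test as zero. */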
+
+/* ------------------------------------------------------- */
+/* --- BMan data structures (and associated constants) --- */
+
+/* Represents s/w corenet portal mapped data structures */
+struct bm_rcr_entry;	/* RCR (Release Command Ring) entries */
+struct bm_mc_command;	/* MC (Management Command) command */
+struct bm_mc_result;	/* MC result */
+
+/* For code reduction, define a wrapper for 48-bit buffers. In cases where a
+ * buffer pool id specific to this buffer is needed (BM_RCR_VERB_CMD_BPID_MULTI,
+ * BM_MCC_VERB_ACQUIRE), the 'bpid' field is used. */
+struct bm_buffer {
+	union {
+		struct {
+			u8 __reserved1;
+			u8 bpid;
+			u16 hi; /* High 16-bits of 48-bit address */
+			u32 lo; /* Low 32-bits of 48-bit address */
+		};
+		struct {
+			u64 __notaddress:16;
+			u64 addr:48;
+		};
+	};
+} __aligned(8);
+static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
+{
+	return buf->addr;
+}
+static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
+{
+	return (dma_addr_t)buf->addr;
+}
+/* Macro, so we compile better if 'v' isn't always 64-bit */
+#define bm_buffer_set64(buf, v) \
+	do { \
+		struct bm_buffer *__buf931 = (buf); \
+		__buf931->hi = upper_32_bits(v); \
+		__buf931->lo = lower_32_bits(v); \
+	} while (0)
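+
+/* Usage sketch: packing a DMA address into a bm_buffer and reading it back.
+ * Illustrative only; 'daddr' stands for a dma_addr_t the caller obtained
+ * elsewhere (e.g. from dma_map_single()). Note that bm_buffer_set64() does
+ * not touch the 'bpid' field.
+ *
+ *	struct bm_buffer buf;
+ *
+ *	bm_buffer_set64(&buf, daddr);
+ *	WARN_ON(bm_buf_addr(&buf) != daddr);
+ */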
+
+/* See 1.5.3.5.4: "Release Command" */
+struct bm_rcr_entry {
+	union {
+		struct {
+			u8 __dont_write_directly__verb;
+			u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
+			u8 __reserved1[62];
+		};
+		struct bm_buffer bufs[8];
+	};
+} __packed;
+#define BM_RCR_VERB_VBIT		0x80
+#define BM_RCR_VERB_CMD_MASK		0x70	/* one of the two values below */
+#define BM_RCR_VERB_CMD_BPID_SINGLE	0x20
+#define BM_RCR_VERB_CMD_BPID_MULTI	0x30
+#define BM_RCR_VERB_BUFCOUNT_MASK	0x0f	/* values 1..8 */
+
+/* See 1.5.3.1: "Acquire Command" */
+/* See 1.5.3.2: "Query Command" */
+struct bm_mcc_acquire {
+	u8 bpid;
+	u8 __reserved1[62];
+} __packed;
+struct bm_mcc_query {
+	u8 __reserved2[63];
+} __packed;
+struct bm_mc_command {
+	u8 __dont_write_directly__verb;
+	union {
+		struct bm_mcc_acquire acquire;
+		struct bm_mcc_query query;
+	};
+} __packed;
+#define BM_MCC_VERB_VBIT		0x80
+#define BM_MCC_VERB_CMD_MASK		0x70	/* one of the two commands below */
+#define BM_MCC_VERB_CMD_ACQUIRE		0x10
+#define BM_MCC_VERB_CMD_QUERY		0x40
+#define BM_MCC_VERB_ACQUIRE_BUFCOUNT	0x0f	/* values 1..8 go here */
+
+/* See 1.5.3.3: "Acquire Response" */
+/* See 1.5.3.4: "Query Response" */
+struct bm_pool_state {
+	u8 __reserved1[32];
+	/* "availability state" and "depletion state" */
+	struct {
+		u8 __reserved1[8];
+		/* Access using bman_depletion_***() */
+		struct bman_depletion state;
+	} as, ds;
+};
+struct bm_mc_result {
+	union {
+		struct {
+			u8 verb;
+			u8 __reserved1[63];
+		};
+		union {
+			struct {
+				u8 __reserved1;
+				u8 bpid;
+				u8 __reserved2[62];
+			};
+			struct bm_buffer bufs[8];
+		} acquire;
+		struct bm_pool_state query;
+	};
+} __packed;
+#define BM_MCR_VERB_VBIT		0x80
+#define BM_MCR_VERB_CMD_MASK		BM_MCC_VERB_CMD_MASK
+#define BM_MCR_VERB_CMD_ACQUIRE		BM_MCC_VERB_CMD_ACQUIRE
+#define BM_MCR_VERB_CMD_QUERY		BM_MCC_VERB_CMD_QUERY
+#define BM_MCR_VERB_CMD_ERR_INVALID	0x60
+#define BM_MCR_VERB_CMD_ERR_ECC		0x70
+#define BM_MCR_VERB_ACQUIRE_BUFCOUNT	BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
+/* Determine the "availability state" of pool 'p' from a query result 'r' */
+#define BM_MCR_QUERY_AVAILABILITY(r, p)	\
+		bman_depletion_get(&r->query.as.state, p)
+/* Determine the "depletion state" of pool 'p' from a query result 'r' */
+#define BM_MCR_QUERY_DEPLETION(r, p)	\
+		bman_depletion_get(&r->query.ds.state, p)
+
+/*******************************************************************/
+/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */
+/*******************************************************************/
+
+	/* Portal and Buffer Pools */
+	/* ----------------------- */
+/* Represents a managed portal */
+struct bman_portal;
+
+/* This object type represents BMan buffer pools. */
+struct bman_pool;
+
+struct bman_portal_config {
+	/* This is used for any "core-affine" portals, i.e. default portals
+	 * associated with the corresponding cpu. -1 implies that there is no
+	 * core affinity configured. */
+	int cpu;
+	/* portal interrupt line */
+	int irq;
+	/* Is this portal shared? (If so, it has coarser locking and demuxes
+	 * processing on behalf of other CPUs.) */
+	int is_shared;
+	/* These are the buffer pool IDs that may be used via this portal. */
+	struct bman_depletion mask;
+};
+
+/* This callback type is used when handling pool depletion entry/exit. The
+ * 'cb_ctx' value is the opaque value associated with the pool object in
+ * bman_new_pool(). 'depleted' is non-zero on depletion-entry, and zero on
+ * depletion-exit. */
+typedef void (*bman_cb_depletion)(struct bman_portal *bm,
+			struct bman_pool *pool, void *cb_ctx, int depleted);
+
+/* This struct specifies parameters for a bman_pool object. */
+struct bman_pool_params {
+	/* index of the buffer pool to encapsulate (0-63), ignored if
+	 * BMAN_POOL_FLAG_DYNAMIC_BPID is set. */
+	u32 bpid;
+	/* bit-mask of BMAN_POOL_FLAG_*** options */
+	u32 flags;
+	/* depletion-entry/exit callback, if BMAN_POOL_FLAG_DEPLETION is set */
+	bman_cb_depletion cb;
+	/* opaque user value passed as a parameter to 'cb' */
+	void *cb_ctx;
+	/* depletion-entry/exit thresholds, if BMAN_POOL_FLAG_THRESH is set. NB:
+	 * this is only allowed if BMAN_POOL_FLAG_DYNAMIC_BPID is used *and*
+	 * when run in the control plane (which controls BMan CCSR). This array
+	 * matches the definition of bm_pool_set(). */
+	u32 thresholds[4];
+};
+
+/* Flags to bman_new_pool() */
+#define BMAN_POOL_FLAG_NO_RELEASE    0x00000001 /* can't release to pool */
+#define BMAN_POOL_FLAG_ONLY_RELEASE  0x00000002 /* can only release to pool */
+#define BMAN_POOL_FLAG_DEPLETION     0x00000004 /* track depletion entry/exit */
+#define BMAN_POOL_FLAG_DYNAMIC_BPID  0x00000008 /* (de)allocate bpid */
+#define BMAN_POOL_FLAG_THRESH	     0x00000010 /* set depletion thresholds */
+#define BMAN_POOL_FLAG_STOCKPILE     0x00000020 /* stockpile to reduce hw ops */
+
+/* Flags to bman_release() */
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+#define BMAN_RELEASE_FLAG_WAIT	     0x00000001 /* wait if RCR is full */
+#define BMAN_RELEASE_FLAG_WAIT_INT   0x00000002 /* if we wait, interruptible? */
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+#define BMAN_RELEASE_FLAG_WAIT_SYNC  0x00000004 /* if wait, until consumed? */
+#endif
+#endif
+#define BMAN_RELEASE_FLAG_NOW	     0x00000008 /* issue immediate release */
+
+/* Flags to bman_acquire() */
+#define BMAN_ACQUIRE_FLAG_STOCKPILE  0x00000001 /* no hw op, stockpile only */
+
+	/* Portal Management */
+	/* ----------------- */
+/**
+ * bman_get_portal_config - get portal configuration settings
+ *
+ * This returns a read-only view of the current cpu's affine portal settings.
+ */
+const struct bman_portal_config *bman_get_portal_config(void);
+
+/**
+ * bman_irqsource_get - return the portal work that is interrupt-driven
+ *
+ * Returns a bitmask of BM_PIRQ_*** processing sources that are currently
+ * enabled for interrupt handling on the current cpu's affine portal. These
+ * sources will trigger the portal interrupt and the interrupt handler (or a
+ * tasklet/bottom-half it defers to) will perform the corresponding processing
+ * work. The bman_poll_***() functions will only process sources that are not in
+ * this bitmask. If the current CPU is sharing a portal hosted on another CPU,
+ * this always returns zero.
+ */
+u32 bman_irqsource_get(void);
+
+/**
+ * bman_irqsource_add - add processing sources to be interrupt-driven
+ * @bits: bitmask of BM_PIRQ_*** processing sources
+ *
+ * Adds processing sources that should be interrupt-driven (rather than
+ * processed via the bman_poll_***() functions). Returns zero for success, or
+ * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ */
+int bman_irqsource_add(u32 bits);
+
+/**
+ * bman_irqsource_remove - remove processing sources from being interrupt-driven
+ * @bits: bitmask of BM_PIRQ_*** processing sources
+ *
+ * Removes processing sources from being interrupt-driven, so that they will
+ * instead be processed via the bman_poll_***() functions. Returns zero for
+ * success, or -EINVAL if the current CPU is sharing a portal hosted on
+ * another CPU.
+ */
+int bman_irqsource_remove(u32 bits);
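+
+/* Usage sketch: making depletion-state notifications interrupt-driven on the
+ * current cpu's affine portal, then reverting to polled operation.
+ * Illustrative only; error handling is elided.
+ *
+ *	if (!(bman_irqsource_get() & BM_PIRQ_BSCN))
+ *		bman_irqsource_add(BM_PIRQ_BSCN);
+ *	...
+ *	bman_irqsource_remove(BM_PIRQ_BSCN);
+ */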
+
+/**
+ * bman_affine_cpus - return a mask of cpus that have affine portals
+ */
+const cpumask_t *bman_affine_cpus(void);
+
+/**
+ * bman_poll_slow - process anything that isn't interrupt-driven.
+ *
+ * This function does any portal processing that isn't interrupt-driven. If the
+ * current CPU is sharing a portal hosted on another CPU, this function will
+ * return -EINVAL, otherwise the return value is a bitmask of BM_PIRQ_* sources
+ * indicating what interrupt sources were actually processed by the call.
+ *
+ * NB, unlike the legacy wrapper bman_poll(), this function deterministically
+ * checks for the presence of portal processing work and performs it, which
+ * implies some latency even when there is nothing to do. The bman_poll()
+ * wrapper, on the other hand, attenuates this by checking for (and doing)
+ * portal processing only infrequently, so that it can be called from
+ * core-processing loops. Use bman_poll_slow() when you want to decide for
+ * yourself when to incur the overhead of processing.
+ */
+u32 bman_poll_slow(void);
+
+/**
+ * bman_poll - process anything that isn't interrupt-driven.
+ *
+ * Dispatcher logic on a cpu can use this to trigger any maintenance of the
+ * affine portal. This function does whatever processing is not triggered by
+ * interrupts. This is a legacy wrapper that can be used in core-processing
+ * loops but mitigates the performance overhead of portal processing by
+ * adaptively bypassing true portal processing most of the time. (Processing is
+ * done once every 10 calls if the previous processing revealed that work needed
+ * to be done, or once every 1000 calls if the previous processing revealed no
+ * work needed doing.) If you wish to control this yourself, call
+ * bman_poll_slow() instead, which always checks for portal processing work.
+ */
+void bman_poll(void);
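+
+/* Usage sketch: a core-processing loop that lets the affine portal perform
+ * any work that isn't interrupt-driven. Illustrative only; do_work() is a
+ * placeholder for the caller's own per-iteration processing.
+ *
+ *	for (;;) {
+ *		do_work();
+ *		bman_poll();
+ *	}
+ */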
+
+/**
+ * bman_rcr_is_empty - Determine if portal's RCR is empty
+ *
+ * For use in situations where a cpu-affine caller needs to determine when all
+ * releases for the local portal have been processed by BMan but can't use the
+ * BMAN_RELEASE_FLAG_WAIT_SYNC flag to do this from the final bman_release().
+ * The function forces tracking of RCR consumption (which normally doesn't
+ * happen until release processing needs to find space to put new release
+ * commands), and returns zero if the ring still has unprocessed entries,
+ * non-zero if it is empty.
+ */
+int bman_rcr_is_empty(void);
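+
+/* Usage sketch: waiting for all previously-issued releases on the local
+ * portal to be consumed by BMan, for callers that cannot use the
+ * BMAN_RELEASE_FLAG_WAIT_SYNC flag. Illustrative only.
+ *
+ *	while (!bman_rcr_is_empty())
+ *		cpu_relax();
+ */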
+
+/**
+ * bman_alloc_bpid_range - Allocate a contiguous range of BPIDs
+ * @result: set by the API to the base BPID of the allocated range
+ * @count: the number of BPIDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count BPIDs
+ *
+ * Returns the number of buffer pool IDs allocated, or a negative error code.
+ * If @partial is non-zero, the allocation request may return a smaller range
+ * of BPIDs than requested (though alignment will be as requested). If @partial
+ * is zero, the return value will either be @count or negative.
+ */
+int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial);
+static inline int bman_alloc_bpid(u32 *result)
+{
+	int ret = bman_alloc_bpid_range(result, 1, 0, 0);
+	return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * bman_release_bpid_range - Release the specified range of buffer pool IDs
+ * @bpid: the base BPID of the range to deallocate
+ * @count: the number of BPIDs in the range
+ *
+ * This function can also be used to seed the allocator with ranges of BPIDs
+ * that it can subsequently allocate from.
+ */
+void bman_release_bpid_range(u32 bpid, unsigned int count);
+static inline void bman_release_bpid(u32 bpid)
+{
+	bman_release_bpid_range(bpid, 1);
+}
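+
+/* Usage sketch: allocating a single dynamic BPID and returning it to the
+ * allocator when no longer needed. Illustrative only.
+ *
+ *	u32 bpid;
+ *	int ret = bman_alloc_bpid(&bpid);
+ *
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	bman_release_bpid(bpid);
+ */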
+
+int bman_reserve_bpid_range(u32 bpid, unsigned int count);
+static inline int bman_reserve_bpid(u32 bpid)
+{
+	return bman_reserve_bpid_range(bpid, 1);
+}
+
+/* Seed the BPID allocator with the given range of BPIDs. */
+void bman_seed_bpid_range(u32 bpid, unsigned int count);
+
+int bman_shutdown_pool(u32 bpid);
+
+	/* Pool management */
+	/* --------------- */
+/**
+ * bman_new_pool - Allocates a Buffer Pool object
+ * @params: parameters specifying the buffer pool ID and behaviour
+ *
+ * Creates a pool object for the given @params. A portal and the depletion
+ * callback field of @params are only used if the BMAN_POOL_FLAG_DEPLETION flag
+ * is set. NB, the fields from @params are copied into the new pool object, so
+ * the structure provided by the caller can be released or reused after the
+ * function returns.
+ */
+struct bman_pool *bman_new_pool(const struct bman_pool_params *params);
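+
+/* Usage sketch: creating a pool with a dynamically allocated BPID and a
+ * depletion callback. Illustrative only; my_depletion_cb and my_ctx are
+ * caller-supplied placeholders, and NULL is assumed to indicate failure.
+ *
+ *	struct bman_pool_params params = {
+ *		.flags = BMAN_POOL_FLAG_DYNAMIC_BPID | BMAN_POOL_FLAG_DEPLETION,
+ *		.cb = my_depletion_cb,
+ *		.cb_ctx = my_ctx,
+ *	};
+ *	struct bman_pool *pool = bman_new_pool(&params);
+ *
+ *	if (!pool)
+ *		return -ENOMEM;
+ */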
+
+/**
+ * bman_free_pool - Deallocates a Buffer Pool object
+ * @pool: the pool object to release
+ */
+void bman_free_pool(struct bman_pool *pool);
+
+/**
+ * bman_get_params - Returns a pool object's parameters.
+ * @pool: the pool object
+ *
+ * The returned pointer refers to state within the pool object, so it must not
+ * be modified, and it becomes invalid once the pool object is destroyed.
+ */
+const struct bman_pool_params *bman_get_params(const struct bman_pool *pool);
+
+/**
+ * bman_release - Release buffer(s) to the buffer pool
+ * @pool: the buffer pool object to release to
+ * @bufs: an array of buffers to release
+ * @num: the number of buffers in @bufs (1-8)
+ * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
+ *
+ * Adds the given buffers to RCR entries. If the portal was created with the
+ * "COMPACT" flag, then it will be using a compaction algorithm to improve
+ * utilisation of RCR. As such, these buffers may join an existing ring entry
+ * and/or it may not be issued right away so as to allow future releases to join
+ * the same ring entry. Use the BMAN_RELEASE_FLAG_NOW flag to override this
+ * behaviour by committing the RCR entry (or entries) right away. If the RCR
+ * ring is full, the function will return -EBUSY unless BMAN_RELEASE_FLAG_WAIT
+ * is selected, in which case it will sleep waiting for space to become
+ * available in RCR. If the function receives a signal before such time (and
+ * BMAN_RELEASE_FLAG_WAIT_INT is set), the function returns -EINTR. Otherwise,
+ * it returns zero.
+ */
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
+			u32 flags);
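+
+/* Usage sketch: releasing a single buffer and committing the RCR entry right
+ * away, sleeping if the ring is full. Illustrative only; 'pool' is an existing
+ * bman_pool object and 'daddr' a DMA address mapped by the caller.
+ *
+ *	struct bm_buffer buf;
+ *	int ret;
+ *
+ *	bm_buffer_set64(&buf, daddr);
+ *	ret = bman_release(pool, &buf, 1,
+ *			   BMAN_RELEASE_FLAG_WAIT | BMAN_RELEASE_FLAG_NOW);
+ */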
+
+/**
+ * bman_acquire - Acquire buffer(s) from a buffer pool
+ * @pool: the buffer pool object to acquire from
+ * @bufs: array for storing the acquired buffers
+ * @num: the number of buffers desired (@bufs is at least this big)
+ * @flags: bit-mask of BMAN_ACQUIRE_FLAG_*** options
+ *
+ * Issues an "Acquire" command via the portal's management command interface.
+ * The return value will be the number of buffers obtained from the pool, or a
+ * negative error code if a h/w error or pool starvation was encountered. In
+ * the latter case, the content of @bufs is undefined.
+ */
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
+			u32 flags);
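+
+/* Usage sketch: acquiring up to four buffers in one management command.
+ * Illustrative only; 'pool' is an existing bman_pool object, and on success
+ * 'ret' buffers are valid in bufs[0..ret-1].
+ *
+ *	struct bm_buffer bufs[4];
+ *	int ret = bman_acquire(pool, bufs, 4, 0);
+ *
+ *	if (ret < 0)
+ *		return ret;
+ */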
+
+/**
+ * bman_flush_stockpile - Flush stockpile buffer(s) to the buffer pool
+ * @pool: the buffer pool object the stockpile belongs to
+ * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
+ *
+ * Adds stockpile buffers to RCR entries until the stockpile is empty.
+ * The return value will be a negative error code if a h/w error occurred.
+ * If the BMAN_RELEASE_FLAG_NOW flag is passed and the RCR ring is full,
+ * -EAGAIN will be returned.
+ */
+int bman_flush_stockpile(struct bman_pool *pool, u32 flags);
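+
+/* Usage sketch: draining a stockpiling pool before freeing it. Illustrative
+ * only; a non-zero return indicates buffers may remain in the stockpile.
+ *
+ *	int ret = bman_flush_stockpile(pool, 0);
+ *
+ *	if (!ret)
+ *		bman_free_pool(pool);
+ */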
+
+#ifdef CONFIG_FSL_BMAN_CONFIG
+/**
+ * bman_query_free_buffers - Query how many free buffers are in the buffer pool
+ * @pool: the buffer pool object to query
+ *
+ * Returns the number of free buffers in the pool.
+ */
+u32 bman_query_free_buffers(struct bman_pool *pool);
+
+/**
+ * bman_update_pool_thresholds - Change the buffer pool's depletion thresholds
+ * @pool: the buffer pool object whose thresholds will be changed
+ * @thresholds: the new thresholds
+ */
+int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds);
+#endif
+
+/**
+ * The bman_p_***() variant below may be called even when the cpu to which the
+ * portal is affine is not yet online.
+ * @bman_portal specifies which portal the API will use.
+ */
+int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits);
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FSL_BMAN_H */
-- 
2.3.0