Message-ID: <1474030832-20611-3-git-send-email-claudiu.manoil@nxp.com>
Date:   Fri, 16 Sep 2016 16:00:29 +0300
From:   Claudiu Manoil <claudiu.manoil@....com>
To:     <linuxppc-dev@...ts.ozlabs.org>,
        Scott Wood <scottwood@...escale.com>
CC:     <linux-kernel@...r.kernel.org>, Roy Pledge <roy.pledge@....com>
Subject: [PATCH 2/5] soc/fsl: Introduce DPAA 1.x QMan device driver

This driver enables the Freescale DPAA 1.x Queue Manager block.
QMan is a hardware accelerator that manages frame queues.  It allows
CPUs and other accelerators connected to the SoC datapath to enqueue
and dequeue Ethernet frames, thus providing the infrastructure for
data exchange among CPUs and datapath accelerators.

Signed-off-by: Roy Pledge <roy.pledge@....com>
Signed-off-by: Claudiu Manoil <claudiu.manoil@....com>
---
 drivers/soc/fsl/qbman/Kconfig       |    4 +
 drivers/soc/fsl/qbman/Makefile      |    5 +-
 drivers/soc/fsl/qbman/qman.c        | 2881 +++++++++++++++++++++++++++++++++++
 drivers/soc/fsl/qbman/qman_ccsr.c   |  817 ++++++++++
 drivers/soc/fsl/qbman/qman_portal.c |  354 +++++
 drivers/soc/fsl/qbman/qman_priv.h   |  370 +++++
 include/soc/fsl/qman.h              | 1076 +++++++++++++
 7 files changed, 5505 insertions(+), 2 deletions(-)
 create mode 100644 drivers/soc/fsl/qbman/qman.c
 create mode 100644 drivers/soc/fsl/qbman/qman_ccsr.c
 create mode 100644 drivers/soc/fsl/qbman/qman_portal.c
 create mode 100644 drivers/soc/fsl/qbman/qman_priv.h
 create mode 100644 include/soc/fsl/qman.h

diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig
index 88ce5c6..0abb9c8 100644
--- a/drivers/soc/fsl/qbman/Kconfig
+++ b/drivers/soc/fsl/qbman/Kconfig
@@ -13,6 +13,10 @@ menuconfig FSL_DPAA
 	  that allows software and accelerators on the datapath to acquire and
 	  release buffers in order to build frames.
 
+	  The Queue Manager (QMan) is a hardware queue management block
+	  that allows software and accelerators on the datapath to enqueue and
+	  dequeue frames in order to communicate.
+
 if FSL_DPAA
 
 config FSL_DPAA_CHECKING
diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile
index 855c3ac..6e0ee30 100644
--- a/drivers/soc/fsl/qbman/Makefile
+++ b/drivers/soc/fsl/qbman/Makefile
@@ -1,2 +1,3 @@
-obj-$(CONFIG_FSL_DPAA)                          += bman_ccsr.o bman_portal.o \
-						   bman.o
+obj-$(CONFIG_FSL_DPAA)                          += bman_ccsr.o qman_ccsr.o \
+						   bman_portal.o qman_portal.o \
+						   bman.o qman.o
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
new file mode 100644
index 0000000..a63524f
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -0,0 +1,2881 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+#define DQRR_MAXFILL	15
+#define EQCR_ITHRESH	4	/* if EQCR congests, interrupt threshold */
+#define IRQNAME		"QMan portal %d"
+#define MAX_IRQNAME	16	/* big enough for "QMan portal %d" */
+#define QMAN_POLL_LIMIT 32
+#define QMAN_PIRQ_DQRR_ITHRESH 12
+#define QMAN_PIRQ_MR_ITHRESH 4
+#define QMAN_PIRQ_IPERIOD 100
+
+/* Portal register assists */
+
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH	0x0000
+#define QM_REG_EQCR_CI_CINH	0x0004
+#define QM_REG_EQCR_ITR		0x0008
+#define QM_REG_DQRR_PI_CINH	0x0040
+#define QM_REG_DQRR_CI_CINH	0x0044
+#define QM_REG_DQRR_ITR		0x0048
+#define QM_REG_DQRR_DCAP	0x0050
+#define QM_REG_DQRR_SDQCR	0x0054
+#define QM_REG_DQRR_VDQCR	0x0058
+#define QM_REG_DQRR_PDQCR	0x005c
+#define QM_REG_MR_PI_CINH	0x0080
+#define QM_REG_MR_CI_CINH	0x0084
+#define QM_REG_MR_ITR		0x0088
+#define QM_REG_CFG		0x0100
+#define QM_REG_ISR		0x0e00
+#define QM_REG_IER		0x0e04
+#define QM_REG_ISDR		0x0e08
+#define QM_REG_IIR		0x0e0c
+#define QM_REG_ITPR		0x0e14
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR		0x0000
+#define QM_CL_DQRR		0x1000
+#define QM_CL_MR		0x2000
+#define QM_CL_EQCR_PI_CENA	0x3000
+#define QM_CL_EQCR_CI_CENA	0x3100
+#define QM_CL_DQRR_PI_CENA	0x3200
+#define QM_CL_DQRR_CI_CENA	0x3300
+#define QM_CL_MR_PI_CENA	0x3400
+#define QM_CL_MR_CI_CENA	0x3500
+#define QM_CL_CR		0x3800
+#define QM_CL_RR0		0x3900
+#define QM_CL_RR1		0x3940
+
+/*
+ * BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses and data-dependencies. Use of barrier()s
+ * or other order-preserving primitives simply degrades performance. Hence the
+ * use of the __raw_*() interfaces, which simply ensure that the compiler treats
+ * the portal registers as volatile
+ */
+
+/* Cache-enabled ring access */
+#define qm_cl(base, idx)	((void *)base + ((idx) << 6))
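+
+/*
+ * For example, qm_cl(dqrr->ring, 3) resolves to the ring base plus
+ * 3 << 6 = 192 bytes: each ring entry (EQCR, DQRR, MR) occupies exactly
+ * one 64-byte cacheline, hence the fixed shift of 6.
+ */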
+
+/*
+ * Portal modes.
+ *   Enum types;
+ *     pmode == production mode
+ *     cmode == consumption mode,
+ *     dmode == h/w dequeue mode.
+ *   Enum values use 3 letter codes. First letter matches the portal mode,
+ *   remaining two letters indicate;
+ *     ci == cache-inhibited portal register
+ *     ce == cache-enabled portal register
+ *     vb == in-band valid-bit (cache-enabled)
+ *     dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
+ *   As for "enum qm_dqrr_dmode", it should be self-explanatory.
+ */
+enum qm_eqcr_pmode {		/* matches QCSP_CFG::EPM */
+	qm_eqcr_pci = 0,	/* PI index, cache-inhibited */
+	qm_eqcr_pce = 1,	/* PI index, cache-enabled */
+	qm_eqcr_pvb = 2		/* valid-bit */
+};
+enum qm_dqrr_dmode {		/* matches QCSP_CFG::DP */
+	qm_dqrr_dpush = 0,	/* SDQCR  + VDQCR */
+	qm_dqrr_dpull = 1	/* PDQCR */
+};
+enum qm_dqrr_pmode {		/* s/w-only */
+	qm_dqrr_pci,		/* reads DQRR_PI_CINH */
+	qm_dqrr_pce,		/* reads DQRR_PI_CENA */
+	qm_dqrr_pvb		/* reads valid-bit */
+};
+enum qm_dqrr_cmode {		/* matches QCSP_CFG::DCM */
+	qm_dqrr_cci = 0,	/* CI index, cache-inhibited */
+	qm_dqrr_cce = 1,	/* CI index, cache-enabled */
+	qm_dqrr_cdc = 2		/* Discrete Consumption Acknowledgment */
+};
+enum qm_mr_pmode {		/* s/w-only */
+	qm_mr_pci,		/* reads MR_PI_CINH */
+	qm_mr_pce,		/* reads MR_PI_CENA */
+	qm_mr_pvb		/* reads valid-bit */
+};
+enum qm_mr_cmode {		/* matches QCSP_CFG::MM */
+	qm_mr_cci = 0,		/* CI index, cache-inhibited */
+	qm_mr_cce = 1		/* CI index, cache-enabled */
+};
+
+/* --- Portal structures --- */
+
+#define QM_EQCR_SIZE		8
+#define QM_DQRR_SIZE		16
+#define QM_MR_SIZE		8
+
+/* "Enqueue Command" */
+struct qm_eqcr_entry {
+	u8 _ncw_verb; /* writes to this are non-coherent */
+	u8 dca;
+	u16 seqnum;
+	u32 orp;	/* 24-bit */
+	u32 fqid;	/* 24-bit */
+	u32 tag;
+	struct qm_fd fd;
+	u8 __reserved3[32];
+} __packed;
+#define QM_EQCR_VERB_VBIT		0x80
+#define QM_EQCR_VERB_CMD_MASK		0x61	/* but only one value; */
+#define QM_EQCR_VERB_CMD_ENQUEUE	0x01
+#define QM_EQCR_SEQNUM_NESN		0x8000	/* Advance NESN */
+#define QM_EQCR_SEQNUM_NLIS		0x4000	/* More fragments to come */
+#define QM_EQCR_SEQNUM_SEQMASK		0x3fff	/* sequence number goes here */
+
+struct qm_eqcr {
+	struct qm_eqcr_entry *ring, *cursor;
+	u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	u32 busy;
+	enum qm_eqcr_pmode pmode;
+#endif
+};
+
+struct qm_dqrr {
+	const struct qm_dqrr_entry *ring, *cursor;
+	u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	enum qm_dqrr_dmode dmode;
+	enum qm_dqrr_pmode pmode;
+	enum qm_dqrr_cmode cmode;
+#endif
+};
+
+struct qm_mr {
+	union qm_mr_entry *ring, *cursor;
+	u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	enum qm_mr_pmode pmode;
+	enum qm_mr_cmode cmode;
+#endif
+};
+
+/* MC (Management Command) command */
+/* "Query FQ" */
+struct qm_mcc_queryfq {
+	u8 _ncw_verb;
+	u8 __reserved1[3];
+	u32 fqid;	/* 24-bit */
+	u8 __reserved2[56];
+} __packed;
+/* "Alter FQ State Commands " */
+struct qm_mcc_alterfq {
+	u8 _ncw_verb;
+	u8 __reserved1[3];
+	u32 fqid;	/* 24-bit */
+	u8 __reserved2;
+	u8 count;	/* number of consecutive FQID */
+	u8 __reserved3[10];
+	u32 context_b;	/* frame queue context b */
+	u8 __reserved4[40];
+} __packed;
+
+/* "Query CGR" */
+struct qm_mcc_querycgr {
+	u8 _ncw_verb;
+	u8 __reserved1[30];
+	u8 cgid;
+	u8 __reserved2[32];
+};
+
+struct qm_mcc_querywq {
+	u8 _ncw_verb;
+	u8 __reserved;
+	/* select channel if verb != QUERYWQ_DEDICATED */
+	u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
+	u8 __reserved2[60];
+} __packed;
+
+#define QM_MCC_VERB_VBIT		0x80
+#define QM_MCC_VERB_MASK		0x7f	/* where the verb contains; */
+#define QM_MCC_VERB_INITFQ_PARKED	0x40
+#define QM_MCC_VERB_INITFQ_SCHED	0x41
+#define QM_MCC_VERB_QUERYFQ		0x44
+#define QM_MCC_VERB_QUERYFQ_NP		0x45	/* "non-programmable" fields */
+#define QM_MCC_VERB_QUERYWQ		0x46
+#define QM_MCC_VERB_QUERYWQ_DEDICATED	0x47
+#define QM_MCC_VERB_ALTER_SCHED		0x48	/* Schedule FQ */
+#define QM_MCC_VERB_ALTER_FE		0x49	/* Force Eligible FQ */
+#define QM_MCC_VERB_ALTER_RETIRE	0x4a	/* Retire FQ */
+#define QM_MCC_VERB_ALTER_OOS		0x4b	/* Take FQ out of service */
+#define QM_MCC_VERB_ALTER_FQXON		0x4d	/* FQ XON */
+#define QM_MCC_VERB_ALTER_FQXOFF	0x4e	/* FQ XOFF */
+#define QM_MCC_VERB_INITCGR		0x50
+#define QM_MCC_VERB_MODIFYCGR		0x51
+#define QM_MCC_VERB_CGRTESTWRITE	0x52
+#define QM_MCC_VERB_QUERYCGR		0x58
+#define QM_MCC_VERB_QUERYCONGESTION	0x59
+union qm_mc_command {
+	struct {
+		u8 _ncw_verb; /* writes to this are non-coherent */
+		u8 __reserved[63];
+	};
+	struct qm_mcc_initfq initfq;
+	struct qm_mcc_queryfq queryfq;
+	struct qm_mcc_alterfq alterfq;
+	struct qm_mcc_initcgr initcgr;
+	struct qm_mcc_querycgr querycgr;
+	struct qm_mcc_querywq querywq;
+	struct qm_mcc_queryfq_np queryfq_np;
+};
+
+/* MC (Management Command) result */
+/* "Query FQ" */
+struct qm_mcr_queryfq {
+	u8 verb;
+	u8 result;
+	u8 __reserved1[8];
+	struct qm_fqd fqd;	/* the FQD fields are here */
+	u8 __reserved2[30];
+} __packed;
+
+/* "Alter FQ State Commands" */
+struct qm_mcr_alterfq {
+	u8 verb;
+	u8 result;
+	u8 fqs;		/* Frame Queue Status */
+	u8 __reserved1[61];
+};
+#define QM_MCR_VERB_RRID		0x80
+#define QM_MCR_VERB_MASK		QM_MCC_VERB_MASK
+#define QM_MCR_VERB_INITFQ_PARKED	QM_MCC_VERB_INITFQ_PARKED
+#define QM_MCR_VERB_INITFQ_SCHED	QM_MCC_VERB_INITFQ_SCHED
+#define QM_MCR_VERB_QUERYFQ		QM_MCC_VERB_QUERYFQ
+#define QM_MCR_VERB_QUERYFQ_NP		QM_MCC_VERB_QUERYFQ_NP
+#define QM_MCR_VERB_QUERYWQ		QM_MCC_VERB_QUERYWQ
+#define QM_MCR_VERB_QUERYWQ_DEDICATED	QM_MCC_VERB_QUERYWQ_DEDICATED
+#define QM_MCR_VERB_ALTER_SCHED		QM_MCC_VERB_ALTER_SCHED
+#define QM_MCR_VERB_ALTER_FE		QM_MCC_VERB_ALTER_FE
+#define QM_MCR_VERB_ALTER_RETIRE	QM_MCC_VERB_ALTER_RETIRE
+#define QM_MCR_VERB_ALTER_OOS		QM_MCC_VERB_ALTER_OOS
+#define QM_MCR_RESULT_NULL		0x00
+#define QM_MCR_RESULT_OK		0xf0
+#define QM_MCR_RESULT_ERR_FQID		0xf1
+#define QM_MCR_RESULT_ERR_FQSTATE	0xf2
+#define QM_MCR_RESULT_ERR_NOTEMPTY	0xf3	/* OOS fails if FQ is !empty */
+#define QM_MCR_RESULT_ERR_BADCHANNEL	0xf4
+#define QM_MCR_RESULT_PENDING		0xf8
+#define QM_MCR_RESULT_ERR_BADCOMMAND	0xff
+#define QM_MCR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
+#define QM_MCR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
+#define QM_MCR_TIMEOUT			10000	/* us */
+union qm_mc_result {
+	struct {
+		u8 verb;
+		u8 result;
+		u8 __reserved1[62];
+	};
+	struct qm_mcr_queryfq queryfq;
+	struct qm_mcr_alterfq alterfq;
+	struct qm_mcr_querycgr querycgr;
+	struct qm_mcr_querycongestion querycongestion;
+	struct qm_mcr_querywq querywq;
+	struct qm_mcr_queryfq_np queryfq_np;
+};
+
+struct qm_mc {
+	union qm_mc_command *cr;
+	union qm_mc_result *rr;
+	u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	enum {
+		/* Can be _mc_start()ed */
+		qman_mc_idle,
+		/* Can be _mc_commit()ed or _mc_abort()ed */
+		qman_mc_user,
+		/* Can only be _mc_retry()ed */
+		qman_mc_hw
+	} state;
+#endif
+};
+
+struct qm_addr {
+	void __iomem *ce;	/* cache-enabled */
+	void __iomem *ci;	/* cache-inhibited */
+};
+
+struct qm_portal {
+	/*
+	 * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
+	 * and including 'mc' fits within a cacheline (yay!). The 'config' part
+	 * is setup-only, so isn't a cause for concern. In other words, don't
+	 * rearrange this structure on a whim, there be dragons ...
+	 */
+	struct qm_addr addr;
+	struct qm_eqcr eqcr;
+	struct qm_dqrr dqrr;
+	struct qm_mr mr;
+	struct qm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 qm_in(struct qm_portal *p, u32 offset)
+{
+	return __raw_readl(p->addr.ci + offset);
+}
+
+static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
+{
+	__raw_writel(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
+{
+	dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
+{
+	dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
+{
+	return __raw_readl(p->addr.ce + offset);
+}
+
+/* --- EQCR API --- */
+
+#define EQCR_SHIFT	ilog2(sizeof(struct qm_eqcr_entry))
+#define EQCR_CARRY	(uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
+{
+	uintptr_t addr = (uintptr_t)p;
+
+	addr &= ~EQCR_CARRY;
+
+	return (struct qm_eqcr_entry *)addr;
+}
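+
+/*
+ * Worked example: sizeof(struct qm_eqcr_entry) is 64 bytes, so EQCR_SHIFT
+ * is 6 and EQCR_CARRY is 8 << 6 = 0x200. The ring base is naturally
+ * 0x200-aligned within the cache-enabled region, so stepping the cursor
+ * off entry 7 sets only the 0x200 "carry" bit; masking that bit off wraps
+ * the pointer back to entry 0.
+ */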
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
+{
+	return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void eqcr_inc(struct qm_eqcr *eqcr)
+{
+	/* increment to the next EQCR pointer and handle overflow and 'vbit' */
+	struct qm_eqcr_entry *partial = eqcr->cursor + 1;
+
+	eqcr->cursor = eqcr_carryclear(partial);
+	if (partial != eqcr->cursor)
+		eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+}
+
+static inline int qm_eqcr_init(struct qm_portal *portal,
+				enum qm_eqcr_pmode pmode,
+				unsigned int eq_stash_thresh,
+				int eq_stash_prio)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+	u32 cfg;
+	u8 pi;
+
+	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
+	eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+	pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+	eqcr->cursor = eqcr->ring + pi;
+	eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
+		     QM_EQCR_VERB_VBIT : 0;
+	eqcr->available = QM_EQCR_SIZE - 1 -
+			  dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
+	eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	eqcr->busy = 0;
+	eqcr->pmode = pmode;
+#endif
+	cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
+	      (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
+	      (eq_stash_prio << 26) | /* QCSP_CFG: EP */
+	      ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
+	qm_out(portal, QM_REG_CFG, cfg);
+	return 0;
+}
+
+static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
+{
+	return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
+}
+
+static inline void qm_eqcr_finish(struct qm_portal *portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+	u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+	u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+
+	DPAA_ASSERT(!eqcr->busy);
+	if (pi != eqcr_ptr2idx(eqcr->cursor))
+		pr_crit("losing uncommited EQCR entries\n");
+	if (ci != eqcr->ci)
+		pr_crit("missing existing EQCR completions\n");
+	if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
+		pr_crit("EQCR destroyed unquiesced\n");
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
+								 *portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+
+	DPAA_ASSERT(!eqcr->busy);
+	if (!eqcr->available)
+		return NULL;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	eqcr->busy = 1;
+#endif
+	dpaa_zero(eqcr->cursor);
+	return eqcr->cursor;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
+								*portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+	u8 diff, old_ci;
+
+	DPAA_ASSERT(!eqcr->busy);
+	if (!eqcr->available) {
+		old_ci = eqcr->ci;
+		eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
+			   (QM_EQCR_SIZE - 1);
+		diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+		eqcr->available += diff;
+		if (!diff)
+			return NULL;
+	}
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	eqcr->busy = 1;
+#endif
+	dpaa_zero(eqcr->cursor);
+	return eqcr->cursor;
+}
+
+static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
+{
+	DPAA_ASSERT(eqcr->busy);
+	DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff));
+	DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff));
+	DPAA_ASSERT(eqcr->available >= 1);
+}
+
+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+	struct qm_eqcr_entry *eqcursor;
+
+	eqcr_commit_checks(eqcr);
+	DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
+	dma_wmb();
+	eqcursor = eqcr->cursor;
+	eqcursor->_ncw_verb = myverb | eqcr->vbit;
+	dpaa_flush(eqcursor);
+	eqcr_inc(eqcr);
+	eqcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
+{
+	qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
+}
+
+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+	u8 diff, old_ci = eqcr->ci;
+
+	eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
+	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+	diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+	eqcr->available += diff;
+	return diff;
+}
+
+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+
+	eqcr->ithresh = ithresh;
+	qm_out(portal, QM_REG_EQCR_ITR, ithresh);
+}
+
+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+
+	return eqcr->available;
+}
+
+static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+
+	return QM_EQCR_SIZE - 1 - eqcr->available;
+}
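+
+/*
+ * Note the invariant: available + fill == QM_EQCR_SIZE - 1. One of the
+ * eight EQCR slots is always left unused so that a full ring remains
+ * distinguishable from an empty one; e.g. with ci == pi the ring is
+ * empty, available is 7 and fill is 0.
+ */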
+
+/* --- DQRR API --- */
+
+#define DQRR_SHIFT	ilog2(sizeof(struct qm_dqrr_entry))
+#define DQRR_CARRY	(uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)
+
+static const struct qm_dqrr_entry *dqrr_carryclear(
+					const struct qm_dqrr_entry *p)
+{
+	uintptr_t addr = (uintptr_t)p;
+
+	addr &= ~DQRR_CARRY;
+
+	return (const struct qm_dqrr_entry *)addr;
+}
+
+static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
+{
+	return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
+}
+
+static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
+{
+	return dqrr_carryclear(e + 1);
+}
+
+static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
+{
+	qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
+				   ((mf & (QM_DQRR_SIZE - 1)) << 20));
+}
+
+static inline int qm_dqrr_init(struct qm_portal *portal,
+			       const struct qm_portal_config *config,
+			       enum qm_dqrr_dmode dmode,
+			       enum qm_dqrr_pmode pmode,
+			       enum qm_dqrr_cmode cmode, u8 max_fill)
+{
+	struct qm_dqrr *dqrr = &portal->dqrr;
+	u32 cfg;
+
+	/* Make sure the DQRR will be idle when we enable */
+	qm_out(portal, QM_REG_DQRR_SDQCR, 0);
+	qm_out(portal, QM_REG_DQRR_VDQCR, 0);
+	qm_out(portal, QM_REG_DQRR_PDQCR, 0);
+	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
+	dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+	dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+	dqrr->cursor = dqrr->ring + dqrr->ci;
+	dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+	dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
+			QM_DQRR_VERB_VBIT : 0;
+	dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	dqrr->dmode = dmode;
+	dqrr->pmode = pmode;
+	dqrr->cmode = cmode;
+#endif
+	/* Invalidate every ring entry before beginning */
+	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
+		dpaa_invalidate(qm_cl(dqrr->ring, cfg));
+	cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
+		((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
+		((dmode & 1) << 18) |			/* DP */
+		((cmode & 3) << 16) |			/* DCM */
+		0xa0 |					/* RE+SE */
+		(0 ? 0x40 : 0) |			/* Ignore RP */
+		(0 ? 0x10 : 0);				/* Ignore SP */
+	qm_out(portal, QM_REG_CFG, cfg);
+	qm_dqrr_set_maxfill(portal, max_fill);
+	return 0;
+}
+
+static inline void qm_dqrr_finish(struct qm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	struct qm_dqrr *dqrr = &portal->dqrr;
+
+	if (dqrr->cmode != qm_dqrr_cdc &&
+	    dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
+		pr_crit("Ignoring completed DQRR entries\n");
+#endif
+}
+
+static inline const struct qm_dqrr_entry *qm_dqrr_current(
+						struct qm_portal *portal)
+{
+	struct qm_dqrr *dqrr = &portal->dqrr;
+
+	if (!dqrr->fill)
+		return NULL;
+	return dqrr->cursor;
+}
+
+static inline u8 qm_dqrr_next(struct qm_portal *portal)
+{
+	struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPAA_ASSERT(dqrr->fill);
+	dqrr->cursor = dqrr_inc(dqrr->cursor);
+	return --dqrr->fill;
+}
+
+static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
+{
+	struct qm_dqrr *dqrr = &portal->dqrr;
+	struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
+
+	DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
+#ifndef CONFIG_FSL_PAMU
+	/*
+	 * If PAMU is not available we need to invalidate the cache.
+	 * When PAMU is available the cache is updated by stash
+	 */
+	dpaa_invalidate_touch_ro(res);
+#endif
+	/*
+	 * When accessing 'verb', use __raw_readb() to ensure that compiler
+	 * inlining doesn't try to optimise out "excess reads".
+	 */
+	if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+		dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
+		if (!dqrr->pi)
+			dqrr->vbit ^= QM_DQRR_VERB_VBIT;
+		dqrr->fill++;
+	}
+}
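+
+/*
+ * Valid-bit illustration: suppose h/w produces a lap of the 16-entry DQRR
+ * with VB = 1 while dqrr->vbit is QM_DQRR_VERB_VBIT. Entry pi is accepted
+ * only while the bits match; once pi wraps past entry 15, dqrr->vbit
+ * flips to 0 to match what h/w writes on its next lap, so a stale entry
+ * left over from the previous lap can never be mistaken for a new one.
+ */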
+
+static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
+					const struct qm_dqrr_entry *dq,
+					int park)
+{
+	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+	int idx = dqrr_ptr2idx(dq);
+
+	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+	DPAA_ASSERT((dqrr->ring + idx) == dq);
+	DPAA_ASSERT(idx < QM_DQRR_SIZE);
+	qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
+	       ((park ? 1 : 0) << 6) |		    /* DQRR_DCAP::PK */
+	       idx);				    /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
+{
+	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+	qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
+	       (bitmask << 16));		    /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
+{
+	qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
+}
+
+static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
+{
+	qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
+}
+
+static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+	qm_out(portal, QM_REG_DQRR_ITR, ithresh);
+}
+
+/* --- MR API --- */
+
+#define MR_SHIFT	ilog2(sizeof(union qm_mr_entry))
+#define MR_CARRY	(uintptr_t)(QM_MR_SIZE << MR_SHIFT)
+
+static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
+{
+	uintptr_t addr = (uintptr_t)p;
+
+	addr &= ~MR_CARRY;
+
+	return (union qm_mr_entry *)addr;
+}
+
+static inline int mr_ptr2idx(const union qm_mr_entry *e)
+{
+	return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
+}
+
+static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
+{
+	return mr_carryclear(e + 1);
+}
+
+static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
+			     enum qm_mr_cmode cmode)
+{
+	struct qm_mr *mr = &portal->mr;
+	u32 cfg;
+
+	mr->ring = portal->addr.ce + QM_CL_MR;
+	mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
+	mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
+	mr->cursor = mr->ring + mr->ci;
+	mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
+	mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
+		? QM_MR_VERB_VBIT : 0;
+	mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mr->pmode = pmode;
+	mr->cmode = cmode;
+#endif
+	cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
+	      ((cmode & 1) << 8);	/* QCSP_CFG:MM */
+	qm_out(portal, QM_REG_CFG, cfg);
+	return 0;
+}
+
+static inline void qm_mr_finish(struct qm_portal *portal)
+{
+	struct qm_mr *mr = &portal->mr;
+
+	if (mr->ci != mr_ptr2idx(mr->cursor))
+		pr_crit("Ignoring completed MR entries\n");
+}
+
+static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
+{
+	struct qm_mr *mr = &portal->mr;
+
+	if (!mr->fill)
+		return NULL;
+	return mr->cursor;
+}
+
+static inline int qm_mr_next(struct qm_portal *portal)
+{
+	struct qm_mr *mr = &portal->mr;
+
+	DPAA_ASSERT(mr->fill);
+	mr->cursor = mr_inc(mr->cursor);
+	return --mr->fill;
+}
+
+static inline void qm_mr_pvb_update(struct qm_portal *portal)
+{
+	struct qm_mr *mr = &portal->mr;
+	union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
+
+	DPAA_ASSERT(mr->pmode == qm_mr_pvb);
+	/*
+	 * When accessing 'verb', use __raw_readb() to ensure that compiler
+	 * inlining doesn't try to optimise out "excess reads".
+	 */
+	if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
+		if (!mr->pi)
+			mr->vbit ^= QM_MR_VERB_VBIT;
+		mr->fill++;
+		res = mr_inc(res);
+	}
+	dpaa_invalidate_touch_ro(res);
+}
+
+static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
+{
+	struct qm_mr *mr = &portal->mr;
+
+	DPAA_ASSERT(mr->cmode == qm_mr_cci);
+	mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
+{
+	struct qm_mr *mr = &portal->mr;
+
+	DPAA_ASSERT(mr->cmode == qm_mr_cci);
+	mr->ci = mr_ptr2idx(mr->cursor);
+	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+	qm_out(portal, QM_REG_MR_ITR, ithresh);
+}
+
+/* --- Management command API --- */
+
+static inline int qm_mc_init(struct qm_portal *portal)
+{
+	struct qm_mc *mc = &portal->mc;
+
+	mc->cr = portal->addr.ce + QM_CL_CR;
+	mc->rr = portal->addr.ce + QM_CL_RR0;
+	mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT)
+		    ? 0 : 1;
+	mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = qman_mc_idle;
+#endif
+	return 0;
+}
+
+static inline void qm_mc_finish(struct qm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	struct qm_mc *mc = &portal->mc;
+
+	DPAA_ASSERT(mc->state == qman_mc_idle);
+	if (mc->state != qman_mc_idle)
+		pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
+{
+	struct qm_mc *mc = &portal->mc;
+
+	DPAA_ASSERT(mc->state == qman_mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = qman_mc_user;
+#endif
+	dpaa_zero(mc->cr);
+	return mc->cr;
+}
+
+static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
+{
+	struct qm_mc *mc = &portal->mc;
+	union qm_mc_result *rr = mc->rr + mc->rridx;
+
+	DPAA_ASSERT(mc->state == qman_mc_user);
+	dma_wmb();
+	mc->cr->_ncw_verb = myverb | mc->vbit;
+	dpaa_flush(mc->cr);
+	dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = qman_mc_hw;
+#endif
+}
+
+static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
+{
+	struct qm_mc *mc = &portal->mc;
+	union qm_mc_result *rr = mc->rr + mc->rridx;
+
+	DPAA_ASSERT(mc->state == qman_mc_hw);
+	/*
+	 * The inactive response register's verb byte always returns zero until
+	 * its command is submitted and completed. This includes the valid-bit,
+	 * in case you were wondering...
+	 */
+	if (!__raw_readb(&rr->verb)) {
+		dpaa_invalidate_touch_ro(rr);
+		return NULL;
+	}
+	mc->rridx ^= 1;
+	mc->vbit ^= QM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = qman_mc_idle;
+#endif
+	return rr;
+}
+
+static inline int qm_mc_result_timeout(struct qm_portal *portal,
+				       union qm_mc_result **mcr)
+{
+	int timeout = QM_MCR_TIMEOUT;
+
+	do {
+		*mcr = qm_mc_result(portal);
+		if (*mcr)
+			break;
+		udelay(1);
+	} while (--timeout);
+
+	return timeout;
+}
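+
+/*
+ * Typical management-command sequence, as used by the FQ and CGR APIs
+ * below (sketch only; locking and error handling elided):
+ *
+ *	mcc = qm_mc_start(&p->p);
+ *	mcc->queryfq.fqid = fqid;
+ *	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ *	if (!qm_mc_result_timeout(&p->p, &mcr))
+ *		return -ETIMEDOUT;
+ *	fqd = mcr->queryfq.fqd;
+ */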
+
+static inline void fq_set(struct qman_fq *fq, u32 mask)
+{
+	set_bits(mask, &fq->flags);
+}
+
+static inline void fq_clear(struct qman_fq *fq, u32 mask)
+{
+	clear_bits(mask, &fq->flags);
+}
+
+static inline int fq_isset(struct qman_fq *fq, u32 mask)
+{
+	return fq->flags & mask;
+}
+
+static inline int fq_isclear(struct qman_fq *fq, u32 mask)
+{
+	return !(fq->flags & mask);
+}
+
+struct qman_portal {
+	struct qm_portal p;
+	/* PORTAL_BITS_*** - dynamic, strictly internal */
+	unsigned long bits;
+	/* interrupt sources processed by portal_isr(), configurable */
+	unsigned long irq_sources;
+	u32 use_eqcr_ci_stashing;
+	/* only 1 volatile dequeue at a time */
+	struct qman_fq *vdqcr_owned;
+	u32 sdqcr;
+	/* probing time config params for cpu-affine portals */
+	const struct qm_portal_config *config;
+	/* needed for providing a non-NULL device to dma_map_***() */
+	struct platform_device *pdev;
+	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
+	struct qman_cgrs *cgrs;
+	/* linked-list of CSCN handlers. */
+	struct list_head cgr_cbs;
+	/* list lock */
+	spinlock_t cgr_lock;
+	struct work_struct congestion_work;
+	struct work_struct mr_work;
+	char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static u16 affine_channels[NR_CPUS];
+static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
+void *affine_portals[NR_CPUS];
+
+static inline struct qman_portal *get_affine_portal(void)
+{
+	return &get_cpu_var(qman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+	put_cpu_var(qman_affine_portal);
+}
+
+static struct workqueue_struct *qm_portal_wq;
+
+int qman_wq_alloc(void)
+{
+	qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
+	if (!qm_portal_wq)
+		return -ENOMEM;
+	return 0;
+}
+
+/*
+ * This is what everything can wait on, even if it migrates to a different cpu
+ * from the one whose affine portal it is waiting on.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static struct qman_fq **fq_table;
+static u32 num_fqids;
+
+int qman_alloc_fq_table(u32 _num_fqids)
+{
+	num_fqids = _num_fqids;
+
+	fq_table = vzalloc(num_fqids * 2 * sizeof(struct qman_fq *));
+	if (!fq_table)
+		return -ENOMEM;
+
+	pr_debug("Allocated fq lookup table at %p, entry count %u\n",
+		 fq_table, num_fqids * 2);
+	return 0;
+}
+
+static struct qman_fq *idx_to_fq(u32 idx)
+{
+	struct qman_fq *fq;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	if (WARN_ON(idx >= num_fqids * 2))
+		return NULL;
+#endif
+	fq = fq_table[idx];
+	DPAA_ASSERT(!fq || idx == fq->idx);
+
+	return fq;
+}
+
+/*
+ * Only returns full-service fq objects, not enqueue-only
+ * references (QMAN_FQ_FLAG_NO_MODIFY).
+ */
+static struct qman_fq *fqid_to_fq(u32 fqid)
+{
+	return idx_to_fq(fqid * 2);
+}
+
+static struct qman_fq *tag_to_fq(u32 tag)
+{
+#if BITS_PER_LONG == 64
+	return idx_to_fq(tag);
+#else
+	return (struct qman_fq *)tag;
+#endif
+}
+
+static u32 fq_to_tag(struct qman_fq *fq)
+{
+#if BITS_PER_LONG == 64
+	return fq->idx;
+#else
+	return (u32)fq;
+#endif
+}
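+
+/*
+ * Lookup-scheme example: fqid 100 owns fq_table[200] for its full-service
+ * object and fq_table[201] for an enqueue-only (QMAN_FQ_FLAG_NO_MODIFY)
+ * reference; see qman_create_fq(). On 64-bit the tag programmed into
+ * contextB is that table index, on 32-bit it is the qman_fq pointer
+ * itself.
+ */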
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+					unsigned int poll_limit);
+static void qm_congestion_task(struct work_struct *work);
+static void qm_mr_process_task(struct work_struct *work);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+	struct qman_portal *p = ptr;
+
+	u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
+	u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
+
+	if (unlikely(!is))
+		return IRQ_NONE;
+
+	/* DQRR-handling if it's interrupt-driven */
+	if (is & QM_PIRQ_DQRI)
+		__poll_portal_fast(p, QMAN_POLL_LIMIT);
+	/* Handling of anything else that's interrupt-driven */
+	clear |= __poll_portal_slow(p, is);
+	qm_out(&p->p, QM_REG_ISR, clear);
+	return IRQ_HANDLED;
+}
+
+static int drain_mr_fqrni(struct qm_portal *p)
+{
+	const union qm_mr_entry *msg;
+loop:
+	msg = qm_mr_current(p);
+	if (!msg) {
+		/*
+		 * if MR was full and h/w had other FQRNI entries to produce, we
+		 * need to allow it time to produce those entries once the
+		 * existing entries are consumed. A worst-case situation
+		 * (fully-loaded system) means h/w sequencers may have to do 3-4
+		 * other things before servicing the portal's MR pump, each of
+		 * which (if slow) may take ~50 qman cycles (which is ~200
+		 * processor cycles). So rounding up and then multiplying this
+		 * worst-case estimate by a factor of 10, just to be
+		 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
+		 * one entry at a time, so h/w has an opportunity to produce new
+		 * entries well before the ring has been fully consumed, so
+		 * we're being *really* paranoid here.
+		 */
+		/*
+		 * NB: the worst-case estimate above is in QMan cycles, not
+		 * jiffies, so a short sleep gives h/w ample headroom.
+		 */
+		msleep(1);
+		msg = qm_mr_current(p);
+		if (!msg)
+			return 0;
+	}
+	if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+		/* We aren't draining anything but FQRNIs */
+		pr_err("Found verb 0x%x in MR\n", msg->verb);
+		return -1;
+	}
+	qm_mr_next(p);
+	qm_mr_cci_consume(p, 1);
+	goto loop;
+}
+
+static int qman_create_portal(struct qman_portal *portal,
+			      const struct qm_portal_config *c,
+			      const struct qman_cgrs *cgrs)
+{
+	struct qm_portal *p;
+	char buf[16];
+	int ret;
+	u32 isdr;
+
+	p = &portal->p;
+
+#ifdef CONFIG_FSL_PAMU
+	/* PAMU is required for stashing */
+	portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
+#else
+	portal->use_eqcr_ci_stashing = 0;
+#endif
+	/*
+	 * prep the low-level portal struct with the mapped addresses from the
+	 * config, everything that follows depends on it and "config" is more
+	 * for (de)reference
+	 */
+	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+	/*
+	 * If CI-stashing is used, the current defaults use a threshold of 3,
+	 * and stash with higher-than-DQRR priority.
+	 */
+	if (qm_eqcr_init(p, qm_eqcr_pvb,
+			portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
+		dev_err(c->dev, "EQCR initialisation failed\n");
+		goto fail_eqcr;
+	}
+	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
+			qm_dqrr_cdc, DQRR_MAXFILL)) {
+		dev_err(c->dev, "DQRR initialisation failed\n");
+		goto fail_dqrr;
+	}
+	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
+		dev_err(c->dev, "MR initialisation failed\n");
+		goto fail_mr;
+	}
+	if (qm_mc_init(p)) {
+		dev_err(c->dev, "MC initialisation failed\n");
+		goto fail_mc;
+	}
+	/* static interrupt-gating controls */
+	qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
+	qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
+	qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
+	portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
+	if (!portal->cgrs)
+		goto fail_cgrs;
+	/* initial snapshot is no-depletion */
+	qman_cgrs_init(&portal->cgrs[1]);
+	if (cgrs)
+		portal->cgrs[0] = *cgrs;
+	else
+		/* if the given mask is NULL, assume all CGRs can be seen */
+		qman_cgrs_fill(&portal->cgrs[0]);
+	INIT_LIST_HEAD(&portal->cgr_cbs);
+	spin_lock_init(&portal->cgr_lock);
+	INIT_WORK(&portal->congestion_work, qm_congestion_task);
+	INIT_WORK(&portal->mr_work, qm_mr_process_task);
+	portal->bits = 0;
+	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
+			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
+	sprintf(buf, "qportal-%d", c->channel);
+	portal->pdev = platform_device_alloc(buf, -1);
+	if (!portal->pdev)
+		goto fail_devalloc;
+	if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40)))
+		goto fail_devadd;
+	ret = platform_device_add(portal->pdev);
+	if (ret)
+		goto fail_devadd;
+	isdr = 0xffffffff;
+	qm_out(p, QM_REG_ISDR, isdr);
+	portal->irq_sources = 0;
+	qm_out(p, QM_REG_IER, 0);
+	qm_out(p, QM_REG_ISR, 0xffffffff);
+	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
+		dev_err(c->dev, "request_irq() failed\n");
+		goto fail_irq;
+	}
+	if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
+	    irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
+		dev_err(c->dev, "irq_set_affinity() failed\n");
+		goto fail_affinity;
+	}
+
+	/* Need EQCR to be empty before continuing */
+	isdr &= ~QM_PIRQ_EQCI;
+	qm_out(p, QM_REG_ISDR, isdr);
+	ret = qm_eqcr_get_fill(p);
+	if (ret) {
+		dev_err(c->dev, "EQCR unclean\n");
+		goto fail_eqcr_empty;
+	}
+	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
+	qm_out(p, QM_REG_ISDR, isdr);
+	if (qm_dqrr_current(p)) {
+		dev_err(c->dev, "DQRR unclean\n");
+		qm_dqrr_cdc_consume_n(p, 0xffff);
+	}
+	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
+		/* special handling, drain just in case it's a few FQRNIs */
+		const union qm_mr_entry *e = qm_mr_current(p);
+
+		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x\n, addr 0x%x",
+			e->verb, e->ern.rc, e->ern.fd.addr_lo);
+		goto fail_dqrr_mr_empty;
+	}
+	/* Success */
+	portal->config = c;
+	qm_out(p, QM_REG_ISDR, 0);
+	qm_out(p, QM_REG_IIR, 0);
+	/* Write a sane SDQCR */
+	qm_dqrr_sdqcr_set(p, portal->sdqcr);
+	return 0;
+
+fail_dqrr_mr_empty:
+fail_eqcr_empty:
+fail_affinity:
+	free_irq(c->irq, portal);
+fail_irq:
+	platform_device_del(portal->pdev);
+fail_devadd:
+	platform_device_put(portal->pdev);
+fail_devalloc:
+	kfree(portal->cgrs);
+fail_cgrs:
+	qm_mc_finish(p);
+fail_mc:
+	qm_mr_finish(p);
+fail_mr:
+	qm_dqrr_finish(p);
+fail_dqrr:
+	qm_eqcr_finish(p);
+fail_eqcr:
+	return -EIO;
+}
+
+struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
+					      const struct qman_cgrs *cgrs)
+{
+	struct qman_portal *portal;
+	int err;
+
+	portal = &per_cpu(qman_affine_portal, c->cpu);
+	err = qman_create_portal(portal, c, cgrs);
+	if (err)
+		return NULL;
+
+	spin_lock(&affine_mask_lock);
+	cpumask_set_cpu(c->cpu, &affine_mask);
+	affine_channels[c->cpu] = c->channel;
+	affine_portals[c->cpu] = portal;
+	spin_unlock(&affine_mask_lock);
+
+	return portal;
+}
+
+static void qman_destroy_portal(struct qman_portal *qm)
+{
+	const struct qm_portal_config *pcfg;
+
+	/* Stop dequeues on the portal */
+	qm_dqrr_sdqcr_set(&qm->p, 0);
+
+	/*
+	 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
+	 * something related to QM_PIRQ_EQCI, this may need fixing.
+	 * Also, due to the prefetching model used for CI updates in the enqueue
+	 * path, this update will only invalidate the CI cacheline *after*
+	 * working on it, so we need to call this twice to ensure a full update
+	 * irrespective of where the enqueue processing was at when the teardown
+	 * began.
+	 */
+	qm_eqcr_cce_update(&qm->p);
+	qm_eqcr_cce_update(&qm->p);
+	pcfg = qm->config;
+
+	free_irq(pcfg->irq, qm);
+
+	kfree(qm->cgrs);
+	qm_mc_finish(&qm->p);
+	qm_mr_finish(&qm->p);
+	qm_dqrr_finish(&qm->p);
+	qm_eqcr_finish(&qm->p);
+
+	platform_device_del(qm->pdev);
+	platform_device_put(qm->pdev);
+
+	qm->config = NULL;
+}
+
+const struct qm_portal_config *qman_destroy_affine_portal(void)
+{
+	struct qman_portal *qm = get_affine_portal();
+	const struct qm_portal_config *pcfg;
+	int cpu;
+
+	pcfg = qm->config;
+	cpu = pcfg->cpu;
+
+	qman_destroy_portal(qm);
+
+	spin_lock(&affine_mask_lock);
+	cpumask_clear_cpu(cpu, &affine_mask);
+	spin_unlock(&affine_mask_lock);
+	put_affine_portal();
+	return pcfg;
+}
+
+/* Inline helper to reduce nesting in __poll_portal_slow() */
+static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
+				   const union qm_mr_entry *msg, u8 verb)
+{
+	switch (verb) {
+	case QM_MR_VERB_FQRL:
+		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
+		fq_clear(fq, QMAN_FQ_STATE_ORL);
+		break;
+	case QM_MR_VERB_FQRN:
+		DPAA_ASSERT(fq->state == qman_fq_state_parked ||
+			    fq->state == qman_fq_state_sched);
+		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
+		fq_clear(fq, QMAN_FQ_STATE_CHANGING);
+		if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
+			fq_set(fq, QMAN_FQ_STATE_NE);
+		if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
+			fq_set(fq, QMAN_FQ_STATE_ORL);
+		fq->state = qman_fq_state_retired;
+		break;
+	case QM_MR_VERB_FQPN:
+		DPAA_ASSERT(fq->state == qman_fq_state_sched);
+		DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
+		fq->state = qman_fq_state_parked;
+	}
+}
+
+static void qm_congestion_task(struct work_struct *work)
+{
+	struct qman_portal *p = container_of(work, struct qman_portal,
+					     congestion_work);
+	struct qman_cgrs rr, c;
+	union qm_mc_result *mcr;
+	struct qman_cgr *cgr;
+
+	spin_lock(&p->cgr_lock);
+	qm_mc_start(&p->p);
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		spin_unlock(&p->cgr_lock);
+		dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
+		return;
+	}
+	/* mask out the ones I'm not interested in */
+	qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
+		      &p->cgrs[0]);
+	/* check previous snapshot for delta, enter/exit congestion */
+	qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
+	/* update snapshot */
+	qman_cgrs_cp(&p->cgrs[1], &rr);
+	/* Invoke callback */
+	list_for_each_entry(cgr, &p->cgr_cbs, node)
+		if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+			cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+	spin_unlock(&p->cgr_lock);
+}
+
+static void qm_mr_process_task(struct work_struct *work)
+{
+	struct qman_portal *p = container_of(work, struct qman_portal,
+					     mr_work);
+	const union qm_mr_entry *msg;
+	struct qman_fq *fq;
+	u8 verb, num = 0;
+
+	preempt_disable();
+
+	while (1) {
+		qm_mr_pvb_update(&p->p);
+		msg = qm_mr_current(&p->p);
+		if (!msg)
+			break;
+
+		verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+		/* The message is a software ERN iff the 0x20 bit is clear */
+		if (verb & 0x20) {
+			switch (verb) {
+			case QM_MR_VERB_FQRNI:
+				/* nada, we drop FQRNIs on the floor */
+				break;
+			case QM_MR_VERB_FQRN:
+			case QM_MR_VERB_FQRL:
+				/* Lookup in the retirement table */
+				fq = fqid_to_fq(msg->fq.fqid);
+				if (WARN_ON(!fq))
+					break;
+				fq_state_change(p, fq, msg, verb);
+				if (fq->cb.fqs)
+					fq->cb.fqs(p, fq, msg);
+				break;
+			case QM_MR_VERB_FQPN:
+				/* Parked */
+				fq = tag_to_fq(msg->fq.contextB);
+				fq_state_change(p, fq, msg, verb);
+				if (fq->cb.fqs)
+					fq->cb.fqs(p, fq, msg);
+				break;
+			case QM_MR_VERB_DC_ERN:
+				/* DCP ERN */
+				pr_crit_once("Leaking DCP ERNs!\n");
+				break;
+			default:
+				pr_crit("Invalid MR verb 0x%02x\n", verb);
+			}
+		} else {
+			/* It's a software ERN */
+			fq = tag_to_fq(msg->ern.tag);
+			fq->cb.ern(p, fq, msg);
+		}
+		num++;
+		qm_mr_next(&p->p);
+	}
+
+	qm_mr_cci_consume(&p->p, num);
+	preempt_enable();
+}
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
+{
+	if (is & QM_PIRQ_CSCI) {
+		queue_work_on(smp_processor_id(), qm_portal_wq,
+			      &p->congestion_work);
+	}
+
+	if (is & QM_PIRQ_EQRI) {
+		qm_eqcr_cce_update(&p->p);
+		qm_eqcr_set_ithresh(&p->p, 0);
+		wake_up(&affine_queue);
+	}
+
+	if (is & QM_PIRQ_MRI) {
+		queue_work_on(smp_processor_id(), qm_portal_wq,
+			      &p->mr_work);
+	}
+
+	return is;
+}
+
+/*
+ * remove some slowish-path stuff from the "fast path" and make sure it isn't
+ * inlined.
+ */
+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
+{
+	p->vdqcr_owned = NULL;
+	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+	wake_up(&affine_queue);
+}
+
+/*
+ * The only states that would conflict with other things if they ran at the
+ * same time on the same cpu are:
+ *
+ *   (i) setting/clearing vdqcr_owned, and
+ *  (ii) clearing the NE (Not Empty) flag.
+ *
+ * Both are safe. Because;
+ *
+ *   (i) this clearing can only occur after qman_volatile_dequeue() has set the
+ *	 vdqcr_owned field (which it does before setting VDQCR), and
+ *	 qman_volatile_dequeue() blocks interrupts and preemption while this is
+ *	 done so that we can't interfere.
+ *  (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
+ *	 with (i) that API prevents us from interfering until it's safe.
+ *
+ * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
+ * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
+ * advantage comes from this function not having to "lock" anything at all.
+ *
+ * Note also that the callbacks are invoked at points which are safe against the
+ * above potential conflicts, but that this function itself is not re-entrant
+ * (this is because the function tracks one end of each FIFO in the portal and
+ * we do *not* want to lock that). So the consequence is that it is safe for
+ * user callbacks to call into any QMan API.
+ */
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+					unsigned int poll_limit)
+{
+	const struct qm_dqrr_entry *dq;
+	struct qman_fq *fq;
+	enum qman_cb_dqrr_result res;
+	unsigned int limit = 0;
+
+	do {
+		qm_dqrr_pvb_update(&p->p);
+		dq = qm_dqrr_current(&p->p);
+		if (!dq)
+			break;
+
+		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
+			/*
+			 * VDQCR: don't trust contextB as the FQ may have
+			 * been configured for h/w consumption and we're
+			 * draining it post-retirement.
+			 */
+			fq = p->vdqcr_owned;
+			/*
+			 * We only set QMAN_FQ_STATE_NE when retiring, so we
+			 * only need to check for clearing it when doing
+			 * volatile dequeues.  It's one less thing to check
+			 * in the critical path (SDQCR).
+			 */
+			if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+				fq_clear(fq, QMAN_FQ_STATE_NE);
+			/*
+			 * This is duplicated from the SDQCR code, but we
+			 * have stuff to do before *and* after this callback,
+			 * and we don't want multiple if()s in the critical
+			 * path (SDQCR).
+			 */
+			res = fq->cb.dqrr(p, fq, dq);
+			if (res == qman_cb_dqrr_stop)
+				break;
+			/* Check for VDQCR completion */
+			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+				clear_vdqcr(p, fq);
+		} else {
+			/* SDQCR: contextB points to the FQ */
+			fq = tag_to_fq(dq->contextB);
+			/* Now let the callback do its stuff */
+			res = fq->cb.dqrr(p, fq, dq);
+			/*
+			 * The callback can request that we exit without
+			 * consuming this entry or advancing.
+			 */
+			if (res == qman_cb_dqrr_stop)
+				break;
+		}
+		/* Interpret 'dq' from a driver perspective. */
+		/*
+		 * Parking isn't possible unless HELDACTIVE was set. NB,
+		 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+		 * check for HELDACTIVE to cover both.
+		 */
+		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+			    (res != qman_cb_dqrr_park));
+		/* just means "skip it, I'll consume it myself later on" */
+		if (res != qman_cb_dqrr_defer)
+			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+						 res == qman_cb_dqrr_park);
+		/* Move forward */
+		qm_dqrr_next(&p->p);
+		/*
+		 * Entry processed and consumed, increment our counter.  The
+		 * callback can request that we exit after consuming the
+		 * entry, and we also exit if we reach our processing limit,
+		 * so loop back only if neither of these conditions is met.
+		 */
+	} while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
+
+	return limit;
+}
+
+void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
+{
+	unsigned long irqflags;
+
+	local_irq_save(irqflags);
+	set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
+	qm_out(&p->p, QM_REG_IER, p->irq_sources);
+	local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_add);
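+
+/*
+ * Illustrative caller sketch: a consumer that wants interrupt-driven
+ * dequeue handling enables the DQRI source on its affine portal, e.g.
+ *
+ *	qman_p_irqsource_add(p, QM_PIRQ_DQRI);
+ *
+ * after which portal_isr() runs __poll_portal_fast() on its behalf.
+ */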
+
+void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
+{
+	unsigned long irqflags;
+	u32 ier;
+
+	/*
+	 * Our interrupt handler only processes+clears status register bits that
+	 * are in p->irq_sources. As we're trimming that mask, if one of them
+	 * were to assert in the status register just before we remove it from
+	 * the enable register, there would be an interrupt-storm when we
+	 * release the IRQ lock. So we wait for the enable register update to
+	 * take effect in h/w (by reading it back) and then clear all other bits
+	 * in the status register. Ie. we clear them from ISR once it's certain
+	 * IER won't allow them to reassert.
+	 */
+	local_irq_save(irqflags);
+	bits &= QM_PIRQ_VISIBLE;
+	clear_bits(bits, &p->irq_sources);
+	qm_out(&p->p, QM_REG_IER, p->irq_sources);
+	ier = qm_in(&p->p, QM_REG_IER);
+	/*
+	 * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+	 * data-dependency, ie. to protect against re-ordering.
+	 */
+	qm_out(&p->p, QM_REG_ISR, ~ier);
+	local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_remove);
+
+const cpumask_t *qman_affine_cpus(void)
+{
+	return &affine_mask;
+}
+EXPORT_SYMBOL(qman_affine_cpus);
+
+u16 qman_affine_channel(int cpu)
+{
+	if (cpu < 0) {
+		struct qman_portal *portal = get_affine_portal();
+
+		cpu = portal->config->cpu;
+		put_affine_portal();
+	}
+	WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
+	return affine_channels[cpu];
+}
+EXPORT_SYMBOL(qman_affine_channel);
+
+void *qman_get_affine_portal(int cpu)
+{
+	return affine_portals[cpu];
+}
+EXPORT_SYMBOL(qman_get_affine_portal);
+
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
+{
+	return __poll_portal_fast(p, limit);
+}
+EXPORT_SYMBOL(qman_p_poll_dqrr);
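+
+/*
+ * Callers that keep QM_PIRQ_DQRI masked can drive dequeue processing by
+ * polling instead, e.g. from a NAPI-style loop (sketch):
+ *
+ *	cleaned = qman_p_poll_dqrr(p, QMAN_POLL_LIMIT);
+ */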
+
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
+{
+	unsigned long irqflags;
+
+	local_irq_save(irqflags);
+	pools &= p->config->pools;
+	p->sdqcr |= pools;
+	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+	local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_add);
+
+/* Frame queue API */
+
+static const char *mcr_result_str(u8 result)
+{
+	switch (result) {
+	case QM_MCR_RESULT_NULL:
+		return "QM_MCR_RESULT_NULL";
+	case QM_MCR_RESULT_OK:
+		return "QM_MCR_RESULT_OK";
+	case QM_MCR_RESULT_ERR_FQID:
+		return "QM_MCR_RESULT_ERR_FQID";
+	case QM_MCR_RESULT_ERR_FQSTATE:
+		return "QM_MCR_RESULT_ERR_FQSTATE";
+	case QM_MCR_RESULT_ERR_NOTEMPTY:
+		return "QM_MCR_RESULT_ERR_NOTEMPTY";
+	case QM_MCR_RESULT_PENDING:
+		return "QM_MCR_RESULT_PENDING";
+	case QM_MCR_RESULT_ERR_BADCOMMAND:
+		return "QM_MCR_RESULT_ERR_BADCOMMAND";
+	}
+	return "<unknown MCR result>";
+}
+
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
+{
+	if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
+		int ret = qman_alloc_fqid(&fqid);
+
+		if (ret)
+			return ret;
+	}
+	fq->fqid = fqid;
+	fq->flags = flags;
+	fq->state = qman_fq_state_oos;
+	fq->cgr_groupid = 0;
+
+	/* A context_b of 0 is allegedly special, so don't use that fqid */
+	if (fqid == 0 || fqid >= num_fqids) {
+		WARN(1, "bad fqid %d\n", fqid);
+		return -EINVAL;
+	}
+
+	fq->idx = fqid * 2;
+	if (flags & QMAN_FQ_FLAG_NO_MODIFY)
+		fq->idx++;
+
+	WARN_ON(fq_table[fq->idx]);
+	fq_table[fq->idx] = fq;
+
+	return 0;
+}
+EXPORT_SYMBOL(qman_create_fq);
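+
+/*
+ * Usage sketch (illustrative; 'my_dqrr_cb' is a hypothetical callback):
+ *
+ *	fq->cb.dqrr = my_dqrr_cb;
+ *	err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
+ *
+ * With QMAN_FQ_FLAG_DYNAMIC_FQID the fqid argument is ignored and a free
+ * FQID is allocated; the chosen value can then be read back with
+ * qman_fq_fqid().
+ */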
+
+void qman_destroy_fq(struct qman_fq *fq)
+{
+	/*
+	 * We don't need to lock the FQ as it is a pre-condition that the FQ be
+	 * quiesced. Instead, run some checks.
+	 */
+	switch (fq->state) {
+	case qman_fq_state_parked:
+	case qman_fq_state_oos:
+		if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
+			qman_release_fqid(fq->fqid);
+
+		DPAA_ASSERT(fq_table[fq->idx]);
+		fq_table[fq->idx] = NULL;
+		return;
+	default:
+		break;
+	}
+	DPAA_ASSERT(NULL == "qman_destroy_fq() on unquiesced FQ!");
+}
+EXPORT_SYMBOL(qman_destroy_fq);
+
+u32 qman_fq_fqid(struct qman_fq *fq)
+{
+	return fq->fqid;
+}
+EXPORT_SYMBOL(qman_fq_fqid);
+
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p;
+	u8 res, myverb;
+	int ret = 0;
+
+	myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
+		? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
+
+	if (fq->state != qman_fq_state_oos &&
+	    fq->state != qman_fq_state_parked)
+		return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+		return -EINVAL;
+#endif
+	if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+		/* OAC and TDTHRESH can't be set at the same time */
+		if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+			return -EINVAL;
+	}
+	/* Issue an INITFQ_[PARKED|SCHED] management command */
+	p = get_affine_portal();
+	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+	    (fq->state != qman_fq_state_oos &&
+	     fq->state != qman_fq_state_parked)) {
+		ret = -EBUSY;
+		goto out;
+	}
+	mcc = qm_mc_start(&p->p);
+	if (opts)
+		mcc->initfq = *opts;
+	mcc->initfq.fqid = fq->fqid;
+	mcc->initfq.count = 0;
+	/*
+	 * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
+	 * demux pointer. Otherwise, the caller-provided value is allowed to
+	 * stand; don't overwrite it.
+	 */
+	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
+		dma_addr_t phys_fq;
+
+		mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
+		mcc->initfq.fqd.context_b = fq_to_tag(fq);
+		/*
+		 * Set the physical address too. NB, if the user wasn't trying to
+		 * set CONTEXTA, clear the stashing settings.
+		 */
+		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
+			mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+			memset(&mcc->initfq.fqd.context_a, 0,
+				sizeof(mcc->initfq.fqd.context_a));
+		} else {
+			phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
+						 DMA_TO_DEVICE);
+			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
+		}
+	}
+	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
+		int wq = 0;
+
+		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
+			mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+			wq = 4;
+		}
+		qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
+	}
+	qm_mc_commit(&p->p, myverb);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		dev_err(p->config->dev, "MCR timeout\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+	res = mcr->result;
+	if (res != QM_MCR_RESULT_OK) {
+		ret = -EIO;
+		goto out;
+	}
+	if (opts) {
+		if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
+			if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
+			else
+				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
+		}
+		if (opts->we_mask & QM_INITFQ_WE_CGID)
+			fq->cgr_groupid = opts->fqd.cgid;
+	}
+	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+		qman_fq_state_sched : qman_fq_state_parked;
+
+out:
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_init_fq);
+
+int qman_schedule_fq(struct qman_fq *fq)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p;
+	int ret = 0;
+
+	if (fq->state != qman_fq_state_parked)
+		return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+		return -EINVAL;
+#endif
+	/* Issue a ALTERFQ_SCHED management command */
+	p = get_affine_portal();
+	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+	    fq->state != qman_fq_state_parked) {
+		ret = -EBUSY;
+		goto out;
+	}
+	mcc = qm_mc_start(&p->p);
+	mcc->alterfq.fqid = fq->fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		dev_err(p->config->dev, "ALTER_SCHED timeout\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
+	if (mcr->result != QM_MCR_RESULT_OK) {
+		ret = -EIO;
+		goto out;
+	}
+	fq->state = qman_fq_state_sched;
+out:
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_schedule_fq);
+
+int qman_retire_fq(struct qman_fq *fq, u32 *flags)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p;
+	int ret;
+	u8 res;
+
+	if (fq->state != qman_fq_state_parked &&
+	    fq->state != qman_fq_state_sched)
+		return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+		return -EINVAL;
+#endif
+	p = get_affine_portal();
+	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+	    fq->state == qman_fq_state_retired ||
+	    fq->state == qman_fq_state_oos) {
+		ret = -EBUSY;
+		goto out;
+	}
+	mcc = qm_mc_start(&p->p);
+	mcc->alterfq.fqid = fq->fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
+	res = mcr->result;
+	/*
+	 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
+	 * and defer the flags until FQRNI or FQRN (respectively) show up. But
+	 * "Friendly" is to process OK immediately, and not set CHANGING. We do
+	 * friendly, otherwise the caller doesn't necessarily have a fully
+	 * "retired" FQ on return even if the retirement was immediate. However
+	 * this does mean some code duplication between here and
+	 * fq_state_change().
+	 */
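+	/*
+	 * In short, the contract implemented below: return 0 if the FQ
+	 * retired immediately, 1 if retirement is pending (CHANGING is set
+	 * and the FQRN message will complete it), negative errno on failure.
+	 */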
+	if (res == QM_MCR_RESULT_OK) {
+		ret = 0;
+		/* Process 'fq' right away, we'll ignore FQRNI */
+		if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
+			fq_set(fq, QMAN_FQ_STATE_NE);
+		if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
+			fq_set(fq, QMAN_FQ_STATE_ORL);
+		if (flags)
+			*flags = fq->flags;
+		fq->state = qman_fq_state_retired;
+		if (fq->cb.fqs) {
+			/*
+			 * Another issue with supporting "immediate" retirement
+			 * is that we're forced to drop FQRNIs, because by the
+			 * time they're seen it may already be "too late" (the
+			 * fq may have been OOS'd and free()'d already). But if
+			 * the upper layer wants a callback whether it's
+			 * immediate or not, we have to fake a "MR" entry to
+			 * look like an FQRNI...
+			 */
+			union qm_mr_entry msg;
+
+			msg.verb = QM_MR_VERB_FQRNI;
+			msg.fq.fqs = mcr->alterfq.fqs;
+			msg.fq.fqid = fq->fqid;
+			msg.fq.contextB = fq_to_tag(fq);
+			fq->cb.fqs(p, fq, &msg);
+		}
+	} else if (res == QM_MCR_RESULT_PENDING) {
+		ret = 1;
+		fq_set(fq, QMAN_FQ_STATE_CHANGING);
+	} else {
+		ret = -EIO;
+	}
+out:
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_retire_fq);
+
+int qman_oos_fq(struct qman_fq *fq)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p;
+	int ret = 0;
+
+	if (fq->state != qman_fq_state_retired)
+		return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+		return -EINVAL;
+#endif
+	p = get_affine_portal();
+	if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
+	    fq->state != qman_fq_state_retired) {
+		ret = -EBUSY;
+		goto out;
+	}
+	mcc = qm_mc_start(&p->p);
+	mcc->alterfq.fqid = fq->fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
+	if (mcr->result != QM_MCR_RESULT_OK) {
+		ret = -EIO;
+		goto out;
+	}
+	fq->state = qman_fq_state_oos;
+out:
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_oos_fq);
+
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	int ret = 0;
+
+	mcc = qm_mc_start(&p->p);
+	mcc->queryfq.fqid = fq->fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+	if (mcr->result == QM_MCR_RESULT_OK)
+		*fqd = mcr->queryfq.fqd;
+	else
+		ret = -EIO;
+out:
+	put_affine_portal();
+	return ret;
+}
+
+static int qman_query_fq_np(struct qman_fq *fq,
+			    struct qm_mcr_queryfq_np *np)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	int ret = 0;
+
+	mcc = qm_mc_start(&p->p);
+	mcc->queryfq.fqid = fq->fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+	if (mcr->result == QM_MCR_RESULT_OK)
+		*np = mcr->queryfq_np;
+	else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
+		ret = -ERANGE;
+	else
+		ret = -EIO;
+out:
+	put_affine_portal();
+	return ret;
+}
+
+static int qman_query_cgr(struct qman_cgr *cgr,
+			  struct qm_mcr_querycgr *cgrd)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	int ret = 0;
+
+	mcc = qm_mc_start(&p->p);
+	mcc->querycgr.cgid = cgr->cgrid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
+	if (mcr->result == QM_MCR_RESULT_OK)
+		*cgrd = mcr->querycgr;
+	else {
+		dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
+			mcr_result_str(mcr->result));
+		ret = -EIO;
+	}
+out:
+	put_affine_portal();
+	return ret;
+}
+
+int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
+{
+	struct qm_mcr_querycgr query_cgr;
+	int err;
+
+	err = qman_query_cgr(cgr, &query_cgr);
+	if (err)
+		return err;
+
+	*result = !!query_cgr.cgr.cs;
+	return 0;
+}
+EXPORT_SYMBOL(qman_query_cgr_congested);
+
+/* tries to claim VDQCR ownership; used via set_vdqcr() in wait_event() */
+static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
+{
+	unsigned long irqflags;
+	int ret = -EBUSY;
+
+	local_irq_save(irqflags);
+	if (p->vdqcr_owned)
+		goto out;
+	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+		goto out;
+
+	fq_set(fq, QMAN_FQ_STATE_VDQCR);
+	p->vdqcr_owned = fq;
+	qm_dqrr_vdqcr_set(&p->p, vdqcr);
+	ret = 0;
+out:
+	local_irq_restore(irqflags);
+	return ret;
+}
+
+static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
+{
+	int ret;
+
+	*p = get_affine_portal();
+	ret = set_p_vdqcr(*p, fq, vdqcr);
+	put_affine_portal();
+	return ret;
+}
+
+static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
+				u32 vdqcr, u32 flags)
+{
+	int ret = 0;
+
+	if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+		ret = wait_event_interruptible(affine_queue,
+				!set_vdqcr(p, fq, vdqcr));
+	else
+		wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
+	return ret;
+}
+
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
+{
+	struct qman_portal *p;
+	int ret;
+
+	if (fq->state != qman_fq_state_parked &&
+	    fq->state != qman_fq_state_retired)
+		return -EINVAL;
+	if (vdqcr & QM_VDQCR_FQID_MASK)
+		return -EINVAL;
+	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+		return -EBUSY;
+	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+	if (flags & QMAN_VOLATILE_FLAG_WAIT)
+		ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
+	else
+		ret = set_vdqcr(&p, fq, vdqcr);
+	if (ret)
+		return ret;
+	/* VDQCR is set */
+	if (flags & QMAN_VOLATILE_FLAG_FINISH) {
+		if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+			/*
+			 * NB: don't propagate any error - the caller wouldn't
+			 * know whether the VDQCR was issued or not. A signal
+			 * could arrive after returning anyway, so the caller
+			 * can check signal_pending() if that's an issue.
+			 */
+			wait_event_interruptible(affine_queue,
+				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+		else
+			wait_event(affine_queue,
+				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+	}
+	return 0;
+}
+EXPORT_SYMBOL(qman_volatile_dequeue);
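+
+/*
+ * Illustrative only ('my_fq' is hypothetical): synchronously drain up to 8
+ * frames from a parked/retired FQ, blocking until the command is issued and
+ * has completed:
+ *
+ *	err = qman_volatile_dequeue(my_fq,
+ *				    QMAN_VOLATILE_FLAG_WAIT |
+ *				    QMAN_VOLATILE_FLAG_FINISH,
+ *				    QM_VDQCR_NUMFRAMES_SET(8));
+ */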
+
+static void update_eqcr_ci(struct qman_portal *p, u8 avail)
+{
+	if (avail)
+		qm_eqcr_cce_prefetch(&p->p);
+	else
+		qm_eqcr_cce_update(&p->p);
+}
+
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
+{
+	struct qman_portal *p;
+	struct qm_eqcr_entry *eq;
+	unsigned long irqflags;
+	u8 avail;
+	int ret = 0;
+
+	p = get_affine_portal();
+	local_irq_save(irqflags);
+
+	if (p->use_eqcr_ci_stashing) {
+		/*
+		 * The stashing case is easy, only update if we need to in
+		 * order to try and liberate ring entries.
+		 */
+		eq = qm_eqcr_start_stash(&p->p);
+	} else {
+		/*
+		 * The non-stashing case is harder, need to prefetch ahead of
+		 * time.
+		 */
+		avail = qm_eqcr_get_avail(&p->p);
+		if (avail < 2)
+			update_eqcr_ci(p, avail);
+		eq = qm_eqcr_start_no_stash(&p->p);
+	}
+
+	if (unlikely(!eq)) {
+		/* EQCR ring full - report it rather than silently dropping */
+		ret = -EBUSY;
+		goto out;
+	}
+
+	eq->fqid = fq->fqid;
+	eq->tag = fq_to_tag(fq);
+	eq->fd = *fd;
+
+	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
+out:
+	local_irq_restore(irqflags);
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_enqueue);
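+
+/*
+ * Illustrative only, 'my_fq' and 'fd' are a hypothetical caller's objects:
+ * enqueue is best-effort, so retry while the EQCR ring reports full:
+ *
+ *	do {
+ *		err = qman_enqueue(my_fq, &fd);
+ *	} while (err == -EBUSY);
+ */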
+
+static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
+			 struct qm_mcc_initcgr *opts)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	u8 verb = QM_MCC_VERB_MODIFYCGR;
+	int ret = 0;
+
+	mcc = qm_mc_start(&p->p);
+	if (opts)
+		mcc->initcgr = *opts;
+	mcc->initcgr.cgid = cgr->cgrid;
+	if (flags & QMAN_CGR_FLAG_USE_INIT)
+		verb = QM_MCC_VERB_INITCGR;
+	qm_mc_commit(&p->p, verb);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
+	if (mcr->result != QM_MCR_RESULT_OK)
+		ret = -EIO;
+
+out:
+	put_affine_portal();
+	return ret;
+}
+
+#define PORTAL_IDX(n)	((n)->config->channel - QM_CHANNEL_SWPORTAL0)
+#define TARG_MASK(n)	(BIT(31) >> PORTAL_IDX(n))
+
+static u8 qman_cgr_cpus[CGR_NUM];
+
+void qman_init_cgr_all(void)
+{
+	struct qman_cgr cgr;
+	int err_cnt = 0;
+
+	for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
+		if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
+			err_cnt++;
+	}
+
+	if (err_cnt)
+		pr_err("Warning: %d error%s while initialising CGR h/w\n",
+		       err_cnt, (err_cnt > 1) ? "s" : "");
+}
+
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+		    struct qm_mcc_initcgr *opts)
+{
+	struct qm_mcr_querycgr cgr_state;
+	struct qm_mcc_initcgr local_opts = {};
+	int ret;
+	struct qman_portal *p;
+
+	/*
+	 * We have to check that the provided CGRID is within the limits of the
+	 * data-structures, for obvious reasons. However we'll let h/w take
+	 * care of determining whether it's within the limits of what exists on
+	 * the SoC.
+	 */
+	if (cgr->cgrid >= CGR_NUM)
+		return -EINVAL;
+
+	preempt_disable();
+	p = get_affine_portal();
+	qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
+	preempt_enable();
+
+	cgr->chan = p->config->channel;
+	spin_lock(&p->cgr_lock);
+
+	if (opts) {
+		ret = qman_query_cgr(cgr, &cgr_state);
+		if (ret)
+			goto out;
+		local_opts = *opts;
+		if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+			local_opts.cgr.cscn_targ_upd_ctrl =
+				QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
+		else
+			/* Overwrite TARG */
+			local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
+						   TARG_MASK(p);
+		local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+		/* send init if flags indicate so */
+		if (flags & QMAN_CGR_FLAG_USE_INIT)
+			ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
+					    &local_opts);
+		else
+			ret = qm_modify_cgr(cgr, 0, &local_opts);
+		if (ret)
+			goto out;
+	}
+
+	list_add(&cgr->node, &p->cgr_cbs);
+
+	/* Determine if newly added object requires its callback to be called */
+	ret = qman_query_cgr(cgr, &cgr_state);
+	if (ret) {
+		/* we can't go back, so proceed and return success */
+		dev_err(p->config->dev, "CGR HW state partially modified\n");
+		ret = 0;
+		goto out;
+	}
+	if (cgr->cb && cgr_state.cgr.cscn_en &&
+	    qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
+		cgr->cb(p, cgr, 1);
+out:
+	spin_unlock(&p->cgr_lock);
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_create_cgr);
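+
+/*
+ * Illustrative only; the callback name is hypothetical and the CSCN
+ * write-enable/enable flags are assumed from the accompanying qman.h:
+ *
+ *	struct qman_cgr cgr = { .cgrid = my_cgrid, .cb = my_cscn_cb };
+ *	struct qm_mcc_initcgr opts = { .we_mask = QM_CGR_WE_CSCN_EN };
+ *
+ *	opts.cgr.cscn_en = QM_CGR_EN;
+ *	err = qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
+ */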
+
+int qman_delete_cgr(struct qman_cgr *cgr)
+{
+	unsigned long irqflags;
+	struct qm_mcr_querycgr cgr_state;
+	struct qm_mcc_initcgr local_opts;
+	int ret = 0;
+	struct qman_cgr *i;
+	struct qman_portal *p = get_affine_portal();
+
+	if (cgr->chan != p->config->channel) {
+		/* attempt to delete from other portal than creator */
+		dev_err(p->config->dev, "CGR not owned by current portal\n");
+		dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
+			cgr->chan, p->config->channel);
+
+		ret = -EINVAL;
+		goto put_portal;
+	}
+	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+	spin_lock_irqsave(&p->cgr_lock, irqflags);
+	list_del(&cgr->node);
+	/*
+	 * If there are no other CGR objects for this CGRID in the list,
+	 * update CSCN_TARG accordingly
+	 */
+	list_for_each_entry(i, &p->cgr_cbs, node)
+		if (i->cgrid == cgr->cgrid && i->cb)
+			goto release_lock;
+	ret = qman_query_cgr(cgr, &cgr_state);
+	if (ret)  {
+		/* add back to the list */
+		list_add(&cgr->node, &p->cgr_cbs);
+		goto release_lock;
+	}
+	/* Overwrite TARG */
+	local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
+	else
+		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
+							 ~(TARG_MASK(p));
+	ret = qm_modify_cgr(cgr, 0, &local_opts);
+	if (ret)
+		/* add back to the list */
+		list_add(&cgr->node, &p->cgr_cbs);
+release_lock:
+	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+put_portal:
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_delete_cgr);
+
+struct cgr_comp {
+	struct qman_cgr *cgr;
+	struct completion completion;
+};
+
+static int qman_delete_cgr_thread(void *p)
+{
+	struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
+	int ret;
+
+	ret = qman_delete_cgr(cgr_comp->cgr);
+	complete(&cgr_comp->completion);
+
+	return ret;
+}
+
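+/*
+ * qman_delete_cgr() must run on the CPU whose portal the CGR was created on
+ * (see the cgr->chan check above); this wrapper bounces the deletion to that
+ * CPU via a bound kthread when invoked from any other CPU.
+ */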
+void qman_delete_cgr_safe(struct qman_cgr *cgr)
+{
+	struct task_struct *thread;
+	struct cgr_comp cgr_comp;
+
+	preempt_disable();
+	if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
+		init_completion(&cgr_comp.completion);
+		cgr_comp.cgr = cgr;
+		thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
+					"cgr_del");
+
+		if (IS_ERR(thread))
+			goto out;
+
+		kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
+		wake_up_process(thread);
+		wait_for_completion(&cgr_comp.completion);
+		preempt_enable();
+		return;
+	}
+out:
+	qman_delete_cgr(cgr);
+	preempt_enable();
+}
+EXPORT_SYMBOL(qman_delete_cgr_safe);
+
+/* Cleanup FQs */
+
+static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
+{
+	const union qm_mr_entry *msg;
+	int found = 0;
+
+	qm_mr_pvb_update(p);
+	msg = qm_mr_current(p);
+	while (msg) {
+		if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
+			found = 1;
+		qm_mr_next(p);
+		qm_mr_cci_consume_to_current(p);
+		qm_mr_pvb_update(p);
+		msg = qm_mr_current(p);
+	}
+	return found;
+}
+
+static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
+				      bool wait)
+{
+	const struct qm_dqrr_entry *dqrr;
+	int found = 0;
+
+	do {
+		qm_dqrr_pvb_update(p);
+		dqrr = qm_dqrr_current(p);
+		if (!dqrr)
+			cpu_relax();
+	} while (wait && !dqrr);
+
+	while (dqrr) {
+		if (dqrr->fqid == fqid && (dqrr->stat & s))
+			found = 1;
+		qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
+		qm_dqrr_pvb_update(p);
+		qm_dqrr_next(p);
+		dqrr = qm_dqrr_current(p);
+	}
+	return found;
+}
+
+#define qm_mr_drain(p, V) \
+	_qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)
+
+#define qm_dqrr_drain(p, f, S) \
+	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)
+
+#define qm_dqrr_drain_wait(p, f, S) \
+	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)
+
+#define qm_dqrr_drain_nomatch(p) \
+	_qm_dqrr_consume_and_match(p, 0, 0, false)
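+
+/*
+ * For example, qm_mr_drain(p, FQRN) expands to
+ * _qm_mr_consume_and_match_verb(p, QM_MR_VERB_FQRN).
+ */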
+
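+/*
+ * Forcibly take an FQID out of service: retire the FQ (draining DQRR and the
+ * MR, and the FQ itself if retirement is deferred), wait for the ORL to
+ * empty, then issue ALTER_OOS. Copes with FQs found in the scheduled, parked
+ * or retired states.
+ */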
+static int qman_shutdown_fq(u32 fqid)
+{
+	struct qman_portal *p;
+	struct device *dev;
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	int orl_empty, drain = 0, ret = 0;
+	u32 channel, wq, res;
+	u8 state;
+
+	p = get_affine_portal();
+	dev = p->config->dev;
+	/* Determine the state of the FQID */
+	mcc = qm_mc_start(&p->p);
+	mcc->queryfq_np.fqid = fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		dev_err(dev, "QUERYFQ_NP timeout\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
+	if (state == QM_MCR_NP_STATE_OOS)
+		goto out; /* Already OOS, no need to do any more checks */
+
+	/* Query which channel the FQ is using */
+	mcc = qm_mc_start(&p->p);
+	mcc->queryfq.fqid = fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		dev_err(dev, "QUERYFQ timeout\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+	/* Need to store these since the MCR gets reused */
+	channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
+	wq = qm_fqd_get_wq(&mcr->queryfq.fqd);
+
+	switch (state) {
+	case QM_MCR_NP_STATE_TEN_SCHED:
+	case QM_MCR_NP_STATE_TRU_SCHED:
+	case QM_MCR_NP_STATE_ACTIVE:
+	case QM_MCR_NP_STATE_PARKED:
+		orl_empty = 0;
+		mcc = qm_mc_start(&p->p);
+		mcc->alterfq.fqid = fqid;
+		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+		if (!qm_mc_result_timeout(&p->p, &mcr)) {
+			dev_err(dev, "ALTER_RETIRE timeout\n");
+			ret = -ETIMEDOUT;
+			goto out;
+		}
+		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+			    QM_MCR_VERB_ALTER_RETIRE);
+		res = mcr->result; /* Make a copy as we reuse MCR below */
+
+		if (res == QM_MCR_RESULT_PENDING) {
+			/*
+			 * Need to wait for the FQRN in the message ring, which
+			 * will only occur once the FQ has been drained.  In
+			 * order for the FQ to drain the portal needs to be set
+			 * to dequeue from the channel the FQ is scheduled on
+			 */
+			int found_fqrn = 0;
+			u16 dequeue_wq = 0;
+
+			/* Flag that we need to drain FQ */
+			drain = 1;
+
+			if (channel >= qm_channel_pool1 &&
+			    channel < qm_channel_pool1 + 15) {
+				/* Pool channel, enable the bit in the portal */
+				dequeue_wq = (channel -
+					      qm_channel_pool1 + 1) << 4 | wq;
+			} else if (channel < qm_channel_pool1) {
+				/* Dedicated channel */
+				dequeue_wq = wq;
+			} else {
+				dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
+					fqid, channel);
+				ret = -EBUSY;
+				goto out;
+			}
+			/* Set the sdqcr to drain this channel */
+			if (channel < qm_channel_pool1)
+				qm_dqrr_sdqcr_set(&p->p,
+						  QM_SDQCR_TYPE_ACTIVE |
+						  QM_SDQCR_CHANNELS_DEDICATED);
+			else
+				qm_dqrr_sdqcr_set(&p->p,
+						  QM_SDQCR_TYPE_ACTIVE |
+						  QM_SDQCR_CHANNELS_POOL_CONV
+						  (channel));
+			do {
+				/* Keep draining DQRR while checking the MR */
+				qm_dqrr_drain_nomatch(&p->p);
+				/* Process message ring too */
+				found_fqrn = qm_mr_drain(&p->p, FQRN);
+				cpu_relax();
+			} while (!found_fqrn);
+
+		}
+		if (res != QM_MCR_RESULT_OK &&
+		    res != QM_MCR_RESULT_PENDING) {
+			dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
+				fqid, res);
+			ret = -EIO;
+			goto out;
+		}
+		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
+			/*
+			 * ORL had no entries, no need to wait until the
+			 * ERNs come in
+			 */
+			orl_empty = 1;
+		}
+		/*
+		 * Retirement succeeded, check to see if FQ needs
+		 * to be drained
+		 */
+		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
+			/* FQ is Not Empty, drain using volatile DQ commands */
+			do {
+				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
+
+				qm_dqrr_vdqcr_set(&p->p, vdqcr);
+				/*
+				 * Wait for a dequeue and process the dequeues,
+				 * making sure to empty the ring completely
+				 */
+			} while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
+		}
+		qm_dqrr_sdqcr_set(&p->p, 0);
+
+		while (!orl_empty) {
+			/* Wait for the ORL to have been completely drained */
+			orl_empty = qm_mr_drain(&p->p, FQRL);
+			cpu_relax();
+		}
+		mcc = qm_mc_start(&p->p);
+		mcc->alterfq.fqid = fqid;
+		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+		if (!qm_mc_result_timeout(&p->p, &mcr)) {
+			ret = -ETIMEDOUT;
+			goto out;
+		}
+
+		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+			    QM_MCR_VERB_ALTER_OOS);
+		if (mcr->result != QM_MCR_RESULT_OK) {
+			dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
+				fqid, mcr->result);
+			ret = -EIO;
+			goto out;
+		}
+		break;
+
+	case QM_MCR_NP_STATE_RETIRED:
+		/* Send OOS Command */
+		mcc = qm_mc_start(&p->p);
+		mcc->alterfq.fqid = fqid;
+		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+		if (!qm_mc_result_timeout(&p->p, &mcr)) {
+			ret = -ETIMEDOUT;
+			goto out;
+		}
+
+		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+			    QM_MCR_VERB_ALTER_OOS);
+		if (mcr->result) {
+			dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
+				fqid, mcr->result);
+			ret = -EIO;
+			goto out;
+		}
+		break;
+
+	case QM_MCR_NP_STATE_OOS:
+		/*  Done */
+		break;
+
+	default:
+		ret = -EIO;
+	}
+
+out:
+	put_affine_portal();
+	return ret;
+}
+
+const struct qm_portal_config *qman_get_qm_portal_config(
+						struct qman_portal *portal)
+{
+	return portal->config;
+}
+
+struct gen_pool *qm_fqalloc; /* FQID allocator */
+struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+
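+/*
+ * IDs are seeded into the genpools with DPAA_GENALLOC_OFF added so that a
+ * valid ID of 0 can be told apart from gen_pool_alloc()'s failure return of
+ * 0; the offset is masked off here and re-applied by the release functions.
+ */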
+static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
+{
+	unsigned long addr;
+
+	addr = gen_pool_alloc(p, cnt);
+	if (!addr)
+		return -ENOMEM;
+
+	*result = addr & ~DPAA_GENALLOC_OFF;
+
+	return 0;
+}
+
+int qman_alloc_fqid_range(u32 *result, u32 count)
+{
+	return qman_alloc_range(qm_fqalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_fqid_range);
+
+int qman_alloc_pool_range(u32 *result, u32 count)
+{
+	return qman_alloc_range(qm_qpalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_pool_range);
+
+int qman_alloc_cgrid_range(u32 *result, u32 count)
+{
+	return qman_alloc_range(qm_cgralloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_cgrid_range);
+
+int qman_release_fqid(u32 fqid)
+{
+	int ret = qman_shutdown_fq(fqid);
+
+	if (ret) {
+		pr_debug("FQID %d leaked\n", fqid);
+		return ret;
+	}
+
+	gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
+	return 0;
+}
+EXPORT_SYMBOL(qman_release_fqid);
+
+static int qpool_cleanup(u32 qp)
+{
+	/*
+	 * We query all FQDs starting from FQID 1 until we get an "invalid
+	 * FQID" error, looking for non-OOS FQDs whose destination channel is
+	 * the pool-channel being released. When a non-OOS FQD is found, we
+	 * attempt to clean it up.
+	 */
+	struct qman_fq fq = {
+		.fqid = 1
+	};
+	int err;
+
+	do {
+		struct qm_mcr_queryfq_np np;
+
+		err = qman_query_fq_np(&fq, &np);
+		if (err)
+			/* FQID range exceeded, found no problems */
+			return 0;
+		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+			struct qm_fqd fqd;
+
+			err = qman_query_fq(&fq, &fqd);
+			if (WARN_ON(err))
+				return 0;
+			if (qm_fqd_get_chan(&fqd) == qp) {
+				/* The channel is the FQ's target, clean it */
+				err = qman_shutdown_fq(fq.fqid);
+				if (err)
+					/*
+					 * Couldn't shut down the FQ
+					 * so the pool must be leaked
+					 */
+					return err;
+			}
+		}
+		/* Move to the next FQID */
+		fq.fqid++;
+	} while (1);
+}
+
+int qman_release_pool(u32 qp)
+{
+	int ret;
+
+	ret = qpool_cleanup(qp);
+	if (ret) {
+		pr_debug("CHID %d leaked\n", qp);
+		return ret;
+	}
+
+	gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
+	return 0;
+}
+EXPORT_SYMBOL(qman_release_pool);
+
+static int cgr_cleanup(u32 cgrid)
+{
+	/*
+	 * query all FQDs starting from FQID 1 until we get an "invalid FQID"
+	 * error, looking for non-OOS FQDs whose CGR is the CGR being released
+	 */
+	struct qman_fq fq = {
+		.fqid = 1
+	};
+	int err;
+
+	do {
+		struct qm_mcr_queryfq_np np;
+
+		err = qman_query_fq_np(&fq, &np);
+		if (err)
+			/* FQID range exceeded, found no problems */
+			return 0;
+		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+			struct qm_fqd fqd;
+
+			err = qman_query_fq(&fq, &fqd);
+			if (WARN_ON(err))
+				return 0;
+			if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
+			    fqd.cgid == cgrid) {
+				pr_err("CGRID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
+				       cgrid, fq.fqid);
+				return -EIO;
+			}
+		}
+		/* Move to the next FQID */
+		fq.fqid++;
+	} while (1);
+}
+
+int qman_release_cgrid(u32 cgrid)
+{
+	int ret;
+
+	ret = cgr_cleanup(cgrid);
+	if (ret) {
+		pr_debug("CGRID %d leaked\n", cgrid);
+		return ret;
+	}
+
+	gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
+	return 0;
+}
+EXPORT_SYMBOL(qman_release_cgrid);
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
new file mode 100644
index 0000000..4faa72b
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -0,0 +1,817 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+u16 qman_ip_rev;
+EXPORT_SYMBOL(qman_ip_rev);
+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
+EXPORT_SYMBOL(qm_channel_pool1);
+
+/* Register offsets */
+#define REG_QCSP_LIO_CFG(n)	(0x0000 + ((n) * 0x10))
+#define REG_QCSP_IO_CFG(n)	(0x0004 + ((n) * 0x10))
+#define REG_QCSP_DD_CFG(n)	(0x000c + ((n) * 0x10))
+#define REG_DD_CFG		0x0200
+#define REG_DCP_CFG(n)		(0x0300 + ((n) * 0x10))
+#define REG_DCP_DD_CFG(n)	(0x0304 + ((n) * 0x10))
+#define REG_DCP_DLM_AVG(n)	(0x030c + ((n) * 0x10))
+#define REG_PFDR_FPC		0x0400
+#define REG_PFDR_FP_HEAD	0x0404
+#define REG_PFDR_FP_TAIL	0x0408
+#define REG_PFDR_FP_LWIT	0x0410
+#define REG_PFDR_CFG		0x0414
+#define REG_SFDR_CFG		0x0500
+#define REG_SFDR_IN_USE		0x0504
+#define REG_WQ_CS_CFG(n)	(0x0600 + ((n) * 0x04))
+#define REG_WQ_DEF_ENC_WQID	0x0630
+#define REG_WQ_SC_DD_CFG(n)	(0x640 + ((n) * 0x04))
+#define REG_WQ_PC_DD_CFG(n)	(0x680 + ((n) * 0x04))
+#define REG_WQ_DC0_DD_CFG(n)	(0x6c0 + ((n) * 0x04))
+#define REG_WQ_DC1_DD_CFG(n)	(0x700 + ((n) * 0x04))
+#define REG_WQ_DCn_DD_CFG(n)	(0x6c0 + ((n) * 0x40)) /* n=2,3 */
+#define REG_CM_CFG		0x0800
+#define REG_ECSR		0x0a00
+#define REG_ECIR		0x0a04
+#define REG_EADR		0x0a08
+#define REG_ECIR2		0x0a0c
+#define REG_EDATA(n)		(0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n)		(0x0a80 + ((n) * 0x04))
+#define REG_MCR			0x0b00
+#define REG_MCP(n)		(0x0b04 + ((n) * 0x04))
+#define REG_MISC_CFG		0x0be0
+#define REG_HID_CFG		0x0bf0
+#define REG_IDLE_STAT		0x0bf4
+#define REG_IP_REV_1		0x0bf8
+#define REG_IP_REV_2		0x0bfc
+#define REG_FQD_BARE		0x0c00
+#define REG_PFDR_BARE		0x0c20
+#define REG_offset_BAR		0x0004	/* relative to REG_[FQD|PFDR]_BARE */
+#define REG_offset_AR		0x0010	/* relative to REG_[FQD|PFDR]_BARE */
+#define REG_QCSP_BARE		0x0c80
+#define REG_QCSP_BAR		0x0c84
+#define REG_CI_SCHED_CFG	0x0d00
+#define REG_SRCIDR		0x0d04
+#define REG_LIODNR		0x0d08
+#define REG_CI_RLM_AVG		0x0d14
+#define REG_ERR_ISR		0x0e00
+#define REG_ERR_IER		0x0e04
+#define REG_REV3_QCSP_LIO_CFG(n)	(0x1000 + ((n) * 0x10))
+#define REG_REV3_QCSP_IO_CFG(n)	(0x1004 + ((n) * 0x10))
+#define REG_REV3_QCSP_DD_CFG(n)	(0x100c + ((n) * 0x10))
+
+/* Assists for QMAN_MCR */
+#define MCR_INIT_PFDR		0x01000000
+#define MCR_get_rslt(v)		((u8)((v) >> 24))
+#define MCR_rslt_idle(r)	(!(r) || ((r) >= 0xf0))
+#define MCR_rslt_ok(r)		((r) == 0xf0)
+#define MCR_rslt_eaccess(r)	((r) == 0xf8)
+#define MCR_rslt_inval(r)	((r) == 0xff)
+
+/*
+ * Corenet initiator settings. Stash request queues are 4-deep to match the
+ * cores' ability to snarf. Stash priority is 3, other priorities are 2.
+ */
+#define QM_CI_SCHED_CFG_SRCCIV		4
+#define QM_CI_SCHED_CFG_SRQ_W		3
+#define QM_CI_SCHED_CFG_RW_W		2
+#define QM_CI_SCHED_CFG_BMAN_W		2
+/* write SRCCIV enable */
+#define QM_CI_SCHED_CFG_SRCCIV_EN	BIT(31)
+
+/* Follows WQ_CS_CFG0-5 */
+enum qm_wq_class {
+	qm_wq_portal = 0,
+	qm_wq_pool = 1,
+	qm_wq_fman0 = 2,
+	qm_wq_fman1 = 3,
+	qm_wq_caam = 4,
+	qm_wq_pme = 5,
+	qm_wq_first = qm_wq_portal,
+	qm_wq_last = qm_wq_pme
+};
+
+/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
+enum qm_memory {
+	qm_memory_fqd,
+	qm_memory_pfdr
+};
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define QM_EIRQ_CIDE	0x20000000	/* Corenet Initiator Data Error */
+#define QM_EIRQ_CTDE	0x10000000	/* Corenet Target Data Error */
+#define QM_EIRQ_CITT	0x08000000	/* Corenet Invalid Target Transaction */
+#define QM_EIRQ_PLWI	0x04000000	/* PFDR Low Watermark */
+#define QM_EIRQ_MBEI	0x02000000	/* Multi-bit ECC Error */
+#define QM_EIRQ_SBEI	0x01000000	/* Single-bit ECC Error */
+#define QM_EIRQ_PEBI	0x00800000	/* PFDR Enqueues Blocked Interrupt */
+#define QM_EIRQ_IFSI	0x00020000	/* Invalid FQ Flow Control State */
+#define QM_EIRQ_ICVI	0x00010000	/* Invalid Command Verb */
+#define QM_EIRQ_IDDI	0x00000800	/* Invalid Dequeue (Direct-connect) */
+#define QM_EIRQ_IDFI	0x00000400	/* Invalid Dequeue FQ */
+#define QM_EIRQ_IDSI	0x00000200	/* Invalid Dequeue Source */
+#define QM_EIRQ_IDQI	0x00000100	/* Invalid Dequeue Queue */
+#define QM_EIRQ_IECE	0x00000010	/* Invalid Enqueue Configuration */
+#define QM_EIRQ_IEOI	0x00000008	/* Invalid Enqueue Overflow */
+#define QM_EIRQ_IESI	0x00000004	/* Invalid Enqueue State */
+#define QM_EIRQ_IECI	0x00000002	/* Invalid Enqueue Channel */
+#define QM_EIRQ_IEQI	0x00000001	/* Invalid Enqueue Queue */
+
+/* QMAN_ECIR valid error bit */
+#define PORTAL_ECSR_ERR	(QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
+			 QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
+			 QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
+#define FQID_ECSR_ERR	(QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
+			 QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
+			 QM_EIRQ_IFSI)
+
+struct qm_ecir {
+	u32 info; /* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */
+};
+
+static bool qm_ecir_is_dcp(const struct qm_ecir *p)
+{
+	return p->info & BIT(29);
+}
+
+static int qm_ecir_get_pnum(const struct qm_ecir *p)
+{
+	return (p->info >> 24) & 0x1f;
+}
+
+static int qm_ecir_get_fqid(const struct qm_ecir *p)
+{
+	return p->info & (BIT(24) - 1);
+}
+
+struct qm_ecir2 {
+	u32 info; /* ptyp[31], res[10-30], pnum[0-9] */
+};
+
+static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p)
+{
+	return p->info & BIT(31);
+}
+
+static int qm_ecir2_get_pnum(const struct qm_ecir2 *p)
+{
+	return p->info & (BIT(10) - 1);
+}
+
+struct qm_eadr {
+	u32 info; /* memid[24-27], eadr[0-11] */
+		  /* v3: memid[24-28], eadr[0-15] */
+};
+
+static int qm_eadr_get_memid(const struct qm_eadr *p)
+{
+	return (p->info >> 24) & 0xf;
+}
+
+static int qm_eadr_get_eadr(const struct qm_eadr *p)
+{
+	return p->info & (BIT(12) - 1);
+}
+
+static int qm_eadr_v3_get_memid(const struct qm_eadr *p)
+{
+	return (p->info >> 24) & 0x1f;
+}
+
+static int qm_eadr_v3_get_eadr(const struct qm_eadr *p)
+{
+	return p->info & (BIT(16) - 1);
+}
+
+struct qman_hwerr_txt {
+	u32 mask;
+	const char *txt;
+};
+
+static const struct qman_hwerr_txt qman_hwerr_txts[] = {
+	{ QM_EIRQ_CIDE, "Corenet Initiator Data Error" },
+	{ QM_EIRQ_CTDE, "Corenet Target Data Error" },
+	{ QM_EIRQ_CITT, "Corenet Invalid Target Transaction" },
+	{ QM_EIRQ_PLWI, "PFDR Low Watermark" },
+	{ QM_EIRQ_MBEI, "Multi-bit ECC Error" },
+	{ QM_EIRQ_SBEI, "Single-bit ECC Error" },
+	{ QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" },
+	{ QM_EIRQ_ICVI, "Invalid Command Verb" },
+	{ QM_EIRQ_IFSI, "Invalid Flow Control State" },
+	{ QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" },
+	{ QM_EIRQ_IDFI, "Invalid Dequeue FQ" },
+	{ QM_EIRQ_IDSI, "Invalid Dequeue Source" },
+	{ QM_EIRQ_IDQI, "Invalid Dequeue Queue" },
+	{ QM_EIRQ_IECE, "Invalid Enqueue Configuration" },
+	{ QM_EIRQ_IEOI, "Invalid Enqueue Overflow" },
+	{ QM_EIRQ_IESI, "Invalid Enqueue State" },
+	{ QM_EIRQ_IECI, "Invalid Enqueue Channel" },
+	{ QM_EIRQ_IEQI, "Invalid Enqueue Queue" },
+};
+
+struct qman_error_info_mdata {
+	u16 addr_mask;
+	u16 bits;
+	const char *txt;
+};
+
+static const struct qman_error_info_mdata error_mdata[] = {
+	{ 0x01FF, 24, "FQD cache tag memory 0" },
+	{ 0x01FF, 24, "FQD cache tag memory 1" },
+	{ 0x01FF, 24, "FQD cache tag memory 2" },
+	{ 0x01FF, 24, "FQD cache tag memory 3" },
+	{ 0x0FFF, 512, "FQD cache memory" },
+	{ 0x07FF, 128, "SFDR memory" },
+	{ 0x01FF, 72, "WQ context memory" },
+	{ 0x00FF, 240, "CGR memory" },
+	{ 0x00FF, 302, "Internal Order Restoration List memory" },
+	{ 0x01FF, 256, "SW portal ring memory" },
+};
+
+#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
+
+/*
+ * TODO: unimplemented registers
+ *
+ * Keeping a list here of QMan registers I have not yet covered;
+ * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
+ * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
+ * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
+ */
+
+/* Pointer to the start of the QMan's CCSR space */
+static u32 __iomem *qm_ccsr_start;
+/* An SDQCR mask comprising all the available/visible pool channels */
+static u32 qm_pools_sdqcr;
+
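+/*
+ * The accessors below take byte offsets; qm_ccsr_start is a u32 pointer,
+ * hence the division by 4.
+ */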
+static inline u32 qm_ccsr_in(u32 offset)
+{
+	return ioread32be(qm_ccsr_start + offset/4);
+}
+
+static inline void qm_ccsr_out(u32 offset, u32 val)
+{
+	iowrite32be(val, qm_ccsr_start + offset/4);
+}
+
+u32 qm_get_pools_sdqcr(void)
+{
+	return qm_pools_sdqcr;
+}
+
+enum qm_dc_portal {
+	qm_dc_portal_fman0 = 0,
+	qm_dc_portal_fman1 = 1
+};
+
+static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd)
+{
+	DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 ||
+		    portal == qm_dc_portal_fman1);
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		qm_ccsr_out(REG_DCP_CFG(portal),
+			    (ed ? 0x1000 : 0) | (sernd & 0x3ff));
+	else
+		qm_ccsr_out(REG_DCP_CFG(portal),
+			    (ed ? 0x100 : 0) | (sernd & 0x1f));
+}
+
+static void qm_set_wq_scheduling(enum qm_wq_class wq_class,
+				 u8 cs_elev, u8 csw2, u8 csw3, u8 csw4,
+				 u8 csw5, u8 csw6, u8 csw7)
+{
+	qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
+		    ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
+		    ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
+		    ((csw6 & 0x7) << 4) | (csw7 & 0x7));
+}
+
+static void qm_set_hid(void)
+{
+	qm_ccsr_out(REG_HID_CFG, 0);
+}
+
+static void qm_set_corenet_initiator(void)
+{
+	qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN |
+		    (QM_CI_SCHED_CFG_SRCCIV << 24) |
+		    (QM_CI_SCHED_CFG_SRQ_W << 8) |
+		    (QM_CI_SCHED_CFG_RW_W << 4) |
+		    QM_CI_SCHED_CFG_BMAN_W);
+}
+
+static void qm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+	u32 v = qm_ccsr_in(REG_IP_REV_1);
+	*id = (v >> 16);
+	*major = (v >> 8) & 0xff;
+	*minor = v & 0xff;
+}
+
+#define PFDR_AR_EN		BIT(31)
+static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
+{
+	u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
+	u32 exp = ilog2(size);
+
+	/* choke if size isn't within range */
+	DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
+		    is_power_of_2(size));
+	/* choke if 'ba' has lower-alignment than 'size' */
+	DPAA_ASSERT(!(ba & (size - 1)));
+	qm_ccsr_out(offset, upper_32_bits(ba));
+	qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
+	qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
+}
+
+static void qm_set_pfdr_threshold(u32 th, u8 k)
+{
+	qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff);
+	qm_ccsr_out(REG_PFDR_CFG, k);
+}
+
+static void qm_set_sfdr_threshold(u16 th)
+{
+	qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff);
+}
+
+static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
+{
+	u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+
+	DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
+	/* Make sure the command interface is 'idle' */
+	if (!MCR_rslt_idle(rslt)) {
+		dev_crit(dev, "QMAN_MCR isn't idle\n");
+		WARN_ON(1);
+	}
+
+	/* Write the MCR command params then the verb */
+	qm_ccsr_out(REG_MCP(0), pfdr_start);
+	/*
+	 * TODO: remove this - it's a workaround for a model bug that is
+	 * corrected in more recent versions. We use the workaround until
+	 * everyone has upgraded.
+	 */
+	qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16);
+	dma_wmb();
+	qm_ccsr_out(REG_MCR, MCR_INIT_PFDR);
+	/* Poll for the result */
+	do {
+		rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+	} while (!MCR_rslt_idle(rslt));
+	if (MCR_rslt_ok(rslt))
+		return 0;
+	if (MCR_rslt_eaccess(rslt))
+		return -EACCES;
+	if (MCR_rslt_inval(rslt))
+		return -EINVAL;
+	dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
+	return -ENODEV;
+}
+
+/*
+ * Ideally we would use the DMA API to turn rmem->base into a DMA address
+ * (especially if iommu translations ever get involved).  Unfortunately, the
+ * DMA API currently does not allow mapping anything that is not backed with
+ * a struct page.
+ */
+static dma_addr_t fqd_a, pfdr_a;
+static size_t fqd_sz, pfdr_sz;
+
+static int qman_fqd(struct reserved_mem *rmem)
+{
+	fqd_a = rmem->base;
+	fqd_sz = rmem->size;
+
+	WARN_ON(!(fqd_a && fqd_sz));
+
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
+
+static int qman_pfdr(struct reserved_mem *rmem)
+{
+	pfdr_a = rmem->base;
+	pfdr_sz = rmem->size;
+
+	WARN_ON(!(pfdr_a && pfdr_sz));
+
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
+
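+/* each FQD (frame queue descriptor) occupies 64 bytes of the FQD memory */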
+static unsigned int qm_get_fqid_maxcnt(void)
+{
+	return fqd_sz / 64;
+}
+
+/*
+ * Flush this memory range from the data cache so that QMan-originated
+ * transactions for this memory region can be marked non-coherent.
+ */
+static int zero_priv_mem(struct device *dev, struct device_node *node,
+			 phys_addr_t addr, size_t sz)
+{
+	/* map as cacheable, non-guarded */
+	void __iomem *tmpp = ioremap_prot(addr, sz, 0);
+
+	memset_io(tmpp, 0, sz);
+	flush_dcache_range((unsigned long)tmpp,
+			   (unsigned long)tmpp + sz);
+	iounmap(tmpp);
+
+	return 0;
+}
+
+static void log_edata_bits(struct device *dev, u32 bit_count)
+{
+	u32 i, j, mask = 0xffffffff;
+
+	dev_warn(dev, "ErrInt, EDATA:\n");
+	i = bit_count / 32;
+	if (bit_count % 32) {
+		i++;
+		mask = ~(mask << bit_count % 32);
+	}
+	j = 16 - i;
+	dev_warn(dev, "  0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask);
+	j++;
+	for (; j < 16; j++)
+		dev_warn(dev, "  0x%08x\n", qm_ccsr_in(REG_EDATA(j)));
+}
+
+static void log_additional_error_info(struct device *dev, u32 isr_val,
+				      u32 ecsr_val)
+{
+	struct qm_ecir ecir_val;
+	struct qm_eadr eadr_val;
+	int memid;
+
+	ecir_val.info = qm_ccsr_in(REG_ECIR);
+	/* Is portal info valid */
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+		struct qm_ecir2 ecir2_val;
+
+		ecir2_val.info = qm_ccsr_in(REG_ECIR2);
+		if (ecsr_val & PORTAL_ECSR_ERR) {
+			dev_warn(dev, "ErrInt: %s id %d\n",
+				 qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP",
+				 qm_ecir2_get_pnum(&ecir2_val));
+		}
+		if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
+			dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+				 qm_ecir_get_fqid(&ecir_val));
+
+		if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+			eadr_val.info = qm_ccsr_in(REG_EADR);
+			memid = qm_eadr_v3_get_memid(&eadr_val);
+			dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+				 error_mdata[memid].txt,
+				 error_mdata[memid].addr_mask
+					& qm_eadr_v3_get_eadr(&eadr_val));
+			log_edata_bits(dev, error_mdata[memid].bits);
+		}
+	} else {
+		if (ecsr_val & PORTAL_ECSR_ERR) {
+			dev_warn(dev, "ErrInt: %s id %d\n",
+				 qm_ecir_is_dcp(&ecir_val) ? "DCP" : "SWP",
+				 qm_ecir_get_pnum(&ecir_val));
+		}
+		if (ecsr_val & FQID_ECSR_ERR)
+			dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+				 qm_ecir_get_fqid(&ecir_val));
+
+		if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+			eadr_val.info = qm_ccsr_in(REG_EADR);
+			memid = qm_eadr_get_memid(&eadr_val);
+			dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+				 error_mdata[memid].txt,
+				 error_mdata[memid].addr_mask
+					& qm_eadr_get_eadr(&eadr_val));
+			log_edata_bits(dev, error_mdata[memid].bits);
+		}
+	}
+}
+
+static irqreturn_t qman_isr(int irq, void *ptr)
+{
+	u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+	struct device *dev = ptr;
+
+	ier_val = qm_ccsr_in(REG_ERR_IER);
+	isr_val = qm_ccsr_in(REG_ERR_ISR);
+	ecsr_val = qm_ccsr_in(REG_ECSR);
+	isr_mask = isr_val & ier_val;
+
+	if (!isr_mask)
+		return IRQ_NONE;
+
+	for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) {
+		if (qman_hwerr_txts[i].mask & isr_mask) {
+			dev_err_ratelimited(dev, "ErrInt: %s\n",
+					    qman_hwerr_txts[i].txt);
+			if (qman_hwerr_txts[i].mask & ecsr_val) {
+				log_additional_error_info(dev, isr_mask,
+							  ecsr_val);
+				/* Re-arm error capture registers */
+				qm_ccsr_out(REG_ECSR, ecsr_val);
+			}
+			if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) {
+				dev_dbg(dev, "Disabling error 0x%x\n",
+					qman_hwerr_txts[i].mask);
+				ier_val &= ~qman_hwerr_txts[i].mask;
+				qm_ccsr_out(REG_ERR_IER, ier_val);
+			}
+		}
+	}
+	qm_ccsr_out(REG_ERR_ISR, isr_val);
+
+	return IRQ_HANDLED;
+}
+
+static int qman_init_ccsr(struct device *dev)
+{
+	int i, err;
+
+	/* FQD memory */
+	qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
+	/* PFDR memory */
+	qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
+	err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
+	if (err)
+		return err;
+	/* thresholds */
+	qm_set_pfdr_threshold(512, 64);
+	qm_set_sfdr_threshold(128);
+	/* clear stale PEBI bit from interrupt status register */
+	qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI);
+	/* corenet initiator settings */
+	qm_set_corenet_initiator();
+	/* HID settings */
+	qm_set_hid();
+	/* Set scheduling weights to defaults */
+	for (i = qm_wq_first; i <= qm_wq_last; i++)
+		qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0);
+	/* We are not prepared to accept ERNs for hardware enqueues */
+	qm_set_dc(qm_dc_portal_fman0, 1, 0);
+	qm_set_dc(qm_dc_portal_fman1, 1, 0);
+	return 0;
+}
+
+#define LIO_CFG_LIODN_MASK 0x0fff0000
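+/*
+ * The first portal probed captures the LIODN offset programmed by the boot
+ * loader; every portal probed after it is rewritten to use that same offset,
+ * so all portals share one LIODN base (see the "same LIODN offset" note at
+ * the call site in qman_portal.c).
+ */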
+void qman_liodn_fixup(u16 channel)
+{
+	static int done;
+	static u32 liodn_offset;
+	u32 before, after;
+	int idx = channel - QM_CHANNEL_SWPORTAL0;
+
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx));
+	else
+		before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx));
+	if (!done) {
+		liodn_offset = before & LIO_CFG_LIODN_MASK;
+		done = 1;
+		return;
+	}
+	after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
+	else
+		qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
+}
+
+#define IO_CFG_SDEST_MASK 0x00ff0000
+int qman_set_sdest(u16 channel, unsigned int cpu_idx)
+{
+	int idx = channel - QM_CHANNEL_SWPORTAL0;
+	u32 before, after;
+
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+		before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx));
+		/* Each pair of vCPUs shares the same SRQ (SDEST) */
+		cpu_idx /= 2;
+		after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+		qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after);
+	} else {
+		before = qm_ccsr_in(REG_QCSP_IO_CFG(idx));
+		after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+		qm_ccsr_out(REG_QCSP_IO_CFG(idx), after);
+	}
+	return 0;
+}
+
+#define QM_FQID_RESERVED 256 /* [0..255] reserved for internal use */
+static int __init qman_resource_init(struct device *dev)
+{
+	int pool_chan_num, cgrid_num, fqid_num;
+	int ret, i;
+
+	switch (qman_ip_rev >> 8) {
+	case 1:
+		pool_chan_num = 15;
+		fqid_num = 256;
+		cgrid_num = 256;
+		break;
+	case 2:
+		pool_chan_num = 3;
+		fqid_num = 256;
+		cgrid_num = 64;
+		break;
+	case 3:
+		pool_chan_num = 15;
+		fqid_num = 512;
+		cgrid_num = 256;
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF,
+			   pool_chan_num, -1);
+	if (ret) {
+		dev_err(dev, "Failed to seed pool channels (%d)\n", ret);
+		return ret;
+	}
+
+	ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1);
+	if (ret) {
+		dev_err(dev, "Failed to seed CGRID range (%d)\n", ret);
+		return ret;
+	}
+
+	/* parse pool channels into the SDQCR mask */
+	for (i = 0; i < cgrid_num; i++)
+		qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(i);
+
+	/*
+	 * skip range reserved for qman driver's internal use
+	 * (err handling, self tests, etc.)
+	 */
+	ret = gen_pool_add(qm_fqalloc, QM_FQID_RESERVED | DPAA_GENALLOC_OFF,
+			   qm_get_fqid_maxcnt() - QM_FQID_RESERVED, -1);
+	if (ret) {
+		dev_err(dev, "Failed to seed FQID range (%d)\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int fsl_qman_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	int ret, err_irq;
+	u16 id;
+	u8 major, minor;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "Can't get %s IORESOURCE_MEM\n",
+			node->full_name);
+		return -ENXIO;
+	}
+	qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
+	if (!qm_ccsr_start)
+		return -ENXIO;
+
+	qm_get_version(&id, &major, &minor);
+	if (major == 1 && minor == 0) {
+		dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n");
+		return -ENODEV;
+	} else if (major == 1 && minor == 1)
+		qman_ip_rev = QMAN_REV11;
+	else if	(major == 1 && minor == 2)
+		qman_ip_rev = QMAN_REV12;
+	else if (major == 2 && minor == 0)
+		qman_ip_rev = QMAN_REV20;
+	else if (major == 3 && minor == 0)
+		qman_ip_rev = QMAN_REV30;
+	else if (major == 3 && minor == 1)
+		qman_ip_rev = QMAN_REV31;
+	else {
+		dev_err(dev, "Unknown QMan version\n");
+		return -ENODEV;
+	}
+
+	if ((qman_ip_rev & 0xff00) >= QMAN_REV30)
+		qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
+
+	ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
+	WARN_ON(ret);
+	if (ret)
+		return -ENODEV;
+
+	ret = qman_init_ccsr(dev);
+	if (ret) {
+		dev_err(dev, "CCSR setup failed\n");
+		return ret;
+	}
+
+	err_irq = platform_get_irq(pdev, 0);
+	if (err_irq <= 0) {
+		dev_err(dev, "Can't get %s property 'interrupts'\n",
+			node->full_name);
+		return -ENODEV;
+	}
+	ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err",
+			       dev);
+	if (ret)  {
+		dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
+			ret, node->full_name);
+		return ret;
+	}
+
+	/*
+	 * Write-to-clear any stale bits (e.g. starvation being asserted prior
+	 * to resource allocation during driver init).
+	 */
+	qm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+	/* Enable Error Interrupts */
+	qm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+	qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc");
+	if (IS_ERR(qm_fqalloc)) {
+		ret = PTR_ERR(qm_fqalloc);
+		dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret);
+		return ret;
+	}
+
+	qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc");
+	if (IS_ERR(qm_qpalloc)) {
+		ret = PTR_ERR(qm_qpalloc);
+		dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret);
+		return ret;
+	}
+
+	qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc");
+	if (IS_ERR(qm_cgralloc)) {
+		ret = PTR_ERR(qm_cgralloc);
+		dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret);
+		return ret;
+	}
+
+	ret = qman_resource_init(dev);
+	if (ret < 0)
+		return ret;
+
+	ret = qman_alloc_fq_table(qm_get_fqid_maxcnt());
+	if (ret)
+		return ret;
+
+	ret = qman_wq_alloc();
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static const struct of_device_id fsl_qman_ids[] = {
+	{
+		.compatible = "fsl,qman",
+	},
+	{}
+};
+
+static struct platform_driver fsl_qman_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = fsl_qman_ids,
+		.suppress_bind_attrs = true,
+	},
+	.probe = fsl_qman_probe,
+};
+
+builtin_platform_driver(fsl_qman_driver);
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
new file mode 100644
index 0000000..2dc5adc
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -0,0 +1,354 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+/* Enable portal interrupts (as opposed to polling mode) */
+#define CONFIG_FSL_DPA_PIRQ_SLOW  1
+#define CONFIG_FSL_DPA_PIRQ_FAST  1
+
+static struct cpumask portal_cpus;
+
+static void __init portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
+{
+	struct device *dev = pcfg->dev;
+#ifdef CONFIG_FSL_PAMU
+	int ret;
+	int window_count = 1;
+	struct iommu_domain_geometry geom_attr;
+	struct pamu_stash_attribute stash_attr;
+
+	pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
+	if (!pcfg->iommu_domain) {
+		dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
+		goto no_iommu;
+	}
+	geom_attr.aperture_start = 0;
+	geom_attr.aperture_end =
+		((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
+	geom_attr.force_aperture = true;
+	ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
+				    &geom_attr);
+	if (ret < 0) {
+		dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+			ret);
+		goto out_domain_free;
+	}
+	ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
+				    &window_count);
+	if (ret < 0) {
+		dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+			ret);
+		goto out_domain_free;
+	}
+	stash_attr.cpu = cpu;
+	stash_attr.cache = PAMU_ATTR_CACHE_L1;
+	ret = iommu_domain_set_attr(pcfg->iommu_domain,
+				    DOMAIN_ATTR_FSL_PAMU_STASH,
+				    &stash_attr);
+	if (ret < 0) {
+		dev_err(dev, "%s(): iommu_domain_set_attr() = %d",
+			__func__, ret);
+		goto out_domain_free;
+	}
+	ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
+					 IOMMU_READ | IOMMU_WRITE);
+	if (ret < 0) {
+		dev_err(dev, "%s(): iommu_domain_window_enable() = %d",
+			__func__, ret);
+		goto out_domain_free;
+	}
+	ret = iommu_attach_device(pcfg->iommu_domain, dev);
+	if (ret < 0) {
+		dev_err(dev, "%s(): iommu_device_attach() = %d", __func__,
+			ret);
+		goto out_domain_free;
+	}
+	ret = iommu_domain_set_attr(pcfg->iommu_domain,
+				    DOMAIN_ATTR_FSL_PAMU_ENABLE,
+				    &window_count);
+	if (ret < 0) {
+		dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+			ret);
+		goto out_detach_device;
+	}
+
+no_iommu:
+#endif
+	if (qman_set_sdest(pcfg->channel, cpu))
+		dev_warn(dev, "Failed to set the stash request queue\n");
+
+	return;
+
+#ifdef CONFIG_FSL_PAMU
+out_detach_device:
+	iommu_detach_device(pcfg->iommu_domain, NULL);
+out_domain_free:
+	iommu_domain_free(pcfg->iommu_domain);
+	pcfg->iommu_domain = NULL;
+#endif
+}
+
+static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
+{
+	struct qman_portal *p;
+
+	pcfg->iommu_domain = NULL;
+	portal_set_cpu(pcfg, pcfg->cpu);
+	p = qman_create_affine_portal(pcfg, NULL);
+	if (p) {
+		u32 irq_sources = 0;
+		/* Determine what should be interrupt-vs-poll driven */
+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
+		irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
+			       QM_PIRQ_CSCI;
+#endif
+#ifdef CONFIG_FSL_DPA_PIRQ_FAST
+		irq_sources |= QM_PIRQ_DQRI;
+#endif
+		qman_p_irqsource_add(p, irq_sources);
+
+		if (pcfg->cpu == smp_processor_id())
+			qman_init_cgr_all();
+
+		dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+	} else {
+		dev_crit(pcfg->dev, "Portal failure on cpu %d\n", pcfg->cpu);
+	}
+	return p;
+}
+
+static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
+							unsigned int cpu)
+{
+#ifdef CONFIG_FSL_PAMU /* TODO */
+	struct pamu_stash_attribute stash_attr;
+	int ret;
+
+	if (pcfg->iommu_domain) {
+		stash_attr.cpu = cpu;
+		stash_attr.cache = PAMU_ATTR_CACHE_L1;
+		ret = iommu_domain_set_attr(pcfg->iommu_domain,
+				DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
+		if (ret < 0) {
+			dev_err(pcfg->dev,
+				"Failed to update pamu stash setting\n");
+			return;
+		}
+	}
+#endif
+	if (qman_set_sdest(pcfg->channel, cpu))
+		dev_warn(pcfg->dev,
+			 "Failed to update portal's stash request queue\n");
+}
+
+static void qman_offline_cpu(unsigned int cpu)
+{
+	struct qman_portal *p;
+	const struct qm_portal_config *pcfg;
+
+	p = (struct qman_portal *)affine_portals[cpu];
+	if (p) {
+		pcfg = qman_get_qm_portal_config(p);
+		if (pcfg) {
+			irq_set_affinity(pcfg->irq, cpumask_of(0));
+			qman_portal_update_sdest(pcfg, 0);
+		}
+	}
+}
+
+static void qman_online_cpu(unsigned int cpu)
+{
+	struct qman_portal *p;
+	const struct qm_portal_config *pcfg;
+
+	p = (struct qman_portal *)affine_portals[cpu];
+	if (p) {
+		pcfg = qman_get_qm_portal_config(p);
+		if (pcfg) {
+			irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+			qman_portal_update_sdest(pcfg, cpu);
+		}
+	}
+}
+
+static int qman_hotplug_cpu_callback(struct notifier_block *nfb,
+				     unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		qman_online_cpu(cpu);
+		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		qman_offline_cpu(cpu);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block qman_hotplug_cpu_notifier = {
+	.notifier_call = qman_hotplug_cpu_callback,
+};
+
+static int qman_portal_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct qm_portal_config *pcfg;
+	struct resource *addr_phys[2];
+	const u32 *channel;
+	void __iomem *va;
+	int irq, len, cpu;
+	static bool once = true;
+
+	if (once) {
+		memset(affine_portals, 0,
+		       sizeof(affine_portals[0]) * num_possible_cpus());
+		once = false;
+	}
+
+	pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+	if (!pcfg)
+		return -ENOMEM;
+
+	pcfg->dev = dev;
+
+	addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+					     DPAA_PORTAL_CE);
+	if (!addr_phys[0]) {
+		dev_err(dev, "Can't get %s property 'reg::CE'\n",
+			node->full_name);
+		return -ENXIO;
+	}
+
+	addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+					     DPAA_PORTAL_CI);
+	if (!addr_phys[1]) {
+		dev_err(dev, "Can't get %s property 'reg::CI'\n",
+			node->full_name);
+		return -ENXIO;
+	}
+
+	channel = of_get_property(node, "cell-index", &len);
+	if (!channel || (len != 4)) {
+		dev_err(dev, "Can't get %s property 'cell-index'\n",
+			node->full_name);
+		return -ENXIO;
+	}
+	pcfg->channel = *channel;
+	pcfg->cpu = -1;
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_err(dev, "Can't get %s IRQ\n", node->full_name);
+		return -ENXIO;
+	}
+	pcfg->irq = irq;
+
+	/* We need the same LIODN offset for all portals */
+	qman_liodn_fixup(pcfg->channel);
+
+	va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
+	if (!va) {
+		dev_err(dev, "ioremap of CE region failed\n");
+		goto err_ioremap1;
+	}
+
+	pcfg->addr_virt[DPAA_PORTAL_CE] = va;
+
+	va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
+			  _PAGE_GUARDED | _PAGE_NO_CACHE);
+	if (!va) {
+		dev_err(dev, "ioremap of CI region failed\n");
+		goto err_ioremap2;
+	}
+
+	pcfg->addr_virt[DPAA_PORTAL_CI] = va;
+
+	pcfg->pools = qm_get_pools_sdqcr();
+
+	for_each_possible_cpu(cpu)
+		if (!cpumask_test_cpu(cpu, &portal_cpus)) {
+			cpumask_set_cpu(cpu, &portal_cpus);
+			pcfg->cpu = cpu;
+			break;
+		}
+
+	if (pcfg->cpu == -1)
+		/* unassigned portal, skip init */
+		return 0;
+
+	if (!init_pcfg(pcfg))
+		goto err_init_pcfg;
+
+	/* clear irq affinity if assigned cpu is offline */
+	if (!cpumask_test_cpu(cpu, cpu_online_mask))
+		qman_offline_cpu(cpu);
+
+	return 0;
+
+err_init_pcfg:
+	iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
+err_ioremap2:
+	iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+err_ioremap1:
+	return -ENXIO;
+}
+
+static const struct of_device_id qman_portal_ids[] = {
+	{
+		.compatible = "fsl,qman-portal",
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, qman_portal_ids);
+
+static struct platform_driver qman_portal_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = qman_portal_ids,
+	},
+	.probe = qman_portal_probe,
+};
+
+static int __init qman_portal_driver_register(struct platform_driver *drv)
+{
+	int ret;
+
+	ret = platform_driver_register(drv);
+	if (ret < 0)
+		return ret;
+
+	register_hotcpu_notifier(&qman_hotplug_cpu_notifier);
+
+	return 0;
+}
+
+module_driver(qman_portal_driver,
+	      qman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
new file mode 100644
index 0000000..1f4f255
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -0,0 +1,370 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/qman.h>
+#include <linux/iommu.h>
+
+#if defined(CONFIG_FSL_PAMU)
+#include <asm/fsl_pamu_stash.h>
+#endif
+
+struct qm_mcr_querywq {
+	u8 verb;
+	u8 result;
+	u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
+	u8 __reserved[28];
+	u32 wq_len[8];
+} __packed;
+
+static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq)
+{
+	return wq->channel_wq >> 3;
+}
+
+struct __qm_mcr_querycongestion {
+	u32 state[8];
+};
+
+/* "Query Congestion Group State" */
+struct qm_mcr_querycongestion {
+	u8 verb;
+	u8 result;
+	u8 __reserved[30];
+	/* Access this struct using qman_cgrs_get() */
+	struct __qm_mcr_querycongestion state;
+} __packed;
+
+/* "Query CGR" */
+struct qm_mcr_querycgr {
+	u8 verb;
+	u8 result;
+	u16 __reserved1;
+	struct __qm_mc_cgr cgr; /* CGR fields */
+	u8 __reserved2[6];
+	u8 i_bcnt_hi;	/* high 8-bits of 40-bit "Instant" */
+	u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
+	u8 __reserved3[3];
+	u8 a_bcnt_hi;	/* high 8-bits of 40-bit "Average" */
+	u32 a_bcnt_lo;	/* low 32-bits of 40-bit */
+	u32 cscn_targ_swp[4];
+} __packed;
+
+static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
+{
+	return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo;
+}
+static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
+{
+	return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo;
+}
+
+/* "Query FQ Non-Programmable Fields" */
+struct qm_mcc_queryfq_np {
+	u8 _ncw_verb;
+	u8 __reserved1[3];
+	u32 fqid;	/* 24-bit */
+	u8 __reserved2[56];
+} __packed;
+
+struct qm_mcr_queryfq_np {
+	u8 verb;
+	u8 result;
+	u8 __reserved1;
+	u8 state;		/* QM_MCR_NP_STATE_*** */
+	u32 fqd_link;		/* 24-bit, _res2[24-31] */
+	u16 odp_seq;		/* 14-bit, _res3[14-15] */
+	u16 orp_nesn;		/* 14-bit, _res4[14-15] */
+	u16 orp_ea_hseq;	/* 15-bit, _res5[15] */
+	u16 orp_ea_tseq;	/* 15-bit, _res6[15] */
+	u32 orp_ea_hptr;	/* 24-bit, _res7[24-31] */
+	u32 orp_ea_tptr;	/* 24-bit, _res8[24-31] */
+	u32 pfdr_hptr;		/* 24-bit, _res9[24-31] */
+	u32 pfdr_tptr;		/* 24-bit, _res10[24-31] */
+	u8 __reserved2[5];
+	u8 is;			/* 1-bit, _res12[1-7] */
+	u16 ics_surp;
+	u32 byte_cnt;
+	u32 frm_cnt;		/* 24-bit, _res13[24-31] */
+	u32 __reserved3;
+	u16 ra1_sfdr;		/* QM_MCR_NP_RA1_*** */
+	u16 ra2_sfdr;		/* QM_MCR_NP_RA2_*** */
+	u16 __reserved4;
+	u16 od1_sfdr;		/* QM_MCR_NP_OD1_*** */
+	u16 od2_sfdr;		/* QM_MCR_NP_OD2_*** */
+	u16 od3_sfdr;		/* QM_MCR_NP_OD3_*** */
+} __packed;
+
+#define QM_MCR_NP_STATE_FE		0x10
+#define QM_MCR_NP_STATE_R		0x08
+#define QM_MCR_NP_STATE_MASK		0x07	/* Reads FQD::STATE; */
+#define QM_MCR_NP_STATE_OOS		0x00
+#define QM_MCR_NP_STATE_RETIRED		0x01
+#define QM_MCR_NP_STATE_TEN_SCHED	0x02
+#define QM_MCR_NP_STATE_TRU_SCHED	0x03
+#define QM_MCR_NP_STATE_PARKED		0x04
+#define QM_MCR_NP_STATE_ACTIVE		0x05
+#define QM_MCR_NP_PTR_MASK		0x07ff	/* for RA[12] & OD[123] */
+#define QM_MCR_NP_RA1_NRA(v)		(((v) >> 14) & 0x3)	/* FQD::NRA */
+#define QM_MCR_NP_RA2_IT(v)		(((v) >> 14) & 0x1)	/* FQD::IT */
+#define QM_MCR_NP_OD1_NOD(v)		(((v) >> 14) & 0x3)	/* FQD::NOD */
+#define QM_MCR_NP_OD3_NPC(v)		(((v) >> 14) & 0x3)	/* FQD::NPC */
+
+enum qm_mcr_queryfq_np_masks {
+	qm_mcr_fqd_link_mask = BIT(24)-1,
+	qm_mcr_odp_seq_mask = BIT(14)-1,
+	qm_mcr_orp_nesn_mask = BIT(14)-1,
+	qm_mcr_orp_ea_hseq_mask = BIT(15)-1,
+	qm_mcr_orp_ea_tseq_mask = BIT(15)-1,
+	qm_mcr_orp_ea_hptr_mask = BIT(24)-1,
+	qm_mcr_orp_ea_tptr_mask = BIT(24)-1,
+	qm_mcr_pfdr_hptr_mask = BIT(24)-1,
+	qm_mcr_pfdr_tptr_mask = BIT(24)-1,
+	qm_mcr_is_mask = BIT(1)-1,
+	qm_mcr_frm_cnt_mask = BIT(24)-1,
+};
+#define qm_mcr_np_get(np, field) \
+	((np)->field & (qm_mcr_##field##_mask))
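+
+/*
+ * Usage sketch (illustrative only; assumes 'np' is a struct
+ * qm_mcr_queryfq_np filled in by a "Query FQ Non-Programmable Fields"
+ * command). Both reads below are masked to their 24-bit widths:
+ *
+ *	u32 link = qm_mcr_np_get(&np, fqd_link);
+ *	u32 frames = qm_mcr_np_get(&np, frm_cnt);
+ */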
+
+/* Congestion Groups */
+
+/*
+ * This wrapper represents a bit-array for the state of the 256 QMan congestion
+ * groups. It is also used as a *mask* for congestion groups, eg. so we ignore
+ * those that don't concern us. We harness the structure and accessor details
+ * already used in the management command to query congestion groups.
+ */
+/* 5 is log2(32): the congestion state is stored in 32-bit words */
+#define CGR_BITS_PER_WORD 5
+#define CGR_WORD(x)	((x) >> CGR_BITS_PER_WORD)
+#define CGR_BIT(x)	(BIT(31) >> ((x) & 0x1f))
+#define CGR_NUM	(sizeof(struct __qm_mcr_querycongestion) << 3)
+
+struct qman_cgrs {
+	struct __qm_mcr_querycongestion q;
+};
+
+static inline void qman_cgrs_init(struct qman_cgrs *c)
+{
+	memset(c, 0, sizeof(*c));
+}
+
+static inline void qman_cgrs_fill(struct qman_cgrs *c)
+{
+	memset(c, 0xff, sizeof(*c));
+}
+
+static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr)
+{
+	return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr);
+}
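+
+/*
+ * Example (illustrative): congestion group 40 lives in word 40 >> 5 == 1
+ * of the snapshot, at bit BIT(31) >> (40 & 0x1f) == BIT(23), matching
+ * QMan's big-endian bit numbering:
+ *
+ *	struct qman_cgrs snap;
+ *
+ *	qman_cgrs_init(&snap);
+ *	if (qman_cgrs_get(&snap, 40))
+ *		pr_info("CGR 40 flagged\n");
+ */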
+
+static inline void qman_cgrs_cp(struct qman_cgrs *dest,
+				const struct qman_cgrs *src)
+{
+	*dest = *src;
+}
+
+static inline void qman_cgrs_and(struct qman_cgrs *dest,
+			const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+	int i;
+	u32 *_d = dest->q.state;
+	const u32 *_a = a->q.state;
+	const u32 *_b = b->q.state;
+
+	for (i = 0; i < 8; i++)
+		*_d++ = *_a++ & *_b++;
+}
+
+static inline void qman_cgrs_xor(struct qman_cgrs *dest,
+			const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+	int i;
+	u32 *_d = dest->q.state;
+	const u32 *_a = a->q.state;
+	const u32 *_b = b->q.state;
+
+	for (i = 0; i < 8; i++)
+		*_d++ = *_a++ ^ *_b++;
+}
+
+void qman_init_cgr_all(void);
+
+struct qm_portal_config {
+	/*
+	 * Corenet portal addresses;
+	 * [0]==cache-enabled, [1]==cache-inhibited.
+	 */
+	void __iomem *addr_virt[2];
+	struct device *dev;
+	struct iommu_domain *iommu_domain;
+	/* Allow these to be joined in lists */
+	struct list_head list;
+	/* User-visible portal configuration settings */
+	/* portal is affined to this cpu */
+	int cpu;
+	/* portal interrupt line */
+	int irq;
+	/*
+	 * the portal's dedicated channel id, used when initialising
+	 * frame queues to target this portal when scheduled
+	 */
+	u16 channel;
+	/*
+	 * mask of pool channels this portal has dequeue access to
+	 * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask)
+	 */
+	u32 pools;
+};
+
+/* Revision info (for errata and feature handling) */
+#define QMAN_REV11 0x0101
+#define QMAN_REV12 0x0102
+#define QMAN_REV20 0x0200
+#define QMAN_REV30 0x0300
+#define QMAN_REV31 0x0301
+extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
+
+extern struct gen_pool *qm_fqalloc; /* FQID allocator */
+extern struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+u32 qm_get_pools_sdqcr(void);
+
+int qman_wq_alloc(void);
+void qman_liodn_fixup(u16 channel);
+int qman_set_sdest(u16 channel, unsigned int cpu_idx);
+
+struct qman_portal *qman_create_affine_portal(
+			const struct qm_portal_config *config,
+			const struct qman_cgrs *cgrs);
+const struct qm_portal_config *qman_destroy_affine_portal(void);
+
+/*
+ * qman_query_fq - Queries FQD fields (via h/w query command)
+ * @fq: the frame queue object to be queried
+ * @fqd: storage for the queried FQD fields
+ */
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
+
+/*
+ * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
+ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
+ * FQID(n) to fill in the frame queue ID.
+ */
+#define QM_VDQCR_PRECEDENCE_VDQCR	0x0
+#define QM_VDQCR_PRECEDENCE_SDQCR	0x80000000
+#define QM_VDQCR_EXACT			0x40000000
+#define QM_VDQCR_NUMFRAMES_MASK		0x3f000000
+#define QM_VDQCR_NUMFRAMES_SET(n)	(((n) & 0x3f) << 24)
+#define QM_VDQCR_NUMFRAMES_GET(n)	(((n) >> 24) & 0x3f)
+#define QM_VDQCR_NUMFRAMES_TILLEMPTY	QM_VDQCR_NUMFRAMES_SET(0)
+
+#define QMAN_VOLATILE_FLAG_WAIT	     0x00000001 /* wait if VDQCR is in use */
+#define QMAN_VOLATILE_FLAG_WAIT_INT  0x00000002 /* if wait, interruptible? */
+#define QMAN_VOLATILE_FLAG_FINISH    0x00000004 /* wait till VDQCR completes */
+
+/*
+ * qman_volatile_dequeue - Issue a volatile dequeue command
+ * @fq: the frame queue object to dequeue from
+ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
+ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
+ *
+ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
+ * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
+ * the VDQCR is already in use, otherwise returns non-zero for failure. If
+ * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
+ * the VDQCR command has finished executing (ie. once the callback for the last
+ * DQRR entry resulting from the VDQCR command has been called). If not using
+ * the FINISH flag, completion can be determined either by detecting the
+ * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
+ * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting
+ * for the QMAN_FQ_STATE_VDQCR bit to disappear.
+ */
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
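+
+/*
+ * Sketch (illustrative): drain up to three frames from a parked or
+ * retired FQ, blocking until the command has completed:
+ *
+ *	int err = qman_volatile_dequeue(fq,
+ *			QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH,
+ *			QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_NUMFRAMES_SET(3));
+ */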
+
+int qman_alloc_fq_table(u32 num_fqids);
+
+/*   QMan s/w corenet portal, low-level i/face	 */
+
+/*
+ * For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
+ * dequeue TYPE. Choose TOKEN (8-bit).
+ * If SOURCE == CHANNELS,
+ *   Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
+ *   You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ *   priority.
+ * If SOURCE == SPECIFICWQ,
+ *     Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ *     channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ *     work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ *     same value.
+ */
+#define QM_SDQCR_SOURCE_CHANNELS	0x0
+#define QM_SDQCR_SOURCE_SPECIFICWQ	0x40000000
+#define QM_SDQCR_COUNT_EXACT1		0x0
+#define QM_SDQCR_COUNT_UPTO3		0x20000000
+#define QM_SDQCR_DEDICATED_PRECEDENCE	0x10000000
+#define QM_SDQCR_TYPE_MASK		0x03000000
+#define QM_SDQCR_TYPE_NULL		0x0
+#define QM_SDQCR_TYPE_PRIO_QOS		0x01000000
+#define QM_SDQCR_TYPE_ACTIVE_QOS	0x02000000
+#define QM_SDQCR_TYPE_ACTIVE		0x03000000
+#define QM_SDQCR_TOKEN_MASK		0x00ff0000
+#define QM_SDQCR_TOKEN_SET(v)		(((v) & 0xff) << 16)
+#define QM_SDQCR_TOKEN_GET(v)		(((v) >> 16) & 0xff)
+#define QM_SDQCR_CHANNELS_DEDICATED	0x00008000
+#define QM_SDQCR_SPECIFICWQ_MASK	0x000000f7
+#define QM_SDQCR_SPECIFICWQ_DEDICATED	0x00000000
+#define QM_SDQCR_SPECIFICWQ_POOL(n)	((n) << 4)
+#define QM_SDQCR_SPECIFICWQ_WQ(n)	(n)
+
+/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
+#define QM_VDQCR_FQID_MASK		0x00ffffff
+#define QM_VDQCR_FQID(n)		((n) & QM_VDQCR_FQID_MASK)
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * Channels with frame availability
+ */
+#define QM_PIRQ_DQAVAIL	0x0000ffff
+
+/* The DQAVAIL interrupt fields break down into these bits; */
+#define QM_DQAVAIL_PORTAL	0x8000		/* Portal channel */
+#define QM_DQAVAIL_POOL(n)	(0x8000 >> (n))	/* Pool channel, n==[1..15] */
+#define QM_DQAVAIL_MASK		0xffff
+/* This mask contains all the "irqsource" bits visible to API users */
+#define QM_PIRQ_VISIBLE	(QM_PIRQ_SLOW | QM_PIRQ_DQRI)
+
+extern void *affine_portals[NR_CPUS];
+const struct qm_portal_config *qman_get_qm_portal_config(
+						struct qman_portal *portal);
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
new file mode 100644
index 0000000..450c3c4
--- /dev/null
+++ b/include/soc/fsl/qman.h
@@ -0,0 +1,1076 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FSL_QMAN_H
+#define __FSL_QMAN_H
+
+#include <linux/bitops.h>
+
+/* Hardware constants */
+#define QM_CHANNEL_SWPORTAL0 0
+#define QMAN_CHANNEL_POOL1 0x21
+#define QMAN_CHANNEL_POOL1_REV3 0x401
+extern u16 qm_channel_pool1;
+
+/* Portal processing (interrupt) sources */
+#define QM_PIRQ_CSCI	0x00100000	/* Congestion State Change */
+#define QM_PIRQ_EQCI	0x00080000	/* Enqueue Command Committed */
+#define QM_PIRQ_EQRI	0x00040000	/* EQCR Ring (below threshold) */
+#define QM_PIRQ_DQRI	0x00020000	/* DQRR Ring (non-empty) */
+#define QM_PIRQ_MRI	0x00010000	/* MR Ring (non-empty) */
+/*
+ * This mask contains all the interrupt sources that need handling except DQRI,
+ * ie. that if present should trigger slow-path processing.
+ */
+#define QM_PIRQ_SLOW	(QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
+			 QM_PIRQ_MRI)
+
+/* For qman_static_dequeue_*** APIs */
+#define QM_SDQCR_CHANNELS_POOL_MASK	0x00007fff
+/* for n in [1,15] */
+#define QM_SDQCR_CHANNELS_POOL(n)	(0x00008000 >> (n))
+/* for conversion from n of qm_channel */
+static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
+{
+	return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
+}
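+
+/*
+ * Worked example (illustrative): on a rev3 QMan, qm_channel_pool1 is
+ * QMAN_CHANNEL_POOL1_REV3 (0x401), so pool channel 0x403 converts to
+ * QM_SDQCR_CHANNELS_POOL(0x403 + 1 - 0x401), ie. pool 3.
+ */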
+
+/* --- QMan data structures (and associated constants) --- */
+
+/* "Frame Descriptor (FD)" */
+struct qm_fd {
+	union {
+		struct {
+			u8 cfg8b_w1;
+			u8 bpid;	/* Buffer Pool ID */
+			u8 cfg8b_w3;
+			u8 addr_hi;	/* high 8-bits of 40-bit address */
+			__be32 addr_lo;	/* low 32-bits of 40-bit address */
+		} __packed;
+		__be64 data;
+	};
+	__be32 cfg;	/* format, offset, length / congestion */
+	union {
+		__be32 cmd;
+		__be32 status;
+	};
+} __aligned(8);
+
+#define QM_FD_FORMAT_SG		BIT(31)
+#define QM_FD_FORMAT_LONG	BIT(30)
+#define QM_FD_FORMAT_COMPOUND	BIT(29)
+#define QM_FD_FORMAT_MASK	GENMASK(31, 29)
+#define QM_FD_OFF_SHIFT		20
+#define QM_FD_OFF_MASK		GENMASK(28, 20)
+#define QM_FD_LEN_MASK		GENMASK(19, 0)
+#define QM_FD_LEN_BIG_MASK	GENMASK(28, 0)
+
+enum qm_fd_format {
+	/*
+	 * 'contig' implies a contiguous buffer, whereas 'sg' implies a
+	 * scatter-gather table. 'big' implies a 29-bit length with no offset
+	 * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
+	 * implies a s/g-like table, where each entry itself represents a frame
+	 * (contiguous or scatter-gather) and the 29-bit "length" is
+	 * interpreted purely for congestion calculations, ie. a "congestion
+	 * weight".
+	 */
+	qm_fd_contig = 0,
+	qm_fd_contig_big = QM_FD_FORMAT_LONG,
+	qm_fd_sg = QM_FD_FORMAT_SG,
+	qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
+	qm_fd_compound = QM_FD_FORMAT_COMPOUND
+};
+
+static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
+{
+	return be64_to_cpu(fd->data) & 0xffffffffffLLU;
+}
+
+static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
+{
+	return be64_to_cpu(fd->data) & 0xffffffffffLLU;
+}
+
+static inline void qm_fd_addr_set64(struct qm_fd *fd, u64 addr)
+{
+	fd->addr_hi = upper_32_bits(addr);
+	fd->addr_lo = cpu_to_be32(lower_32_bits(addr));
+}
+
+/*
+ * The 'format' field indicates the interpretation of the remaining
+ * 29 bits of the 32-bit word.
+ * If 'format' is _contig or _sg, 20b length and 9b offset.
+ * If 'format' is _contig_big or _sg_big, 29b length.
+ * If 'format' is _compound, 29b "congestion weight".
+ */
+static inline enum qm_fd_format qm_fd_get_format(const struct qm_fd *fd)
+{
+	return be32_to_cpu(fd->cfg) & QM_FD_FORMAT_MASK;
+}
+
+static inline int qm_fd_get_offset(const struct qm_fd *fd)
+{
+	return (be32_to_cpu(fd->cfg) & QM_FD_OFF_MASK) >> QM_FD_OFF_SHIFT;
+}
+
+static inline int qm_fd_get_length(const struct qm_fd *fd)
+{
+	return be32_to_cpu(fd->cfg) & QM_FD_LEN_MASK;
+}
+
+static inline int qm_fd_get_len_big(const struct qm_fd *fd)
+{
+	return be32_to_cpu(fd->cfg) & QM_FD_LEN_BIG_MASK;
+}
+
+static inline void qm_fd_set_param(struct qm_fd *fd, enum qm_fd_format fmt,
+				   int off, int len)
+{
+	fd->cfg = cpu_to_be32(fmt | (len & QM_FD_LEN_BIG_MASK) |
+			      ((off << QM_FD_OFF_SHIFT) & QM_FD_OFF_MASK));
+}
+
+#define qm_fd_set_contig(fd, off, len) \
+	qm_fd_set_param(fd, qm_fd_contig, off, len)
+#define qm_fd_set_sg(fd, off, len) qm_fd_set_param(fd, qm_fd_sg, off, len)
+#define qm_fd_set_contig_big(fd, len) \
+	qm_fd_set_param(fd, qm_fd_contig_big, 0, len)
+#define qm_fd_set_sg_big(fd, len) qm_fd_set_param(fd, qm_fd_sg_big, 0, len)
+
+static inline void qm_fd_clear_fd(struct qm_fd *fd)
+{
+	fd->data = 0;
+	fd->cfg = 0;
+	fd->cmd = 0;
+}
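+
+/*
+ * Minimal sketch of building a frame descriptor for a contiguous buffer
+ * (illustrative; assumes 'dma' is a device-mapped DMA address and that
+ * 'off'/'len' fit the 9-bit offset and 20-bit length fields):
+ *
+ *	struct qm_fd fd;
+ *
+ *	qm_fd_clear_fd(&fd);
+ *	qm_fd_addr_set64(&fd, dma);
+ *	qm_fd_set_contig(&fd, off, len);
+ */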
+
+/* Scatter/Gather table entry */
+struct qm_sg_entry {
+	union {
+		struct {
+			u8 __reserved1[3];
+			u8 addr_hi;	/* high 8-bits of 40-bit address */
+			__be32 addr_lo;	/* low 32-bits of 40-bit address */
+		};
+		__be64 data;
+	};
+	__be32 cfg;	/* E bit, F bit, length */
+	u8 __reserved2;
+	u8 bpid;
+	__be16 offset; /* 13-bit, _res[13-15]*/
+} __packed;
+
+#define QM_SG_LEN_MASK	GENMASK(29, 0)
+#define QM_SG_OFF_MASK	GENMASK(12, 0)
+#define QM_SG_FIN	BIT(30)
+#define QM_SG_EXT	BIT(31)
+
+static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
+{
+	return be64_to_cpu(sg->data) & 0xffffffffffLLU;
+}
+
+static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
+{
+	return be64_to_cpu(sg->data) & 0xffffffffffLLU;
+}
+
+static inline void qm_sg_entry_set64(struct qm_sg_entry *sg, u64 addr)
+{
+	sg->addr_hi = upper_32_bits(addr);
+	sg->addr_lo = cpu_to_be32(lower_32_bits(addr));
+}
+
+static inline bool qm_sg_entry_is_final(const struct qm_sg_entry *sg)
+{
+	return be32_to_cpu(sg->cfg) & QM_SG_FIN;
+}
+
+static inline bool qm_sg_entry_is_ext(const struct qm_sg_entry *sg)
+{
+	return be32_to_cpu(sg->cfg) & QM_SG_EXT;
+}
+
+static inline int qm_sg_entry_get_len(const struct qm_sg_entry *sg)
+{
+	return be32_to_cpu(sg->cfg) & QM_SG_LEN_MASK;
+}
+
+static inline void qm_sg_entry_set_len(struct qm_sg_entry *sg, int len)
+{
+	sg->cfg = cpu_to_be32(len & QM_SG_LEN_MASK);
+}
+
+static inline void qm_sg_entry_set_f(struct qm_sg_entry *sg, int len)
+{
+	sg->cfg = cpu_to_be32(QM_SG_FIN | (len & QM_SG_LEN_MASK));
+}
+
+static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg)
+{
+	return be32_to_cpu(sg->offset) & QM_SG_OFF_MASK;
+}
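+
+/*
+ * Sketch (illustrative): fill a two-entry scatter/gather table, marking
+ * the second entry as final. 'sg' is assumed to point at table memory
+ * already mapped for the device:
+ *
+ *	qm_sg_entry_set64(&sg[0], dma0);
+ *	qm_sg_entry_set_len(&sg[0], len0);
+ *	qm_sg_entry_set64(&sg[1], dma1);
+ *	qm_sg_entry_set_f(&sg[1], len1);
+ */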
+
+/* "Frame Dequeue Response" */
+struct qm_dqrr_entry {
+	u8 verb;
+	u8 stat;
+	u16 seqnum;	/* 15-bit */
+	u8 tok;
+	u8 __reserved2[3];
+	u32 fqid;	/* 24-bit */
+	u32 contextB;
+	struct qm_fd fd;
+	u8 __reserved4[32];
+} __packed;
+#define QM_DQRR_VERB_VBIT		0x80
+#define QM_DQRR_VERB_MASK		0x7f	/* where the verb contains; */
+#define QM_DQRR_VERB_FRAME_DEQUEUE	0x60	/* "this format" */
+#define QM_DQRR_STAT_FQ_EMPTY		0x80	/* FQ empty */
+#define QM_DQRR_STAT_FQ_HELDACTIVE	0x40	/* FQ held active */
+#define QM_DQRR_STAT_FQ_FORCEELIGIBLE	0x20	/* FQ was force-eligible'd */
+#define QM_DQRR_STAT_FD_VALID		0x10	/* has a non-NULL FD */
+#define QM_DQRR_STAT_UNSCHEDULED	0x02	/* Unscheduled dequeue */
+#define QM_DQRR_STAT_DQCR_EXPIRED	0x01	/* VDQCR or PDQCR expired*/
+
+/* "ERN Message Response" */
+/* "FQ State Change Notification" */
+union qm_mr_entry {
+	struct {
+		u8 verb;
+		u8 __reserved[63];
+	};
+	struct {
+		u8 verb;
+		u8 dca;
+		u16 seqnum;
+		u8 rc;		/* Rej Code: 8-bit */
+		u8 orp_hi;	/* ORP: 24-bit */
+		u16 orp_lo;
+		u32 fqid;	/* 24-bit */
+		u32 tag;
+		struct qm_fd fd;
+		u8 __reserved1[32];
+	} __packed ern;
+	struct {
+		u8 verb;
+		u8 fqs;		/* Frame Queue Status */
+		u8 __reserved1[6];
+		u32 fqid;	/* 24-bit */
+		u32 contextB;
+		u8 __reserved2[48];
+	} __packed fq;		/* FQRN/FQRNI/FQRL/FQPN */
+};
+#define QM_MR_VERB_VBIT			0x80
+/*
+ * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
+ * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
+ * from the other MR types by noting if the 0x20 bit is unset.
+ */
+#define QM_MR_VERB_TYPE_MASK		0x27
+#define QM_MR_VERB_DC_ERN		0x20
+#define QM_MR_VERB_FQRN			0x21
+#define QM_MR_VERB_FQRNI		0x22
+#define QM_MR_VERB_FQRL			0x23
+#define QM_MR_VERB_FQPN			0x24
+#define QM_MR_RC_MASK			0xf0	/* contains one of; */
+#define QM_MR_RC_CGR_TAILDROP		0x00
+#define QM_MR_RC_WRED			0x10
+#define QM_MR_RC_ERROR			0x20
+#define QM_MR_RC_ORPWINDOW_EARLY	0x30
+#define QM_MR_RC_ORPWINDOW_LATE		0x40
+#define QM_MR_RC_FQ_TAILDROP		0x50
+#define QM_MR_RC_ORPWINDOW_RETIRED	0x60
+#define QM_MR_RC_ORP_ZERO		0x70
+#define QM_MR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
+#define QM_MR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
+
+/*
+ * An identical structure of FQD fields is present in the "Init FQ" command and
+ * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type.
+ * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the
+ * latter has two inlines to assist with converting to/from the mant+exp
+ * representation.
+ */
+struct qm_fqd_stashing {
+	/* See QM_STASHING_EXCL_<...> */
+	u8 exclusive;
+	/* Numbers of cachelines */
+	u8 cl; /* _res[6-7], as[4-5], ds[2-3], cs[0-1] */
+};
+
+struct qm_fqd_oac {
+	/* "Overhead Accounting Control", see QM_OAC_<...> */
+	u8 oac; /* oac[6-7], _res[0-5] */
+	/* Two's-complement value (-128 to +127) */
+	s8 oal; /* "Overhead Accounting Length" */
+};
+
+struct qm_fqd {
+	/* _res[6-7], orprws[3-5], oa[2], olws[0-1] */
+	u8 orpc;
+	u8 cgid;
+	__be16 fq_ctrl;	/* See QM_FQCTRL_<...> */
+	__be16 dest_wq;	/* channel[3-15], wq[0-2] */
+	__be16 ics_cred; /* 15-bit */
+	/*
+	 * For "Initialize Frame Queue" commands, the write-enable mask
+	 * determines whether 'td' or 'oac_init' is observed. For query
+	 * commands, this field is always 'td', and 'oac_query' (below) reflects
+	 * the Overhead ACcounting values.
+	 */
+	union {
+		__be16 td; /* "Taildrop": _res[13-15], mant[5-12], exp[0-4] */
+		struct qm_fqd_oac oac_init;
+	};
+	__be32 context_b;
+	union {
+		/* Treat it as 64-bit opaque */
+		__be64 opaque;
+		struct {
+			__be32 hi;
+			__be32 lo;
+		};
+		/* Treat it as s/w portal stashing config */
+		/* see "FQD Context_A field used for [...]" */
+		struct {
+			struct qm_fqd_stashing stashing;
+			/*
+			 * 48-bit address of FQ context to
+			 * stash, must be cacheline-aligned
+			 */
+			__be16 context_hi;
+			__be32 context_lo;
+		} __packed;
+	} context_a;
+	struct qm_fqd_oac oac_query;
+} __packed;
+
+#define QM_FQD_CHAN_OFF		3
+#define QM_FQD_WQ_MASK		GENMASK(2, 0)
+#define QM_FQD_TD_EXP_MASK	GENMASK(4, 0)
+#define QM_FQD_TD_MANT_OFF	5
+#define QM_FQD_TD_MANT_MASK	GENMASK(12, 5)
+#define QM_FQD_TD_MAX		0xe0000000
+#define QM_FQD_TD_MANT_MAX	0xff
+#define QM_FQD_OAC_OFF		6
+#define QM_FQD_AS_OFF		4
+#define QM_FQD_DS_OFF		2
+#define QM_FQD_XS_MASK		0x3
+
+/* 64-bit converters for context_hi/lo */
+static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
+{
+	return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
+}
+
+static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
+{
+	return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
+}
+
+static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
+{
+	return qm_fqd_stashing_get64(fqd);
+}
+
+static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
+{
+	fqd->context_a.context_hi = cpu_to_be16(upper_32_bits(addr));
+	fqd->context_a.context_lo = cpu_to_be32(lower_32_bits(addr));
+}
+
+static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
+{
+	fqd->context_a.hi = cpu_to_be32(upper_32_bits(addr));
+	fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr));
+}
+
+/* convert a threshold value into mant+exp representation */
+static inline int qm_fqd_set_taildrop(struct qm_fqd *fqd, u32 val,
+				      int roundup)
+{
+	u32 e = 0;
+	int td, oddbit = 0;
+
+	if (val > QM_FQD_TD_MAX)
+		return -ERANGE;
+
+	while (val > QM_FQD_TD_MANT_MAX) {
+		oddbit = val & 1;
+		val >>= 1;
+		e++;
+		if (roundup && oddbit)
+			val++;
+	}
+
+	td = (val << QM_FQD_TD_MANT_OFF) & QM_FQD_TD_MANT_MASK;
+	td |= (e & QM_FQD_TD_EXP_MASK);
+	fqd->td = cpu_to_be16(td);
+	return 0;
+}
+/* and the other direction */
+static inline int qm_fqd_get_taildrop(const struct qm_fqd *fqd)
+{
+	int td = be16_to_cpu(fqd->td);
+
+	return ((td & QM_FQD_TD_MANT_MASK) >> QM_FQD_TD_MANT_OFF)
+		<< (td & QM_FQD_TD_EXP_MASK);
+}
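+
+/*
+ * Worked example: a threshold of 0x300 (768) exceeds QM_FQD_TD_MANT_MAX
+ * (0xff), so qm_fqd_set_taildrop() halves it twice, giving mant = 0xc0
+ * and exp = 2; qm_fqd_get_taildrop() reconstructs 0xc0 << 2 == 0x300.
+ */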
+
+static inline void qm_fqd_set_stashing(struct qm_fqd *fqd, u8 as, u8 ds, u8 cs)
+{
+	struct qm_fqd_stashing *st = &fqd->context_a.stashing;
+
+	st->cl = ((as & QM_FQD_XS_MASK) << QM_FQD_AS_OFF) |
+		 ((ds & QM_FQD_XS_MASK) << QM_FQD_DS_OFF) |
+		 (cs & QM_FQD_XS_MASK);
+}
+
+static inline u8 qm_fqd_get_stashing(const struct qm_fqd *fqd)
+{
+	return fqd->context_a.stashing.cl;
+}
+
+static inline void qm_fqd_set_oac(struct qm_fqd *fqd, u8 val)
+{
+	fqd->oac_init.oac = val << QM_FQD_OAC_OFF;
+}
+
+static inline void qm_fqd_set_oal(struct qm_fqd *fqd, s8 val)
+{
+	fqd->oac_init.oal = val;
+}
+
+static inline void qm_fqd_set_destwq(struct qm_fqd *fqd, int ch, int wq)
+{
+	fqd->dest_wq = cpu_to_be16((ch << QM_FQD_CHAN_OFF) |
+				   (wq & QM_FQD_WQ_MASK));
+}
+
+static inline int qm_fqd_get_chan(const struct qm_fqd *fqd)
+{
+	return be16_to_cpu(fqd->dest_wq) >> QM_FQD_CHAN_OFF;
+}
+
+static inline int qm_fqd_get_wq(const struct qm_fqd *fqd)
+{
+	return be16_to_cpu(fqd->dest_wq) & QM_FQD_WQ_MASK;
+}
+
+/* See "Frame Queue Descriptor (FQD)" */
+/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
+#define QM_FQCTRL_MASK		0x07ff	/* 'fq_ctrl' flags; */
+#define QM_FQCTRL_CGE		0x0400	/* Congestion Group Enable */
+#define QM_FQCTRL_TDE		0x0200	/* Tail-Drop Enable */
+#define QM_FQCTRL_CTXASTASHING	0x0080	/* Context-A stashing */
+#define QM_FQCTRL_CPCSTASH	0x0040	/* CPC Stash Enable */
+#define QM_FQCTRL_FORCESFDR	0x0008	/* High-priority SFDRs */
+#define QM_FQCTRL_AVOIDBLOCK	0x0004	/* Don't block active */
+#define QM_FQCTRL_HOLDACTIVE	0x0002	/* Hold active in portal */
+#define QM_FQCTRL_PREFERINCACHE	0x0001	/* Aggressively cache FQD */
+#define QM_FQCTRL_LOCKINCACHE	QM_FQCTRL_PREFERINCACHE /* older naming */
+
+/* See "FQD Context_A field used for [...] */
+/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
+#define QM_STASHING_EXCL_ANNOTATION	0x04
+#define QM_STASHING_EXCL_DATA		0x02
+#define QM_STASHING_EXCL_CTX		0x01
+
+/* See "Intra Class Scheduling" */
+/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
+#define QM_OAC_ICS		0x2 /* Accounting for Intra-Class Scheduling */
+#define QM_OAC_CG		0x1 /* Accounting for Congestion Groups */
+
+/*
+ * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
+ * and associated commands/responses. The WRED parameters are calculated from
+ * these fields as follows;
+ *   MaxTH = MA * (2 ^ Mn)
+ *   Slope = SA / (2 ^ Sn)
+ *    MaxP = 4 * (Pn + 1)
+ */
+struct qm_cgr_wr_parm {
+	/* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */
+	u32 word;
+};
+/*
+ * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
+ * management commands, this is padded to a 16-bit structure field, so that's
+ * how we represent it here. The congestion state threshold is calculated from
+ * these fields as follows;
+ *   CS threshold = TA * (2 ^ Tn)
+ */
+struct qm_cgr_cs_thres {
+	/* _res[13-15], TA[5-12], Tn[0-4] */
+	u16 word;
+};
+/*
+ * This identical structure of CGR fields is present in the "Init/Modify CGR"
+ * commands and the "Query CGR" result. It's suctioned out here into its own
+ * struct.
+ */
+struct __qm_mc_cgr {
+	struct qm_cgr_wr_parm wr_parm_g;
+	struct qm_cgr_wr_parm wr_parm_y;
+	struct qm_cgr_wr_parm wr_parm_r;
+	u8 wr_en_g;	/* boolean, use QM_CGR_EN */
+	u8 wr_en_y;	/* boolean, use QM_CGR_EN */
+	u8 wr_en_r;	/* boolean, use QM_CGR_EN */
+	u8 cscn_en;	/* boolean, use QM_CGR_EN */
+	union {
+		struct {
+			u16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_CTRL_* */
+			u16 cscn_targ_dcp_low;	/* CSCN_TARG_DCP low-16bits */
+		};
+		u32 cscn_targ;	/* use QM_CGR_TARG_* */
+	};
+	u8 cstd_en;	/* boolean, use QM_CGR_EN */
+	u8 cs;		/* boolean, only used in query response */
+	struct qm_cgr_cs_thres cs_thres; /* use qm_cgr_cs_thres_set64() */
+	u8 mode;	/* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
+} __packed;
+#define QM_CGR_EN		0x01 /* For wr_en_*, cscn_en, cstd_en */
+#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT	0x8000 /* value written to portal bit*/
+#define QM_CGR_TARG_UDP_CTRL_DCP	0x4000 /* 0: SWP, 1: DCP */
+#define QM_CGR_TARG_PORTAL(n)	(0x80000000 >> (n)) /* s/w portal, 0-9 */
+#define QM_CGR_TARG_FMAN0	0x00200000 /* direct-connect portal: fman0 */
+#define QM_CGR_TARG_FMAN1	0x00100000 /*			   : fman1 */
+/* Convert CGR thresholds to/from "cs_thres" format */
+static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
+{
+	return ((u64)((th->word >> 5) & 0xff)) << (th->word & 0x1f);
+}
+
+static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
+					int roundup)
+{
+	u32 e = 0;
+	int oddbit = 0;
+
+	while (val > 0xff) {
+		oddbit = val & 1;
+		val >>= 1;
+		e++;
+		if (roundup && oddbit)
+			val++;
+	}
+	th->word = ((val & 0xff) << 5) | (e & 0x1f);
+	return 0;
+}
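+
+/*
+ * Worked example: a threshold of 1024 encodes as TA = 0x80 with Tn = 3
+ * (0x80 << 3 == 1024); qm_cgr_cs_thres_get64() recovers the original
+ * value. Values that aren't exactly representable are rounded up when
+ * 'roundup' is non-zero, otherwise rounded down.
+ */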
+
+/* "Initialize FQ" */
+struct qm_mcc_initfq {
+	u8 __reserved1[2];
+	u16 we_mask;	/* Write Enable Mask */
+	u32 fqid;	/* 24-bit */
+	u16 count;	/* Initialises 'count+1' FQDs */
+	struct qm_fqd fqd; /* the FQD fields go here */
+	u8 __reserved2[30];
+} __packed;
+/* "Initialize/Modify CGR" */
+struct qm_mcc_initcgr {
+	u8 __reserved1[2];
+	u16 we_mask;	/* Write Enable Mask */
+	struct __qm_mc_cgr cgr;	/* CGR fields */
+	u8 __reserved2[2];
+	u8 cgid;
+	u8 __reserved3[32];
+} __packed;
+
+/* INITFQ-specific flags */
+#define QM_INITFQ_WE_MASK		0x01ff	/* 'Write Enable' flags; */
+#define QM_INITFQ_WE_OAC		0x0100
+#define QM_INITFQ_WE_ORPC		0x0080
+#define QM_INITFQ_WE_CGID		0x0040
+#define QM_INITFQ_WE_FQCTRL		0x0020
+#define QM_INITFQ_WE_DESTWQ		0x0010
+#define QM_INITFQ_WE_ICSCRED		0x0008
+#define QM_INITFQ_WE_TDTHRESH		0x0004
+#define QM_INITFQ_WE_CONTEXTB		0x0002
+#define QM_INITFQ_WE_CONTEXTA		0x0001
+/* INITCGR/MODIFYCGR-specific flags */
+#define QM_CGR_WE_MASK			0x07ff	/* 'Write Enable Mask'; */
+#define QM_CGR_WE_WR_PARM_G		0x0400
+#define QM_CGR_WE_WR_PARM_Y		0x0200
+#define QM_CGR_WE_WR_PARM_R		0x0100
+#define QM_CGR_WE_WR_EN_G		0x0080
+#define QM_CGR_WE_WR_EN_Y		0x0040
+#define QM_CGR_WE_WR_EN_R		0x0020
+#define QM_CGR_WE_CSCN_EN		0x0010
+#define QM_CGR_WE_CSCN_TARG		0x0008
+#define QM_CGR_WE_CSTD_EN		0x0004
+#define QM_CGR_WE_CS_THRES		0x0002
+#define QM_CGR_WE_MODE			0x0001
+
+#define QMAN_CGR_FLAG_USE_INIT	     0x00000001
+
+	/* Portal and Frame Queues */
+/* Represents a managed portal */
+struct qman_portal;
+
+/*
+ * This object type represents QMan frame queue descriptors (FQD), it is
+ * cacheline-aligned, and initialised by qman_create_fq(). The structure is
+ * defined further down.
+ */
+struct qman_fq;
+
+/*
+ * This object type represents a QMan congestion group, it is defined further
+ * down.
+ */
+struct qman_cgr;
+
+/*
+ * This enum, and the callback type that returns it, are used when handling
+ * dequeued frames via DQRR. Note that for "null" callbacks registered with the
+ * portal object (for handling dequeues that do not demux because contextB is
+ * NULL), the return value *MUST* be qman_cb_dqrr_consume.
+ */
+enum qman_cb_dqrr_result {
+	/* DQRR entry can be consumed */
+	qman_cb_dqrr_consume,
+	/* Like _consume, but requests parking - FQ must be held-active */
+	qman_cb_dqrr_park,
+	/* Does not consume, for DCA mode only. */
+	qman_cb_dqrr_defer,
+	/*
+	 * Stop processing without consuming this ring entry. Exits the current
+	 * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
+	 * an interrupt handler, the callback would typically call
+	 * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
+	 * otherwise the interrupt will reassert immediately.
+	 */
+	qman_cb_dqrr_stop,
+	/* Like qman_cb_dqrr_stop, but consumes the current entry. */
+	qman_cb_dqrr_consume_stop
+};
+typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
+					struct qman_fq *fq,
+					const struct qm_dqrr_entry *dqrr);
+
+/*
+ * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
+ * are always consumed after the callback returns.
+ */
+typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
+			   const union qm_mr_entry *msg);
+
+/*
+ * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
+ * held-active + held-suspended are just "sched". A state like "retired" is
+ * not assumed until the transition completes (ie. QMAN_FQ_STATE_CHANGING is
+ * set until
+ * then, to indicate it's completing and to gate attempts to retry the retire
+ * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
+ * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
+ * index rather than the FQ that ring entry corresponds to), so repeated park
+ * commands are allowed (if you're silly enough to try) but won't change FQ
+ * state, and the resulting park notifications move FQs from "sched" to
+ * "parked".
+ */
+enum qman_fq_state {
+	qman_fq_state_oos,
+	qman_fq_state_parked,
+	qman_fq_state_sched,
+	qman_fq_state_retired
+};
+
+#define QMAN_FQ_STATE_CHANGING	     0x80000000 /* 'state' is changing */
+#define QMAN_FQ_STATE_NE	     0x40000000 /* retired FQ isn't empty */
+#define QMAN_FQ_STATE_ORL	     0x20000000 /* retired FQ has ORL */
+#define QMAN_FQ_STATE_BLOCKOOS	     0xe0000000 /* if any are set, no OOS */
+#define QMAN_FQ_STATE_CGR_EN	     0x10000000 /* CGR enabled */
+#define QMAN_FQ_STATE_VDQCR	     0x08000000 /* being volatile dequeued */
+
+/*
+ * Frame queue objects (struct qman_fq) are stored within memory passed to
+ * qman_create_fq(), as this allows stashing of caller-provided demux callback
+ * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
+ * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
+ * they should;
+ *
+ * (a) extend the qman_fq structure with their state; eg.
+ *
+ *     // myfq is allocated and driver_fq callbacks filled in;
+ *     struct my_fq {
+ *	   struct qman_fq base;
+ *	   int an_extra_field;
+ *	   [ ... add other fields to be associated with each FQ ...]
+ *     } *myfq = some_my_fq_allocator();
+ *     struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
+ *
+ *     // in a dequeue callback, access extra fields from 'fq' via a cast;
+ *     struct my_fq *myfq = (struct my_fq *)fq;
+ *     do_something_with(myfq->an_extra_field);
+ *     [...]
+ *
+ * (b) when and if configuring the FQ for context stashing, specify how ever
+ *     many cachelines are required to stash 'struct my_fq', to accelerate not
+ *     only the QMan driver but the callback as well.
+ */
+
+struct qman_fq_cb {
+	qman_cb_dqrr dqrr;	/* for dequeued frames */
+	qman_cb_mr ern;		/* for s/w ERNs */
+	qman_cb_mr fqs;		/* frame-queue state changes*/
+};
+
+struct qman_fq {
+	/* Caller of qman_create_fq() provides these demux callbacks */
+	struct qman_fq_cb cb;
+	/*
+	 * These are internal to the driver, don't touch. In particular, they
+	 * may change, be removed, or extended (so you shouldn't rely on
+	 * sizeof(qman_fq) being a constant).
+	 */
+	u32 fqid, idx;
+	unsigned long flags;
+	enum qman_fq_state state;
+	int cgr_groupid;
+};
+
+/*
+ * This callback type is used when handling congestion group entry/exit.
+ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
+ */
+typedef void (*qman_cb_cgr)(struct qman_portal *qm,
+			    struct qman_cgr *cgr, int congested);
+
+struct qman_cgr {
+	/* Set these prior to qman_create_cgr() */
+	u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
+	qman_cb_cgr cb;
+	/* These are private to the driver */
+	u16 chan; /* portal channel this object is created on */
+	struct list_head node;
+};
+
+/* Flags to qman_create_fq() */
+#define QMAN_FQ_FLAG_NO_ENQUEUE	     0x00000001 /* can't enqueue */
+#define QMAN_FQ_FLAG_NO_MODIFY	     0x00000002 /* can only enqueue */
+#define QMAN_FQ_FLAG_TO_DCPORTAL     0x00000004 /* consumed by CAAM/PME/Fman */
+#define QMAN_FQ_FLAG_DYNAMIC_FQID    0x00000020 /* (de)allocate fqid */
+
+/* Flags to qman_init_fq() */
+#define QMAN_INITFQ_FLAG_SCHED	     0x00000001 /* schedule rather than park */
+#define QMAN_INITFQ_FLAG_LOCAL	     0x00000004 /* set dest portal */
+
+	/* Portal Management */
+
+/**
+ * qman_p_irqsource_add - add processing sources to be interrupt-driven
+ * @p: the portal on which to operate
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Adds processing sources that should be interrupt-driven (rather than
+ * processed via qman_poll_***() functions).
+ */
+void qman_p_irqsource_add(struct qman_portal *p, u32 bits);
+
+/**
+ * qman_p_irqsource_remove - remove processing sources from being int-driven
+ * @p: the portal on which to operate
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Removes processing sources from being interrupt-driven, so that they will
+ * instead be processed via qman_poll_***() functions.
+ */
+void qman_p_irqsource_remove(struct qman_portal *p, u32 bits);
+
+/**
+ * qman_affine_cpus - return a mask of cpus that have affine portals
+ */
+const cpumask_t *qman_affine_cpus(void);
+
+/**
+ * qman_affine_channel - return the channel ID of a portal
+ * @cpu: the cpu whose affine portal is the subject of the query
+ *
+ * If @cpu is -1, the affine portal for the current CPU will be used. It is a
+ * bug to call this function for any value of @cpu (other than -1) that is not a
+ * member of the mask returned from qman_affine_cpus().
+ */
+u16 qman_affine_channel(int cpu);
+
+/**
+ * qman_get_affine_portal - return the portal pointer affine to cpu
+ * @cpu: the cpu whose affine portal is the subject of the query
+ */
+void *qman_get_affine_portal(int cpu);
+
+/**
+ * qman_p_poll_dqrr - process DQRR (fast-path) entries
+ * @p: the portal to poll
+ * @limit: the maximum number of DQRR entries to process
+ *
+ * Use of this function requires that DQRR processing not be interrupt-driven.
+ * The return value represents the number of DQRR entries processed.
+ */
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit);
+
+/**
+ * qman_p_static_dequeue_add - Add pool channels to the portal SDQCR
+ * @p: the portal whose SDQCR is being modified
+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
+ *
+ * Adds a set of pool channels to the portal's static dequeue command register
+ * (SDQCR). The requested pools are limited to those the portal has dequeue
+ * access to.
+ */
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
+
+	/* FQ management */
+/**
+ * qman_create_fq - Allocates a FQ
+ * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
+ * @flags: bit-mask of QMAN_FQ_FLAG_*** options
+ * @fq: memory for storing the 'fq', with callbacks filled in
+ *
+ * Creates a frame queue object for the given @fqid, unless the
+ * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
+ * dynamically allocated (or the function fails if none are available). Once
+ * created, the caller should not touch the memory at 'fq' except as extended to
+ * adjacent memory for user-defined fields (see the definition of "struct
+ * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
+ * pre-existing frame-queues that aren't to be otherwise interfered with; it
+ * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
+ * causes the driver to honour any contextB modifications requested in the
+ * qm_init_fq() API, as this indicates the frame queue will be consumed by a
+ * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
+ * software portals, the contextB field is controlled by the driver and can't be
+ * modified by the caller.
+ */
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
+
+/**
+ * qman_destroy_fq - Deallocates a FQ
+ * @fq: the frame queue object to release
+ *
+ * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
+ * not deallocated but the caller regains ownership, to do with as desired. The
+ * FQ must be in the 'out-of-service' or in the 'parked' state.
+ */
+void qman_destroy_fq(struct qman_fq *fq);
+
+/**
+ * qman_fq_fqid - Queries the frame queue ID of a FQ object
+ * @fq: the frame queue object to query
+ */
+u32 qman_fq_fqid(struct qman_fq *fq);
+
+/**
+ * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
+ * @fq: the frame queue object to modify, must be 'parked' or new.
+ * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
+ * @opts: the FQ-modification settings, as defined in the low-level API
+ *
+ * The @opts parameter comes from the low-level portal API. Select
+ * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
+ * rather than parked. NB, @opts can be NULL.
+ *
+ * Note that some fields and options within @opts may be ignored or overwritten
+ * by the driver;
+ * 1. the 'count' and 'fqid' fields are always ignored (this operation only
+ * affects one frame queue: @fq).
+ * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
+ * 'fqd' structure's 'context_b' field are sometimes overwritten;
+ *   - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
+ *     initialised to a value used by the driver for demux.
+ *   - if context_b is initialised for demux, so is context_a in case stashing
+ *     is requested (see item 4).
+ * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
+ * objects.)
+ * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
+ * 'dest::channel' field will be overwritten to match the portal used to issue
+ * the command. If the WE_DESTWQ write-enable bit had already been set by the
+ * caller, the channel workqueue will be left as-is, otherwise the write-enable
+ * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
+ * isn't set, the destination channel/workqueue fields and the write-enable bit
+ * are left as-is.
+ * 4. if the driver overwrites context_a/b for demux, then if
+ * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
+ * context_a.address fields and will leave the stashing fields provided by the
+ * user alone, otherwise it will zero out the context_a.stashing fields.
+ */
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
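+
+/*
+ * Typical sequence (illustrative sketch; error handling trimmed, and
+ * 'fq' assumed to be a struct qman_fq * with its callbacks already
+ * filled in). Creates a dynamically-allocated FQ, points it at the
+ * local portal and schedules it:
+ *
+ *	struct qm_mcc_initfq opts;
+ *	int err;
+ *
+ *	memset(&opts, 0, sizeof(opts));
+ *	err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
+ *	if (!err)
+ *		err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED |
+ *				   QMAN_INITFQ_FLAG_LOCAL, &opts);
+ */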
+
+/**
+ * qman_schedule_fq - Schedules a FQ
+ * @fq: the frame queue object to schedule, must be 'parked'
+ *
+ * Schedules the frame queue, which must be Parked; this takes it to
+ * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
+ */
+int qman_schedule_fq(struct qman_fq *fq);
+
+/**
+ * qman_retire_fq - Retires a FQ
+ * @fq: the frame queue object to retire
+ * @flags: FQ flags (QMAN_FQ_STATE*) if retirement completes immediately
+ *
+ * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
+ * the retirement was started asynchronously, otherwise it returns negative for
+ * failure. When this function returns zero, @flags is set to indicate whether
+ * the retired FQ is empty and/or whether it has any ORL fragments (to show up
+ * as ERNs). Otherwise the corresponding flags will be known when a subsequent
+ * FQRN message shows up on the portal's message ring.
+ *
+ * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
+ * Active state), the completion will be via the message ring as a FQRN - but
+ * the corresponding callback may occur before this function returns! Ie. the
+ * caller should be prepared to accept the callback as the function is called,
+ * not only once it has returned.
+ */
+int qman_retire_fq(struct qman_fq *fq, u32 *flags);
+
+/**
+ * qman_oos_fq - Puts a FQ "out of service"
+ * @fq: the frame queue object to be put out-of-service, must be 'retired'
+ *
+ * The frame queue must be retired and empty, and if any order restoration list
+ * was released as ERNs at the time of retirement, they must all be consumed.
+ */
+int qman_oos_fq(struct qman_fq *fq);
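+
+/*
+ * Teardown sketch (illustrative; assumes retirement completes
+ * immediately and the FQ is already empty):
+ *
+ *	u32 flags;
+ *
+ *	if (!qman_retire_fq(fq, &flags) && !(flags & QMAN_FQ_STATE_NE))
+ *		qman_oos_fq(fq);
+ *	qman_destroy_fq(fq);
+ */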
+
+/**
+ * qman_enqueue - Enqueue a frame to a frame queue
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ *
+ * Fills an entry in the EQCR of the portal affine to the current cpu to
+ * enqueue the frame described by @fd. The descriptor details are copied from
+ * @fd to the EQCR entry; the 'pid' field is ignored. The return value is
+ * non-zero on error, such as ring full.
+ */
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd);
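+
+/*
+ * Sketch (illustrative): bounded retry on a full EQCR, assuming 'fd'
+ * was built as in the earlier frame descriptor example:
+ *
+ *	int retries = 100;
+ *
+ *	while (qman_enqueue(fq, &fd) && --retries)
+ *		cpu_relax();
+ */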
+
+/**
+ * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
+ * @result: is set by the API to the base FQID of the allocated range
+ * @count: the number of FQIDs required
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_alloc_fqid_range(u32 *result, u32 count);
+#define qman_alloc_fqid(result) qman_alloc_fqid_range(result, 1)
+
+/**
+ * qman_release_fqid - Release the specified frame queue ID
+ * @fqid: the FQID to be released back to the resource pool
+ *
+ * This function can also be used to seed the allocator with
+ * FQID ranges that it can subsequently allocate from.
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_release_fqid(u32 fqid);
+
+	/* Pool-channel management */
+/**
+ * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
+ * @result: is set by the API to the base pool-channel ID of the allocated range
+ * @count: the number of pool-channel IDs required
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_alloc_pool_range(u32 *result, u32 count);
+#define qman_alloc_pool(result) qman_alloc_pool_range(result, 1)
+
+/**
+ * qman_release_pool - Release the specified pool-channel ID
+ * @id: the pool-chan ID to be released back to the resource pool
+ *
+ * This function can also be used to seed the allocator with
+ * pool-channel ID ranges that it can subsequently allocate from.
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_release_pool(u32 id);
+
+	/* CGR management */
+/**
+ * qman_create_cgr - Register a congestion group object
+ * @cgr: the 'cgr' object, with fields filled in
+ * @flags: QMAN_CGR_FLAG_* values
+ * @opts: optional state of CGR settings
+ *
+ * Registers this object to receive congestion entry/exit callbacks on the
+ * portal affine to the cpu on which this API is executed. If opts is
+ * NULL then only the callback (cgr->cb) function is registered. If @flags
+ * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
+ * any unspecified parameters) will be used rather than a modify hw command
+ * (which only modifies the specified parameters).
+ */
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+			struct qm_mcc_initcgr *opts);
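+
+/*
+ * Sketch (illustrative; 'my_cscn' is a hypothetical qman_cb_cgr
+ * callback and 'id' a CGR ID obtained via qman_alloc_cgrid()):
+ *
+ *	struct qm_mcc_initcgr opts = {};
+ *	struct qman_cgr cgr = { .cgrid = id, .cb = my_cscn };
+ *
+ *	opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
+ *	opts.cgr.cscn_en = QM_CGR_EN;
+ *	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, 0x8000, 1);
+ *	qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
+ */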
+
+/**
+ * qman_delete_cgr - Deregisters a congestion group object
+ * @cgr: the 'cgr' object to deregister
+ *
+ * "Unplugs" this CGR object from the portal affine to the cpu on which this API
+ * is executed. This must be executed on the same affine portal on which it
+ * was created.
+ */
+int qman_delete_cgr(struct qman_cgr *cgr);
+
+/**
+ * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
+ * @cgr: the 'cgr' object to deregister
+ *
+ * This will select the proper CPU and run qman_delete_cgr() there.
+ */
+void qman_delete_cgr_safe(struct qman_cgr *cgr);
+
+/**
+ * qman_query_cgr_congested - Queries CGR's congestion status
+ * @cgr: the 'cgr' object to query
+ * @result: returns 'cgr's congestion status, 1 (true) if congested
+ */
+int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result);
+
+/**
+ * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
+ * @result: is set by the API to the base CGR ID of the allocated range
+ * @count: the number of CGR IDs required
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_alloc_cgrid_range(u32 *result, u32 count);
+#define qman_alloc_cgrid(result) qman_alloc_cgrid_range(result, 1)
+
+/**
+ * qman_release_cgrid - Release the specified CGR ID
+ * @id: the CGR ID to be released back to the resource pool
+ *
+ * This function can also be used to seed the allocator with
+ * CGR ID ranges that it can subsequently allocate from.
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_release_cgrid(u32 id);
+
+#endif	/* __FSL_QMAN_H */
-- 
1.7.11.7
