Message-ID: <20170313190204.28190-6-roy.pledge@nxp.com>
Date:   Mon, 13 Mar 2017 15:02:01 -0400
From:   Roy Pledge <roy.pledge@....com>
To:     <gregkh@...uxfoundation.org>, <devel@...verdev.osuosl.org>,
        <linux-kernel@...r.kernel.org>, <stuyoder@...il.com>,
        <agraf@...e.de>, <arnd@...db.de>
CC:     <leoyang.li@....com>, <ioana.ciornei@....com>,
        <catalin.horghidan@....com>, <laurentiu.tudor@....com>,
        <ruxandra.radulescu@....com>, <haiying.wang@....com>,
        Roy Pledge <Roy.Pledge@....com>,
        Roy Pledge <roy.pledge@....com>,
        Stuart Yoder <stuart.yoder@....com>
Subject: [RESEND PATCH v6 5/8] bus: fsl-mc: dpio: add QBMan portal APIs for DPAA2

From: Roy Pledge <Roy.Pledge@....com>

Add QBman APIs for frame queue and buffer pool operations.
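
As an illustration (not part of this patch; 'swp', 'fqid' and 'fd' stand
in for values the caller already owns), a minimal enqueue to a frame
queue with these APIs would look like:

	struct qbman_eq_desc ed;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);	/* rejections go back to a FQ */
	qbman_eq_desc_set_fq(&ed, fqid);
	while (qbman_swp_enqueue(swp, &ed, fd) == -EBUSY)
		;	/* EQCR slot not yet free, retry */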

Signed-off-by: Roy Pledge <roy.pledge@....com>
Signed-off-by: Haiying Wang <haiying.wang@....com>
Signed-off-by: Stuart Yoder <stuart.yoder@....com>
---

Notes:
        -v6
           -checkpatch: Add additional () in QBMAN_IDX_FROM_DQRR
           -checkpatch: Replace check for NULL with ! operator
        -v5
           -fixed typo that caused a compile error
        -v4
           -adjust file location to be in drivers/staging
           -updated copyright
           -added definition for static dequeue token value
           -fixed bug in SDQCR #define
           -added missing #include guard in qbman-portal.h
           -added #define for QMAN_REV_MASK
           -whitespace, alignment cleanup
        -v3
           -replace hardcoded dequeue token with a #define and check that
            token when checking for a new result (bug fix suggested by
            Ioana Radulescu)
        -v2
           -fix bug in buffer release command, by setting bpid field
           -handle error (NULL) return value from qbman_swp_mc_complete()
           -error message cleanup
           -fix bug in sending management commands where the verb was
            not properly initialized

 drivers/staging/fsl-mc/bus/dpio/Makefile       |    2 +-
 drivers/staging/fsl-mc/bus/dpio/qbman-portal.c | 1033 ++++++++++++++++++++++++
 drivers/staging/fsl-mc/bus/dpio/qbman-portal.h |  469 +++++++++++
 3 files changed, 1503 insertions(+), 1 deletion(-)
 create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman-portal.c
 create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman-portal.h

diff --git a/drivers/staging/fsl-mc/bus/dpio/Makefile b/drivers/staging/fsl-mc/bus/dpio/Makefile
index 128befc..6588498 100644
--- a/drivers/staging/fsl-mc/bus/dpio/Makefile
+++ b/drivers/staging/fsl-mc/bus/dpio/Makefile
@@ -6,4 +6,4 @@ subdir-ccflags-y := -Werror
 
 obj-$(CONFIG_FSL_MC_DPIO) += fsl-mc-dpio.o
 
-fsl-mc-dpio-objs := dpio.o
+fsl-mc-dpio-objs := dpio.o qbman-portal.o
diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c
new file mode 100644
index 0000000..c75f546
--- /dev/null
+++ b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c
@@ -0,0 +1,1033 @@
+/*
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "../../include/dpaa2-global.h"
+
+#include "qbman-portal.h"
+
+#define QMAN_REV_4000   0x04000000
+#define QMAN_REV_4100   0x04010000
+#define QMAN_REV_4101   0x04010001
+#define QMAN_REV_MASK   0xffff0000
+
+/* All QBMan command and result structures use this "valid bit" encoding */
+#define QB_VALID_BIT ((u32)0x80)
+
+/* QBMan portal management command codes */
+#define QBMAN_MC_ACQUIRE       0x30
+#define QBMAN_WQCHAN_CONFIGURE 0x46
+
+/* CINH register offsets */
+#define QBMAN_CINH_SWP_EQAR    0x8c0
+#define QBMAN_CINH_SWP_DQPI    0xa00
+#define QBMAN_CINH_SWP_DCAP    0xac0
+#define QBMAN_CINH_SWP_SDQCR   0xb00
+#define QBMAN_CINH_SWP_RAR     0xcc0
+#define QBMAN_CINH_SWP_ISR     0xe00
+#define QBMAN_CINH_SWP_IER     0xe40
+#define QBMAN_CINH_SWP_ISDR    0xe80
+#define QBMAN_CINH_SWP_IIR     0xec0
+
+/* CENA register offsets */
+#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_CR      0x600
+#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((u32)(vb) >> 1))
+#define QBMAN_CENA_SWP_VDQCR   0x780
+
+/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
+#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
+
+/* Define token used to determine if response written to memory is valid */
+#define QMAN_DQ_TOKEN_VALID 1
+
+/* SDQCR attribute codes */
+#define QB_SDQCR_FC_SHIFT   29
+#define QB_SDQCR_FC_MASK    0x1
+#define QB_SDQCR_DCT_SHIFT  24
+#define QB_SDQCR_DCT_MASK   0x3
+#define QB_SDQCR_TOK_SHIFT  16
+#define QB_SDQCR_TOK_MASK   0xff
+#define QB_SDQCR_SRC_SHIFT  0
+#define QB_SDQCR_SRC_MASK   0xffff
+
+/* opaque token for static dequeues */
+#define QMAN_SDQCR_TOKEN    0xbb
+
+enum qbman_sdqcr_dct {
+	qbman_sdqcr_dct_null = 0,
+	qbman_sdqcr_dct_prio_ics,
+	qbman_sdqcr_dct_active_ics,
+	qbman_sdqcr_dct_active
+};
+
+enum qbman_sdqcr_fc {
+	qbman_sdqcr_fc_one = 0,
+	qbman_sdqcr_fc_up_to_3 = 1
+};
+
+/* Portal Access */
+
+static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
+{
+	return readl_relaxed(p->addr_cinh + offset);
+}
+
+static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
+					u32 value)
+{
+	writel_relaxed(value, p->addr_cinh + offset);
+}
+
+static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
+{
+	return p->addr_cena + offset;
+}
+
+#define QBMAN_CINH_SWP_CFG   0xd00
+
+#define SWP_CFG_DQRR_MF_SHIFT 20
+#define SWP_CFG_EST_SHIFT     16
+#define SWP_CFG_WN_SHIFT      14
+#define SWP_CFG_RPM_SHIFT     12
+#define SWP_CFG_DCM_SHIFT     10
+#define SWP_CFG_EPM_SHIFT     8
+#define SWP_CFG_SD_SHIFT      5
+#define SWP_CFG_SP_SHIFT      4
+#define SWP_CFG_SE_SHIFT      3
+#define SWP_CFG_DP_SHIFT      2
+#define SWP_CFG_DE_SHIFT      1
+#define SWP_CFG_EP_SHIFT      0
+
+static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn,	u8 est, u8 rpm, u8 dcm,
+				    u8 epm, int sd, int sp, int se,
+				    int dp, int de, int ep)
+{
+	return cpu_to_le32(max_fill << SWP_CFG_DQRR_MF_SHIFT |
+			    est << SWP_CFG_EST_SHIFT |
+			    wn << SWP_CFG_WN_SHIFT |
+			    rpm << SWP_CFG_RPM_SHIFT |
+			    dcm << SWP_CFG_DCM_SHIFT |
+			    epm << SWP_CFG_EPM_SHIFT |
+			    sd << SWP_CFG_SD_SHIFT |
+			    sp << SWP_CFG_SP_SHIFT |
+			    se << SWP_CFG_SE_SHIFT |
+			    dp << SWP_CFG_DP_SHIFT |
+			    de << SWP_CFG_DE_SHIFT |
+			    ep << SWP_CFG_EP_SHIFT);
+}
+
+/**
+ * qbman_swp_init() - Create a functional object representing the given
+ *                    QBMan portal descriptor.
+ * @d: the given qbman swp descriptor
+ *
+ * Return qbman_swp portal for success, NULL if the object cannot
+ * be created.
+ */
+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
+{
+	struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
+	u32 reg;
+
+	if (!p)
+		return NULL;
+	p->desc = d;
+	p->mc.valid_bit = QB_VALID_BIT;
+	p->sdq = 0;
+	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
+	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
+	p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
+
+	atomic_set(&p->vdq.available, 1);
+	p->vdq.valid_bit = QB_VALID_BIT;
+	p->dqrr.next_idx = 0;
+	p->dqrr.valid_bit = QB_VALID_BIT;
+
+	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
+		p->dqrr.dqrr_size = 4;
+		p->dqrr.reset_bug = 1;
+	} else {
+		p->dqrr.dqrr_size = 8;
+		p->dqrr.reset_bug = 0;
+	}
+
+	p->addr_cena = d->cena_bar;
+	p->addr_cinh = d->cinh_bar;
+
+	reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
+				1, /* Writes Non-cacheable */
+				0, /* EQCR_CI stashing threshold */
+				3, /* RPM: Valid bit mode, RCR in array mode */
+				2, /* DCM: Discrete consumption ack mode */
+				3, /* EPM: Valid bit mode, EQCR in array mode */
+				0, /* mem stashing drop enable == FALSE */
+				1, /* mem stashing priority == TRUE */
+				0, /* mem stashing enable == FALSE */
+				1, /* dequeue stashing priority == TRUE */
+				0, /* dequeue stashing enable == FALSE */
+				0); /* EQCR_CI stashing priority == FALSE */
+
+	qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
+	reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
+	if (!reg) {
+		pr_err("qbman: the portal is not enabled!\n");
+		kfree(p);
+		return NULL;
+	}
+
+	/*
+	 * SDQCR needs to be initialized to 0 when no channels are
+	 * being dequeued from or else the QMan HW will indicate an
+	 * error.  The values that were calculated above will be
+	 * applied when dequeues from a specific channel are enabled.
+	 */
+	qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
+	return p;
+}
+
+/**
+ * qbman_swp_finish() - Destroy the functional object representing the
+ *                      given QBMan portal descriptor.
+ * @p: the qbman_swp object to be destroyed
+ */
+void qbman_swp_finish(struct qbman_swp *p)
+{
+	kfree(p);
+}
+
+/**
+ * qbman_swp_interrupt_read_status()
+ * @p: the given software portal
+ *
+ * Return the value in the SWP_ISR register.
+ */
+u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
+{
+	return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
+}
+
+/**
+ * qbman_swp_interrupt_clear_status()
+ * @p: the given software portal
+ * @mask: The mask to clear in SWP_ISR register
+ */
+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
+{
+	qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
+}
+
+/**
+ * qbman_swp_interrupt_get_trigger() - read interrupt enable register
+ * @p: the given software portal
+ *
+ * Return the value in the SWP_IER register.
+ */
+u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
+{
+	return qbman_read_register(p, QBMAN_CINH_SWP_IER);
+}
+
+/**
+ * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
+ * @p: the given software portal
+ * @mask: The mask of bits to enable in SWP_IER
+ */
+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
+{
+	qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
+}
+
+/**
+ * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
+ * @p: the given software portal object
+ *
+ * Return the value in the SWP_IIR register.
+ */
+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
+{
+	return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
+}
+
+/**
+ * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
+ * @p: the given software portal object
+ * @inhibit: nonzero to inhibit all portal interrupts, zero to allow them
+ */
+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
+{
+	qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
+}
+
+/*
+ * Different management commands all use this common base layer of code to issue
+ * commands and poll for results.
+ */
+
+/*
+ * Returns a pointer to where the caller should fill in their management command
+ * (caller should ignore the verb byte)
+ */
+void *qbman_swp_mc_start(struct qbman_swp *p)
+{
+	return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
+}
+
+/*
+ * Commits the command: merges in the caller-supplied verb (which should not
+ * include the valid-bit) and submits the command to hardware
+ */
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
+{
+	u8 *v = cmd;
+
+	dma_wmb();
+	*v = cmd_verb | p->mc.valid_bit;
+}
+
+/*
+ * Checks for a completed response (returns non-NULL only if the response
+ * is complete).
+ */
+void *qbman_swp_mc_result(struct qbman_swp *p)
+{
+	u32 *ret, verb;
+
+	ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+
+	/* Remove the valid-bit - command completed if the rest is non-zero */
+	verb = ret[0] & ~QB_VALID_BIT;
+	if (!verb)
+		return NULL;
+	p->mc.valid_bit ^= QB_VALID_BIT;
+	return ret;
+}
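+
+/*
+ * Illustrative (hypothetical) use of this base layer; real callers
+ * normally go through the qbman_swp_mc_complete() wrapper defined in
+ * qbman-portal.h:
+ *
+ *	cmd = qbman_swp_mc_start(p);
+ *	if (!cmd)
+ *		return -EBUSY;
+ *	... fill in the command body, leaving the verb byte alone ...
+ *	qbman_swp_mc_submit(p, cmd, cmd_verb);
+ *	do {
+ *		rslt = qbman_swp_mc_result(p);
+ *	} while (!rslt);
+ */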
+
+#define QB_ENQUEUE_CMD_OPTIONS_SHIFT    0
+enum qb_enqueue_commands {
+	enqueue_empty = 0,
+	enqueue_response_always = 1,
+	enqueue_rejects_to_fq = 2
+};
+
+#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
+#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
+#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
+
+/**
+ * qbman_eq_desc_clear() - Clear the contents of a descriptor to
+ *                         default/starting state.
+ * @d: the enqueue descriptor to be cleared
+ */
+void qbman_eq_desc_clear(struct qbman_eq_desc *d)
+{
+	memset(d, 0, sizeof(*d));
+}
+
+/**
+ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
+ * @d:                the enqueue descriptor.
+ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
+ *                   rejections returned on a FQ.
+ */
+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
+{
+	d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
+	if (respond_success)
+		d->verb |= enqueue_response_always;
+	else
+		d->verb |= enqueue_rejects_to_fq;
+}
+
+/*
+ * Exactly one of the following descriptor "targets" should be set. (Calling any
+ * one of these will replace the effect of any prior call to one of these.)
+ *   -enqueue to a frame queue
+ *   -enqueue to a queuing destination
+ */
+
+/**
+ * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
+ * @d:    the enqueue descriptor
+ * @fqid: the id of the frame queue to be enqueued
+ */
+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
+{
+	d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
+	d->tgtid = cpu_to_le32(fqid);
+}
+
+/**
+ * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
+ * @d:       the enqueue descriptor
+ * @qdid:    the id of the queuing destination to be enqueued
+ * @qd_bin:  the queuing destination bin
+ * @qd_prio: the queuing destination priority
+ */
+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
+			  u32 qd_bin, u32 qd_prio)
+{
+	d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
+	d->tgtid = cpu_to_le32(qdid);
+	d->qdbin = cpu_to_le16(qd_bin);
+	d->qpri = qd_prio;
+}
+
+#define EQAR_IDX(eqar)     ((eqar) & 0x7)
+#define EQAR_VB(eqar)      ((eqar) & 0x80)
+#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
+
+/**
+ * qbman_swp_enqueue() - Issue an enqueue command
+ * @s:  the software portal used for enqueue
+ * @d:  the enqueue descriptor
+ * @fd: the frame descriptor to be enqueued
+ *
+ * Please note that 'fd' should only be NULL if the "action" of the
+ * descriptor is "orp_hole" or "orp_nesn".
+ *
+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
+		      const struct dpaa2_fd *fd)
+{
+	struct qbman_eq_desc *p;
+	u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR);
+
+	if (!EQAR_SUCCESS(eqar))
+		return -EBUSY;
+
+	p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
+	memcpy(&p->dca, &d->dca, 31);
+	memcpy(&p->fd, fd, sizeof(*fd));
+
+	/* Set the verb byte, have to substitute in the valid-bit */
+	dma_wmb();
+	p->verb = d->verb | EQAR_VB(eqar);
+
+	return 0;
+}
+
+/* Static (push) dequeue */
+
+/**
+ * qbman_swp_push_get() - Get the push dequeue setup
+ * @s:           the software portal object
+ * @channel_idx: the channel index to query
+ * @enabled:     returned boolean to show whether the push dequeue is enabled
+ *               for the given channel
+ */
+void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
+{
+	u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
+
+	WARN_ON(channel_idx > 15);
+	*enabled = !!(src & (1 << channel_idx));
+}
+
+/**
+ * qbman_swp_push_set() - Enable or disable push dequeue
+ * @s:           the software portal object
+ * @channel_idx: the channel index (0 to 15)
+ * @enable:      enable or disable push dequeue
+ */
+void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
+{
+	u16 dqsrc;
+
+	WARN_ON(channel_idx > 15);
+	if (enable)
+		s->sdq |= 1 << channel_idx;
+	else
+		s->sdq &= ~(1 << channel_idx);
+
+	/*
+	 * Read back the complete SRC map. If no channels are enabled
+	 * the SDQCR must be 0 or else QMan will assert errors
+	 */
+	dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
+	if (dqsrc != 0)
+		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
+	else
+		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
+}
+
+#define QB_VDQCR_VERB_DCT_SHIFT    0
+#define QB_VDQCR_VERB_DT_SHIFT     2
+#define QB_VDQCR_VERB_RLS_SHIFT    4
+#define QB_VDQCR_VERB_WAE_SHIFT    5
+
+enum qb_pull_dt_e {
+	qb_pull_dt_channel,
+	qb_pull_dt_workqueue,
+	qb_pull_dt_framequeue
+};
+
+/**
+ * qbman_pull_desc_clear() - Clear the contents of a descriptor to
+ *                           default/starting state
+ * @d: the pull dequeue descriptor to be cleared
+ */
+void qbman_pull_desc_clear(struct qbman_pull_desc *d)
+{
+	memset(d, 0, sizeof(*d));
+}
+
+/**
+ * qbman_pull_desc_set_storage() - Set the pull dequeue storage
+ * @d:            the pull dequeue descriptor to be set
+ * @storage:      the pointer of the memory to store the dequeue result
+ * @storage_phys: the physical address of the storage memory
+ * @stash:        to indicate whether write allocate is enabled
+ *
+ * If not called, or if called with 'storage' as NULL, then pull dequeues
+ * will produce results to DQRR. If 'storage' is non-NULL, then results are
+ * produced to the given memory location (using the DMA address which
+ * the caller provides in 'storage_phys'), and 'stash' controls whether or not
+ * those writes to main-memory express a cache-warming attribute.
+ */
+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
+				 struct dpaa2_dq *storage,
+				 dma_addr_t storage_phys,
+				 int stash)
+{
+	/* save the virtual address */
+	d->rsp_addr_virt = (u64)storage;
+
+	if (!storage) {
+		d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
+		return;
+	}
+	d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
+	if (stash)
+		d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
+	else
+		d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
+
+	d->rsp_addr = cpu_to_le64(storage_phys);
+}
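+
+/*
+ * Illustrative setup of a storage-backed pull dequeue (hypothetical
+ * caller; 'vaddr' and 'paddr' would come from the caller's own
+ * DMA-coherent allocation):
+ *
+ *	qbman_pull_desc_clear(&pd);
+ *	qbman_pull_desc_set_numframes(&pd, 8);
+ *	qbman_pull_desc_set_fq(&pd, fqid);
+ *	qbman_pull_desc_set_storage(&pd, vaddr, paddr, 1);
+ */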
+
+/**
+ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
+ * @d:         the pull dequeue descriptor to be set
+ * @numframes: number of frames to be set, must be between 1 and 16, inclusive
+ */
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
+{
+	d->numf = numframes - 1;
+}
+
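+/**
+ * qbman_pull_desc_set_token() - Set the token field of the pull dequeue
+ *                               command
+ * @d:     the pull dequeue descriptor to be set
+ * @token: the token value QMan copies into each dequeue result, used to
+ *         detect when a result has been written to the storage memory
+ */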
+void qbman_pull_desc_set_token(struct qbman_pull_desc *d, u8 token)
+{
+	d->tok = token;
+}
+
+/*
+ * Exactly one of the following descriptor "actions" should be set. (Calling any
+ * one of these will replace the effect of any prior call to one of these.)
+ * - pull dequeue from the given frame queue (FQ)
+ * - pull dequeue from any FQ in the given work queue (WQ)
+ * - pull dequeue from any FQ in any WQ in the given channel
+ */
+
+/**
+ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
+ * @d:    the pull dequeue descriptor to be set
+ * @fqid: the frame queue index of the given FQ
+ */
+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
+{
+	d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
+	d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
+	d->dq_src = cpu_to_le32(fqid);
+}
+
+/**
+ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
+ * @d:    the pull dequeue descriptor to be set
+ * @wqid: composed of channel id and wqid within the channel
+ * @dct:  the dequeue command type
+ */
+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
+			    enum qbman_pull_type_e dct)
+{
+	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
+	d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
+	d->dq_src = cpu_to_le32(wqid);
+}
+
+/**
+ * qbman_pull_desc_set_channel() - Set channelid from which the dequeue
+ *                                 command dequeues
+ * @d:    the pull dequeue descriptor to be set
+ * @chid: the channel id to be dequeued
+ * @dct:  the dequeue command type
+ */
+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
+				 enum qbman_pull_type_e dct)
+{
+	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
+	d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
+	d->dq_src = cpu_to_le32(chid);
+}
+
+/**
+ * qbman_swp_pull() - Issue the pull dequeue command
+ * @s: the software portal object
+ * @d: the software portal descriptor which has been configured with
+ *     the set of qbman_pull_desc_set_*() calls
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
+{
+	struct qbman_pull_desc *p;
+
+	if (!atomic_dec_and_test(&s->vdq.available)) {
+		atomic_inc(&s->vdq.available);
+		return -EBUSY;
+	}
+	s->vdq.storage = (void *)d->rsp_addr_virt;
+	d->tok = QMAN_DQ_TOKEN_VALID;
+	p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
+	*p = *d;
+	dma_wmb();
+
+	/* Set the verb byte, have to substitute in the valid-bit */
+	p->verb |= s->vdq.valid_bit;
+	s->vdq.valid_bit ^= QB_VALID_BIT;
+
+	return 0;
+}
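+
+/*
+ * Continuing the illustration above: once 'pd' has been configured with
+ * the qbman_pull_desc_set_*() calls, a hypothetical caller issues the
+ * command and polls the storage for the result:
+ *
+ *	if (qbman_swp_pull(s, &pd))
+ *		return -EBUSY;
+ *	while (!qbman_result_has_new_result(s, vaddr))
+ *		cpu_relax();
+ */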
+
+#define QMAN_DQRR_PI_MASK   0xf
+
+/**
+ * qbman_swp_dqrr_next() - Get a valid DQRR entry
+ * @s: the software portal object
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order.
+ */
+const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
+{
+	u32 verb;
+	u32 response_verb;
+	u32 flags;
+	struct dpaa2_dq *p;
+
+	/*
+	 * Before using valid-bit to detect if something is there, we have to
+	 * handle the case of the DQRR reset bug...
+	 */
+	if (unlikely(s->dqrr.reset_bug)) {
+		/*
+		 * We pick up new entries by cache-inhibited producer index,
+		 * which means that a non-coherent mapping would require us to
+		 * invalidate and read *only* once that PI has indicated that
+		 * there's an entry here. The first trip around the DQRR ring
+		 * will be much less efficient than all subsequent trips around
+		 * it...
+		 */
+		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
+			QMAN_DQRR_PI_MASK;
+
+		/* there are new entries if pi != next_idx */
+		if (pi == s->dqrr.next_idx)
+			return NULL;
+
+		/*
+		 * if next_idx is/was the last ring index, and 'pi' is
+		 * different, we can disable the workaround as all the ring
+		 * entries have now been DMA'd to so valid-bit checking is
+		 * repaired. Note: this logic needs to be based on next_idx
+		 * (which increments one at a time), rather than on pi (which
+		 * can burst and wrap-around between our snapshots of it).
+		 */
+		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
+			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
+				 s->dqrr.next_idx, pi);
+			s->dqrr.reset_bug = 0;
+		}
+		prefetch(qbman_get_cmd(s,
+				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+	}
+
+	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+	verb = p->dq.verb;
+
+	/*
+	 * If the valid-bit isn't of the expected polarity, nothing there. Note,
+	 * in the DQRR reset bug workaround, we shouldn't need to skip this
+	 * check, because we've already determined that a new entry is available
+	 * and we've invalidated the cacheline before reading it, so the
+	 * valid-bit behaviour is repaired and should tell us what we already
+	 * knew from reading PI.
+	 */
+	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
+		prefetch(qbman_get_cmd(s,
+				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+		return NULL;
+	}
+	/*
+	 * There's something there. Move "next_idx" attention to the next ring
+	 * entry (and prefetch it) before returning what we found.
+	 */
+	s->dqrr.next_idx++;
+	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
+	if (!s->dqrr.next_idx)
+		s->dqrr.valid_bit ^= QB_VALID_BIT;
+
+	/*
+	 * If this is the final response to a volatile dequeue command
+	 * indicate that the vdq is available
+	 */
+	flags = p->dq.stat;
+	response_verb = verb & QBMAN_RESULT_MASK;
+	if ((response_verb == QBMAN_RESULT_DQ) &&
+	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
+	    (flags & DPAA2_DQ_STAT_EXPIRED))
+		atomic_inc(&s->vdq.available);
+
+	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+
+	return p;
+}
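+
+/*
+ * Illustrative DQRR processing loop (hypothetical caller; dpaa2_dq_fd()
+ * is assumed to be the frame descriptor accessor from dpaa2-global.h):
+ *
+ *	while ((dq = qbman_swp_dqrr_next(s)) != NULL) {
+ *		if (qbman_result_is_DQ(dq))
+ *			... process dpaa2_dq_fd(dq) ...
+ *		qbman_swp_dqrr_consume(s, dq);
+ *	}
+ */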
+
+/**
+ * qbman_swp_dqrr_consume() - Consume a DQRR entry previously returned from
+ *                            qbman_swp_dqrr_next().
+ * @s: the software portal object
+ * @dq: the DQRR entry to be consumed
+ */
+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
+{
+	qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
+}
+
+/**
+ * qbman_result_has_new_result() - Check and get the dequeue response from the
+ *                                 dq storage memory set in pull dequeue command
+ * @s: the software portal object
+ * @dq: the dequeue result read from the memory
+ *
+ * Return 1 if a valid dequeue result was obtained, or 0 if a valid result is
+ * not yet available.
+ *
+ * Only used for user-provided storage of dequeue results, not DQRR. For
+ * efficiency purposes, the driver will perform any required endianness
+ * conversion to ensure that the user's dequeue result storage is in host-endian
+ * format. As such, once the user has called qbman_result_has_new_result() and
+ * been returned a valid dequeue result, they should not call it again on
+ * the same memory location (except of course if another dequeue command has
+ * been executed to produce a new result to that location).
+ */
+int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
+{
+	if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
+		return 0;
+
+	/*
+	 * Set token to be 0 so we will detect change back to 1
+	 * next time the looping is traversed. Const is cast away here
+	 * as we want users to treat the dequeue responses as read only.
+	 */
+	((struct dpaa2_dq *)dq)->dq.tok = 0;
+
+	/*
+	 * Determine whether VDQCR is available based on whether the
+	 * current result is sitting in the first storage location of
+	 * the busy command.
+	 */
+	if (s->vdq.storage == dq) {
+		s->vdq.storage = NULL;
+		atomic_inc(&s->vdq.available);
+	}
+
+	return 1;
+}
+
+/**
+ * qbman_release_desc_clear() - Clear the contents of a descriptor to
+ *                              default/starting state.
+ * @d: the release descriptor to be cleared
+ */
+void qbman_release_desc_clear(struct qbman_release_desc *d)
+{
+	memset(d, 0, sizeof(*d));
+	d->verb = 1 << 5; /* Release Command Valid */
+}
+
+/**
+ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
+ * @d:    the release descriptor
+ * @bpid: the buffer pool id
+ */
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
+{
+	d->bpid = cpu_to_le16(bpid);
+}
+
+/**
+ * qbman_release_desc_set_rcdi() - Determine whether or not the portal's RCDI
+ *                                 interrupt source should be asserted after
+ *                                 the release command is completed
+ * @d:      the release descriptor
+ * @enable: enable (1) or disable (0) the RCDI interrupt
+ */
+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
+{
+	if (enable)
+		d->verb |= 1 << 6;
+	else
+		d->verb &= ~(1 << 6);
+}
+
+#define RAR_IDX(rar)     ((rar) & 0x7)
+#define RAR_VB(rar)      ((rar) & 0x80)
+#define RAR_SUCCESS(rar) ((rar) & 0x100)
+
+/**
+ * qbman_swp_release() - Issue a buffer release command
+ * @s:           the software portal object
+ * @d:           the release descriptor
+ * @buffers:     the array of buffer addresses to be released
+ * @num_buffers: number of buffers to be released, must be between 1 and 7,
+ *               inclusive
+ *
+ * Return 0 for success, -EBUSY if the release command ring is not ready.
+ */
+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
+		      const u64 *buffers, unsigned int num_buffers)
+{
+	int i;
+	struct qbman_release_desc *p;
+	u32 rar;
+
+	if (!num_buffers || (num_buffers > 7))
+		return -EINVAL;
+
+	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
+	if (!RAR_SUCCESS(rar))
+		return -EBUSY;
+
+	/* Start the release command */
+	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+	/* Copy the caller's buffer pointers to the command */
+	for (i = 0; i < num_buffers; i++)
+		p->buf[i] = cpu_to_le64(buffers[i]);
+	p->bpid = d->bpid;
+
+	/*
+	 * Set the verb byte, have to substitute in the valid-bit and the number
+	 * of buffers.
+	 */
+	dma_wmb();
+	p->verb = d->verb | RAR_VB(rar) | num_buffers;
+
+	return 0;
+}
+
+struct qbman_acquire_desc {
+	u8 verb;
+	u8 reserved;
+	u16 bpid;
+	u8 num;
+	u8 reserved2[59];
+};
+
+struct qbman_acquire_rslt {
+	u8 verb;
+	u8 rslt;
+	u16 reserved;
+	u8 num;
+	u8 reserved2[3];
+	u64 buf[7];
+};
+
+/**
+ * qbman_swp_acquire() - Issue a buffer acquire command
+ * @s:           the software portal object
+ * @bpid:        the buffer pool index
+ * @buffers:     the array into which the acquired buffer addresses are written
+ * @num_buffers: number of buffers to be acquired, must be between 1 and 7,
+ *               inclusive
+ *
+ * Return the number of buffers acquired, or a negative error code if the
+ * acquire command fails.
+ */
+int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
+		      unsigned int num_buffers)
+{
+	struct qbman_acquire_desc *p;
+	struct qbman_acquire_rslt *r;
+	int i;
+
+	if (!num_buffers || (num_buffers > 7))
+		return -EINVAL;
+
+	/* Start the management command */
+	p = qbman_swp_mc_start(s);
+
+	if (!p)
+		return -EBUSY;
+
+	/* Encode the caller-provided attributes */
+	p->bpid = cpu_to_le16(bpid);
+	p->num = num_buffers;
+
+	/* Complete the management command */
+	r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
+	if (unlikely(!r)) {
+		pr_err("qbman: acquire from BPID %d failed, no response\n",
+		       bpid);
+		return -EIO;
+	}
+
+	/* Decode the outcome */
+	WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);
+
+	/* Determine success or failure */
+	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+		pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
+		       bpid, r->rslt);
+		return -EIO;
+	}
+
+	WARN_ON(r->num > num_buffers);
+
+	/* Copy the acquired buffers to the caller's array */
+	for (i = 0; i < r->num; i++)
+		buffers[i] = le64_to_cpu(r->buf[i]);
+
+	return (int)r->num;
+}
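+
+/*
+ * Illustrative buffer pool round trip (hypothetical caller; 'bufs'
+ * holds DMA addresses of buffers the caller owns):
+ *
+ *	qbman_release_desc_clear(&rd);
+ *	qbman_release_desc_set_bpid(&rd, bpid);
+ *	while (qbman_swp_release(s, &rd, bufs, n) == -EBUSY)
+ *		;
+ *	...
+ *	got = qbman_swp_acquire(s, bpid, bufs, 7);
+ */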
+
+struct qbman_alt_fq_state_desc {
+	u8 verb;
+	u8 reserved[3];
+	u32 fqid;
+	u8 reserved2[56];
+};
+
+struct qbman_alt_fq_state_rslt {
+	u8 verb;
+	u8 rslt;
+	u8 reserved[62];
+};
+
+#define ALT_FQ_FQID_MASK 0x00FFFFFF
+
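+/**
+ * qbman_swp_alt_fq_state() - Issue a FQ state-altering management command
+ * @s:           the software portal object
+ * @fqid:        the index of the frame queue to be altered
+ * @alt_fq_verb: the command verb: QBMAN_FQ_SCHEDULE, QBMAN_FQ_FORCE,
+ *               QBMAN_FQ_XON or QBMAN_FQ_XOFF
+ *
+ * Return 0 for success, or negative error code for failure.
+ */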
+int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
+			   u8 alt_fq_verb)
+{
+	struct qbman_alt_fq_state_desc *p;
+	struct qbman_alt_fq_state_rslt *r;
+
+	/* Start the management command */
+	p = qbman_swp_mc_start(s);
+	if (!p)
+		return -EBUSY;
+
+	p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);
+
+	/* Complete the management command */
+	r = qbman_swp_mc_complete(s, p, alt_fq_verb);
+	if (unlikely(!r)) {
+		pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
+		       alt_fq_verb);
+		return -EIO;
+	}
+
+	/* Decode the outcome */
+	WARN_ON(r->verb != alt_fq_verb);
+
+	/* Determine success or failure */
+	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+		pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
+		       fqid, r->verb, r->rslt);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+struct qbman_cdan_ctrl_desc {
+	u8 verb;
+	u8 reserved;
+	u16 ch;
+	u8 we;
+	u8 ctrl;
+	u16 reserved2;
+	u64 cdan_ctx;
+	u8 reserved3[48];
+};
+
+struct qbman_cdan_ctrl_rslt {
+	u8 verb;
+	u8 rslt;
+	u16 ch;
+	u8 reserved[60];
+};
+
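+/**
+ * qbman_swp_CDAN_set() - Set the CDAN configuration of a channel
+ * @s:         the software portal object
+ * @channelid: the index of the channel
+ * @we_mask:   the write-enable mask (CODE_CDAN_WE_* flags) selecting which
+ *             fields to update
+ * @cdan_en:   enable (1) or disable (0) CDAN generation
+ * @ctx:       the context value delivered with each CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */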
+int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
+		       u8 we_mask, u8 cdan_en,
+		       u64 ctx)
+{
+	struct qbman_cdan_ctrl_desc *p = NULL;
+	struct qbman_cdan_ctrl_rslt *r = NULL;
+
+	/* Start the management command */
+	p = qbman_swp_mc_start(s);
+	if (!p)
+		return -EBUSY;
+
+	/* Encode the caller-provided attributes */
+	p->verb = 0;
+	p->ch = cpu_to_le16(channelid);
+	p->we = we_mask;
+	if (cdan_en)
+		p->ctrl = 1;
+	else
+		p->ctrl = 0;
+	p->cdan_ctx = cpu_to_le64(ctx);
+
+	/* Complete the management command */
+	r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
+	if (unlikely(!r)) {
+		pr_err("qbman: wqchan config failed, no response\n");
+		return -EIO;
+	}
+
+	WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);
+
+	/* Determine success or failure */
+	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+		pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
+		       channelid, r->rslt);
+		return -EIO;
+	}
+
+	return 0;
+}
diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
new file mode 100644
index 0000000..8428559
--- /dev/null
+++ b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
@@ -0,0 +1,469 @@
+/*
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_QBMAN_PORTAL_H
+#define __FSL_QBMAN_PORTAL_H
+
+#include "../../include/dpaa2-fd.h"
+
+struct dpaa2_dq;
+struct qbman_swp;
+
+/* qbman software portal descriptor structure */
+struct qbman_swp_desc {
+	void *cena_bar; /* Cache-enabled portal base address */
+	void *cinh_bar; /* Cache-inhibited portal base address */
+	u32 qman_version;
+};
+
+#define QBMAN_SWP_INTERRUPT_EQRI 0x01
+#define QBMAN_SWP_INTERRUPT_EQDI 0x02
+#define QBMAN_SWP_INTERRUPT_DQRI 0x04
+#define QBMAN_SWP_INTERRUPT_RCRI 0x08
+#define QBMAN_SWP_INTERRUPT_RCDI 0x10
+#define QBMAN_SWP_INTERRUPT_VDCI 0x20
+
+/* the structure for pull dequeue descriptor */
+struct qbman_pull_desc {
+	u8 verb;
+	u8 numf;
+	u8 tok;
+	u8 reserved;
+	u32 dq_src;
+	u64 rsp_addr;
+	u64 rsp_addr_virt;
+	u8 padding[40];
+};
+
+enum qbman_pull_type_e {
+	/* dequeue with priority precedence, respect intra-class scheduling */
+	qbman_pull_type_prio = 1,
+	/* dequeue with active FQ precedence, respect ICS */
+	qbman_pull_type_active,
+	/* dequeue with active FQ precedence, no ICS */
+	qbman_pull_type_active_noics
+};
+
+/* Definitions for parsing dequeue entries */
+#define QBMAN_RESULT_MASK      0x7f
+#define QBMAN_RESULT_DQ        0x60
+#define QBMAN_RESULT_FQRN      0x21
+#define QBMAN_RESULT_FQRNI     0x22
+#define QBMAN_RESULT_FQPN      0x24
+#define QBMAN_RESULT_FQDAN     0x25
+#define QBMAN_RESULT_CDAN      0x26
+#define QBMAN_RESULT_CSCN_MEM  0x27
+#define QBMAN_RESULT_CGCU      0x28
+#define QBMAN_RESULT_BPSCN     0x29
+#define QBMAN_RESULT_CSCN_WQ   0x2a
+
+/* QBMan FQ management command codes */
+#define QBMAN_FQ_SCHEDULE	0x48
+#define QBMAN_FQ_FORCE		0x49
+#define QBMAN_FQ_XON		0x4d
+#define QBMAN_FQ_XOFF		0x4e
+
+/* structure of enqueue descriptor */
+struct qbman_eq_desc {
+	u8 verb;
+	u8 dca;
+	u16 seqnum;
+	u16 orpid;
+	u16 reserved1;
+	u32 tgtid;
+	u32 tag;
+	u16 qdbin;
+	u8 qpri;
+	u8 reserved[3];
+	u8 wae;
+	u8 rspid;
+	u64 rsp_addr;
+	u8 fd[32];
+};
+
+/* buffer release descriptor */
+struct qbman_release_desc {
+	u8 verb;
+	u8 reserved;
+	u16 bpid;
+	u32 reserved2;
+	u64 buf[7];
+};
+
+/* Management command result codes */
+#define QBMAN_MC_RSLT_OK      0xf0
+
+#define CODE_CDAN_WE_EN    0x1
+#define CODE_CDAN_WE_CTX   0x4
+
+/* portal data structure */
+struct qbman_swp {
+	const struct qbman_swp_desc *desc;
+	void __iomem *addr_cena;
+	void __iomem *addr_cinh;
+
+	/* Management commands */
+	struct {
+		u32 valid_bit; /* 0x00 or 0x80 */
+	} mc;
+
+	/* Push dequeues */
+	u32 sdq;
+
+	/* Volatile dequeues */
+	struct {
+		atomic_t available; /* indicates if a command can be sent */
+		u32 valid_bit; /* 0x00 or 0x80 */
+		struct dpaa2_dq *storage; /* NULL if DQRR */
+	} vdq;
+
+	/* DQRR */
+	struct {
+		u32 next_idx;
+		u32 valid_bit;
+		u8 dqrr_size;
+		int reset_bug; /* indicates dqrr reset workaround is needed */
+	} dqrr;
+};
+
+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
+void qbman_swp_finish(struct qbman_swp *p);
+u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
+u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
+
+void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
+void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);
+
+void qbman_pull_desc_clear(struct qbman_pull_desc *d);
+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
+				 struct dpaa2_dq *storage,
+				 dma_addr_t storage_phys,
+				 int stash);
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
+			    enum qbman_pull_type_e dct);
+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
+				 enum qbman_pull_type_e dct);
+
+int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);
+
+const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
+
+int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
+
+void qbman_eq_desc_clear(struct qbman_eq_desc *d);
+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
+			  u32 qd_bin, u32 qd_prio);
+
+int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
+		      const struct dpaa2_fd *fd);
+
+void qbman_release_desc_clear(struct qbman_release_desc *d);
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
+
+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
+		      const u64 *buffers, unsigned int num_buffers);
+int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
+		      unsigned int num_buffers);
+int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
+			   u8 alt_fq_verb);
+int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
+		       u8 we_mask, u8 cdan_en,
+		       u64 ctx);
+
+void *qbman_swp_mc_start(struct qbman_swp *p);
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
+void *qbman_swp_mc_result(struct qbman_swp *p);
+
+/**
+ * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
+ * @dq: the dequeue result to be checked
+ *
+ * DQRR entries may contain non-dequeue results, i.e. notifications
+ */
+static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
+{
+	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
+}
+
+/**
+ * qbman_result_is_SCN() - Check whether the dequeue result is a notification
+ * @dq: the dequeue result to be checked
+ */
+static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
+{
+	return !qbman_result_is_DQ(dq);
+}
+
+/* FQ Data Availability */
+static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
+{
+	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
+}
+
+/* Channel Data Availability */
+static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
+{
+	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
+}
+
+/* Congestion State Change */
+static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
+{
+	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
+}
+
+/* Buffer Pool State Change */
+static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
+{
+	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
+}
+
+/* Congestion Group Count Update */
+static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
+{
+	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
+}
+
+/* Retirement */
+static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
+{
+	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
+}
+
+/* Retirement Immediate */
+static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
+{
+	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
+}
+
+/* Park */
+static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
+{
+	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
+}
+
+/**
+ * qbman_result_SCN_state() - Get the state field in State-change notification
+ * @scn: the state change notification
+ */
+static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
+{
+	return scn->scn.state;
+}
+
+#define SCN_RID_MASK 0x00FFFFFF
+
+/**
+ * qbman_result_SCN_rid() - Get the resource id in State-change notification
+ * @scn: the state change notification
+ */
+static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
+{
+	return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
+}
+
+/**
+ * qbman_result_SCN_ctx() - Get the context data in State-change notification
+ * @scn: the state change notification
+ */
+static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
+{
+	return le64_to_cpu(scn->scn.ctx);
+}
+
+/**
+ * qbman_swp_fq_schedule() - Move the fq to the scheduled state
+ * @s:    the software portal object
+ * @fqid: the index of the frame queue to be scheduled
+ *
+ * There are a couple of different ways that a FQ can end up in the parked
+ * state; this schedules it.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
+{
+	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
+}
+
+/**
+ * qbman_swp_fq_force() - Force the FQ to fully scheduled state
+ * @s:    the software portal object
+ * @fqid: the index of the frame queue to be forced
+ *
+ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
+ * and thus be available for selection by any channel-dequeuing behaviour (push
+ * or pull). If the FQ is subsequently "dequeued" from the channel and is still
+ * empty at the time this happens, the resulting dq_entry will have no FD.
+ * (dpaa2_dq_fd() will return NULL.)
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
+{
+	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
+}
+
+/**
+ * qbman_swp_fq_xon() - sets FQ flow-control to XON
+ * @s:    the software portal object
+ * @fqid: the index of the frame queue
+ *
+ * This setting doesn't affect enqueues to the FQ, just dequeues.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
+{
+	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
+}
+
+/**
+ * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
+ * @s:    the software portal object
+ * @fqid: the index of the frame queue
+ *
+ * This setting doesn't affect enqueues to the FQ, just dequeues.
+ * XOFF FQs will remain in the tentatively-scheduled state, even when
+ * non-empty, meaning they won't be selected for scheduled dequeuing.
+ * If a FQ is changed to XOFF after it had already become truly-scheduled
+ * to a channel, and a pull dequeue of that channel occurs that selects
+ * that FQ for dequeuing, then the resulting dq_entry will have no FD.
+ * (dpaa2_dq_fd() will return NULL.)
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
+{
+	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
+}
+
+/* If the user has been allocated a channel object that is going to generate
+ * CDANs to another channel, then the qbman_swp_CDAN* functions will be
+ * necessary.
+ *
+ * CDAN-enabled channels only generate a single CDAN notification, after which
+ * they need to be reenabled before they'll generate another. The idea is
+ * that pull dequeuing will occur in reaction to the CDAN, followed by a
+ * reenable step. Each function generates a distinct command to hardware, so a
+ * combination function is provided if the user wishes to modify the "context"
+ * (which shows up in each CDAN message) each time they reenable, as a single
+ * command to hardware.
+ */
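+
+/*
+ * Illustrative CDAN sequence (hypothetical caller):
+ *
+ *	qbman_swp_CDAN_set_context_enable(s, chid, ctx);
+ *	... on receiving the CDAN notification, pull-dequeue the channel,
+ *	    then re-arm it ...
+ *	qbman_swp_CDAN_enable(s, chid);
+ */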
+
+/**
+ * qbman_swp_CDAN_set_context() - Set CDAN context
+ * @s:         the software portal object
+ * @channelid: the channel index
+ * @ctx:       the context to be set in CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
+					     u64 ctx)
+{
+	return qbman_swp_CDAN_set(s, channelid,
+				  CODE_CDAN_WE_CTX,
+				  0, ctx);
+}
+
+/**
+ * qbman_swp_CDAN_enable() - Enable CDAN for the channel
+ * @s:         the software portal object
+ * @channelid: the index of the channel to generate CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
+{
+	return qbman_swp_CDAN_set(s, channelid,
+				  CODE_CDAN_WE_EN,
+				  1, 0);
+}
+
+/**
+ * qbman_swp_CDAN_disable() - disable CDAN for the channel
+ * @s:         the software portal object
+ * @channelid: the index of the channel to generate CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
+{
+	return qbman_swp_CDAN_set(s, channelid,
+				  CODE_CDAN_WE_EN,
+				  0, 0);
+}
+
+/**
+ * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
+ * @s:         the software portal object
+ * @channelid: the index of the channel to generate CDAN
+ * @ctx:       the context to be set in CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
+						    u16 channelid,
+						    u64 ctx)
+{
+	return qbman_swp_CDAN_set(s, channelid,
+				  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
+				  1, ctx);
+}
+
+/* Wraps up submit + poll-for-result */
+static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
+					  u8 cmd_verb)
+{
+	int loopvar = 1000;
+
+	qbman_swp_mc_submit(swp, cmd, cmd_verb);
+
+	do {
+		cmd = qbman_swp_mc_result(swp);
+	} while (!cmd && loopvar--);
+
+	WARN_ON(!loopvar);
+
+	return cmd;
+}
+
+#endif /* __FSL_QBMAN_PORTAL_H */
-- 
2.9.3
