Message-ID: <1464853088.7659.21.camel@infinera.com>
Date: Thu, 2 Jun 2016 07:37:32 +0000
From: Joakim Tjernlund <Joakim.Tjernlund@...inera.com>
To: "oss@...error.net" <oss@...error.net>,
"qiang.zhao@....com" <qiang.zhao@....com>
CC: "linuxppc-dev@...ts.ozlabs.org" <linuxppc-dev@...ts.ozlabs.org>,
"netdev@...r.kernel.org" <netdev@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"xiaobo.xie@....com" <xiaobo.xie@....com>
Subject: Re: [Patch v2 5/5] drivers/net: support hdlc function for QE-UCC
On Thu, 2016-06-02 at 09:45 +0800, Zhao Qiang wrote:
> The driver adds HDLC support for the Freescale QUICC Engine.
> It supports NMSI and TSA modes.
>
> Signed-off-by: Zhao Qiang <qiang.zhao@....com>
> ---
> Changes for v2:
> - remove useless code
> - remove unnecessary casts
> - return IRQ_NONE when there is no interrupt
> - remove useless comments
>
> MAINTAINERS | 7 +
> drivers/net/wan/Kconfig | 11 +
> drivers/net/wan/Makefile | 1 +
> drivers/net/wan/fsl_ucc_hdlc.c | 1189 ++++++++++++++++++++++++++++++++++++++++
> drivers/net/wan/fsl_ucc_hdlc.h | 147 +++++
> include/soc/fsl/qe/qe.h | 1 +
> include/soc/fsl/qe/ucc_fast.h | 21 +-
> 7 files changed, 1375 insertions(+), 2 deletions(-)
> create mode 100644 drivers/net/wan/fsl_ucc_hdlc.c
> create mode 100644 drivers/net/wan/fsl_ucc_hdlc.h
>
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 74bbff3..bdada16 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -4572,6 +4572,13 @@ F: drivers/net/ethernet/freescale/gianfar*
> X: drivers/net/ethernet/freescale/gianfar_ptp.c
> F: Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
>
> +FREESCALE QUICC ENGINE UCC HDLC DRIVER
> +M: Zhao Qiang <qiang.zhao@....com>
> +L: netdev@...r.kernel.org
> +L: linuxppc-dev@...ts.ozlabs.org
> +S: Maintained
> +F: drivers/net/wan/fsl_ucc_hdlc*
> +
> FREESCALE QUICC ENGINE UCC UART DRIVER
> M: Timur Tabi <timur@...i.org>
> L: linuxppc-dev@...ts.ozlabs.org
> diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
> index a2fdd15..9e314b7 100644
> --- a/drivers/net/wan/Kconfig
> +++ b/drivers/net/wan/Kconfig
> @@ -280,6 +280,17 @@ config DSCC4
> To compile this driver as a module, choose M here: the
> module will be called dscc4.
>
> +config FSL_UCC_HDLC
> + tristate "Freescale QUICC Engine HDLC support"
> + depends on HDLC
> + depends on QUICC_ENGINE
> + help
> + Driver for Freescale QUICC Engine HDLC controller. The driver
> + supports HDLC in NMSI and TDM mode.
> +
> + To compile this driver as a module, choose M here: the
> + module will be called fsl_ucc_hdlc.
> +
> config DSCC4_PCISYNC
> bool "Etinc PCISYNC features"
> depends on DSCC4
> diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
> index c135ef4..25fec40 100644
> --- a/drivers/net/wan/Makefile
> +++ b/drivers/net/wan/Makefile
> @@ -32,6 +32,7 @@ obj-$(CONFIG_WANXL) += wanxl.o
> obj-$(CONFIG_PCI200SYN) += pci200syn.o
> obj-$(CONFIG_PC300TOO) += pc300too.o
> obj-$(CONFIG_IXP4XX_HSS) += ixp4xx_hss.o
> +obj-$(CONFIG_FSL_UCC_HDLC) += fsl_ucc_hdlc.o
>
> clean-files := wanxlfw.inc
> $(obj)/wanxl.o: $(obj)/wanxlfw.inc
> diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
> new file mode 100644
> index 0000000..f72634d
> --- /dev/null
> +++ b/drivers/net/wan/fsl_ucc_hdlc.c
> @@ -0,0 +1,1189 @@
> +/* Freescale QUICC Engine HDLC Device Driver
> + *
> + * Copyright 2016 Freescale Semiconductor Inc.
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License as published by the
> + * Free Software Foundation; either version 2 of the License, or (at your
> + * option) any later version.
> + */
> +
> +#include <linux/delay.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/hdlc.h>
> +#include <linux/init.h>
> +#include <linux/interrupt.h>
> +#include <linux/io.h>
> +#include <linux/irq.h>
> +#include <linux/kernel.h>
> +#include <linux/module.h>
> +#include <linux/netdevice.h>
> +#include <linux/of_address.h>
> +#include <linux/of_irq.h>
> +#include <linux/of_platform.h>
> +#include <linux/platform_device.h>
> +#include <linux/sched.h>
> +#include <linux/skbuff.h>
> +#include <linux/slab.h>
> +#include <linux/spinlock.h>
> +#include <linux/stddef.h>
> +#include <soc/fsl/qe/qe_tdm.h>
> +#include <uapi/linux/if_arp.h>
> +
> +#include "fsl_ucc_hdlc.h"
> +
> +#define DRV_DESC "Freescale QE UCC HDLC Driver"
> +#define DRV_NAME "ucc_hdlc"
> +
> +#define TDM_PPPOHT_SLIC_MAXIN
> +#define BROKEN_FRAME_INFO
> +
> +static struct ucc_tdm_info utdm_primary_info = {
> + .uf_info = {
> + .tsa = 0,
> + .cdp = 0,
> + .cds = 1,
> + .ctsp = 1,
> + .ctss = 1,
> + .revd = 0,
> + .urfs = 256,
> + .utfs = 256,
> + .urfet = 128,
> + .urfset = 192,
> + .utfet = 128,
> + .utftt = 0x40,
> + .ufpt = 256,
> + .mode = UCC_FAST_PROTOCOL_MODE_HDLC,
> + .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
> + .tenc = UCC_FAST_TX_ENCODING_NRZ,
> + .renc = UCC_FAST_RX_ENCODING_NRZ,
> + .tcrc = UCC_FAST_16_BIT_CRC,
> + .synl = UCC_FAST_SYNC_LEN_NOT_USED,
> + },
> +
> + .si_info = {
> +#ifdef TDM_PPPOHT_SLIC_MAXIN
> + .simr_rfsd = 1,
> + .simr_tfsd = 2,
> +#else
> + .simr_rfsd = 0,
> + .simr_tfsd = 0,
> +#endif
> + .simr_crt = 0,
> + .simr_sl = 0,
> + .simr_ce = 1,
> + .simr_fe = 1,
> + .simr_gm = 0,
> + },
> +};
> +
> +static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];
> +
> +static int uhdlc_init(struct ucc_hdlc_private *priv)
> +{
> + struct ucc_tdm_info *ut_info;
> + struct ucc_fast_info *uf_info;
> + u32 cecr_subblock;
> + u16 bd_status;
> + int ret, i;
> + void *bd_buffer;
> + dma_addr_t bd_dma_addr;
> + u32 riptr;
> + u32 tiptr;
> + u32 gumr;
> +
> + ut_info = priv->ut_info;
> + uf_info = &ut_info->uf_info;
> +
> + if (priv->tsa) {
> + uf_info->tsa = 1;
> + uf_info->ctsp = 1;
> + }
> + uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
> + UCC_HDLC_UCCE_TXB) << 16);
> +
> + ret = ucc_fast_init(uf_info, &priv->uccf);
> + if (ret) {
> + dev_err(priv->dev, "Failed to init uccf.");
> + return ret;
> + }
> +
> + priv->uf_regs = priv->uccf->uf_regs;
> + ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
> +
> + /* Loopback mode */
> + if (priv->loopback) {
> + dev_info(priv->dev, "Loopback Mode\n");
> + gumr = ioread32be(&priv->uf_regs->gumr);
> + gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
> + UCC_FAST_GUMR_TCI);
> + gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
> + iowrite32be(gumr, &priv->uf_regs->gumr);
> + }
> +
> + /* Initialize SI */
> + if (priv->tsa)
> + ucc_tdm_init(priv->utdm, priv->ut_info);
> +
> + /* Write to QE CECR, UCCx channel to Stop Transmission */
> + cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
> + ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
> + QE_CR_PROTOCOL_UNSPECIFIED, 0);
> +
> + /* Set UPSMR normal mode (needs fixing) */
> + iowrite32be(0, &priv->uf_regs->upsmr);
> +
> + priv->rx_ring_size = RX_BD_RING_LEN;
> + priv->tx_ring_size = TX_BD_RING_LEN;
> + /* Alloc Rx BD */
> + priv->rx_bd_base = dma_alloc_coherent(priv->dev,
> + RX_BD_RING_LEN * sizeof(struct qe_bd *),
> + &priv->dma_rx_bd, GFP_KERNEL);
> +
> + if (!priv->rx_bd_base) {
> + dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
> + ret = -ENOMEM;
> + goto rxbd_alloc_error;
> + }
> +
> + /* Alloc Tx BD */
> + priv->tx_bd_base = dma_alloc_coherent(priv->dev,
> + TX_BD_RING_LEN * sizeof(struct qe_bd *),
> + &priv->dma_tx_bd, GFP_KERNEL);
> +
> + if (!priv->tx_bd_base) {
> + dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
> + ret = -ENOMEM;
> + goto txbd_alloc_error;
> + }
> +
> + /* Alloc parameter ram for ucc hdlc */
> + priv->ucc_pram_offset = qe_muram_alloc(sizeof(priv->ucc_pram),
> + ALIGNMENT_OF_UCC_HDLC_PRAM);
> +
> + if (priv->ucc_pram_offset < 0) {
> + dev_err(priv->dev, "Cannot allocate MURAM for hdlc parameter.\n");
> + ret = -ENOMEM;
> + goto pram_alloc_error;
> + }
> +
> + priv->rx_skbuff = kzalloc(priv->rx_ring_size * sizeof(*priv->rx_skbuff),
> + GFP_KERNEL);
> + if (!priv->rx_skbuff)
> + goto rx_skb_alloc_error;
> +
> + priv->tx_skbuff = kzalloc(priv->tx_ring_size * sizeof(*priv->tx_skbuff),
> + GFP_KERNEL);
> + if (!priv->tx_skbuff)
> + goto tx_skb_alloc_error;
> +
> + priv->skb_curtx = 0;
> + priv->skb_dirtytx = 0;
> + priv->curtx_bd = priv->tx_bd_base;
> + priv->dirty_tx = priv->tx_bd_base;
> + priv->currx_bd = priv->rx_bd_base;
> + priv->currx_bdnum = 0;
> +
> + /* init parameter base */
> + cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
> + ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
> + QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
> +
> + priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
> + qe_muram_addr(priv->ucc_pram_offset);
> +
> + /* Zero out parameter ram */
> + memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));
> +
> + /* Alloc riptr, tiptr */
> + riptr = qe_muram_alloc(32, 32);
> + if (riptr < 0) {
> + dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
> + ret = -ENOMEM;
> + goto riptr_alloc_error;
> + }
> +
> + tiptr = qe_muram_alloc(32, 32);
> + if (tiptr < 0) {
> + dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
> + ret = -ENOMEM;
> + goto tiptr_alloc_error;
> + }
> +
> + /* Set RIPTR, TIPTR */
> + iowrite16be(riptr, &priv->ucc_pram->riptr);
> + iowrite16be(tiptr, &priv->ucc_pram->tiptr);
> +
> + /* Set MRBLR */
> + iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);
> +
> + /* Set RBASE, TBASE */
> + iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
> + iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);
> +
> + /* Set RSTATE, TSTATE */
> + iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
> + iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);
> +
> + /* Set C_MASK, C_PRES for 16bit CRC */
> + iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
> + iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);
> +
> + iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
> + iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
> + iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
> + iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask);
> + iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
> + iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
> + iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
> + iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
> +
> + /* Get BD buffer */
> + bd_buffer = dma_alloc_coherent(priv->dev,
> + (RX_BD_RING_LEN + TX_BD_RING_LEN) *
> + MAX_RX_BUF_LENGTH,
> + &bd_dma_addr, GFP_KERNEL);
> +
> + if (!bd_buffer) {
> + dev_err(priv->dev, "Could not allocate buffer descriptors\n");
> + ret = -ENOMEM;
> + goto bd_alloc_error;
> + }
> +
> + memset(bd_buffer, 0, (RX_BD_RING_LEN + TX_BD_RING_LEN)
> + * MAX_RX_BUF_LENGTH);
> +
> + priv->rx_buffer = bd_buffer;
> + priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
> +
> + priv->dma_rx_addr = bd_dma_addr;
> + priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
> +
> + for (i = 0; i < RX_BD_RING_LEN; i++) {
> + if (i < (RX_BD_RING_LEN - 1))
> + bd_status = R_E_S | R_I_S;
> + else
> + bd_status = R_E_S | R_I_S | R_W_S;
> +
> + iowrite16be(bd_status, &priv->rx_bd_base[i].status);
> + iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
> + &priv->rx_bd_base[i].buf);
> + }
> +
> + for (i = 0; i < TX_BD_RING_LEN; i++) {
> + if (i < (TX_BD_RING_LEN - 1))
> + bd_status = T_I_S | T_TC_S;
> + else
> + bd_status = T_I_S | T_TC_S | T_W_S;
> +
> + iowrite16be(bd_status, &priv->tx_bd_base[i].status);
> + iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
> + &priv->tx_bd_base[i].buf);
> + }
> +
> + return 0;
> +
> +bd_alloc_error:
> + qe_muram_free(tiptr);
> +tiptr_alloc_error:
> + qe_muram_free(riptr);
> +riptr_alloc_error:
> + kfree(priv->tx_skbuff);
> +tx_skb_alloc_error:
> + kfree(priv->rx_skbuff);
> +rx_skb_alloc_error:
> + qe_muram_free(priv->ucc_pram_offset);
> +pram_alloc_error:
> + dma_free_coherent(priv->dev,
> + TX_BD_RING_LEN * sizeof(struct qe_bd),
> + priv->tx_bd_base, priv->dma_tx_bd);
> +txbd_alloc_error:
> + dma_free_coherent(priv->dev,
> + RX_BD_RING_LEN * sizeof(struct qe_bd),
> + priv->rx_bd_base, priv->dma_rx_bd);
> +rxbd_alloc_error:
> + ucc_fast_free(priv->uccf);
> +
> + return ret;
> +}
> +
> +static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
> +{
> + hdlc_device *hdlc = dev_to_hdlc(dev);
> + struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
> + struct qe_bd __iomem *bd;
> + u16 bd_status;
> + unsigned long flags;
> + u8 *send_buf;
> + int i;
> + u16 *proto_head;
> +
> + switch (dev->type) {
> + case ARPHRD_RAWHDLC:
> + if (skb_headroom(skb) < HDLC_HEAD_LEN) {
> + dev->stats.tx_dropped++;
> + dev_kfree_skb(skb);
> + netdev_err(dev, "Not enough space for hdlc head\n");
> + return -ENOMEM;
> + }
> +
> + skb_push(skb, HDLC_HEAD_LEN);
> +
> + proto_head = (u16 *)skb->data;
> + *proto_head = htons(DEFAULT_HDLC_HEAD);
> +
> + dev->stats.tx_bytes += skb->len;
> + break;
> +
> + case ARPHRD_PPP:
> + proto_head = (u16 *)skb->data;
> + if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
> + dev->stats.tx_dropped++;
> + dev_kfree_skb(skb);
> + netdev_err(dev, "Wrong ppp header\n");
> + return -ENOMEM;
> + }
> +
> + dev->stats.tx_bytes += skb->len;
> + break;
> +
> + default:
> + dev->stats.tx_dropped++;
> + dev_kfree_skb(skb);
> + return -ENOMEM;
> + }
> +
> + pr_info("Tx data skb->len:%d ", skb->len);
> + send_buf = (u8 *)skb->data;
> + pr_info("\nTransmitted data:\n");
> + for (i = 0; i < 16; i++) {
> + if (i == skb->len)
> + pr_info("++++");
> + else
> + pr_info("%02x\n", send_buf[i]);
> + }
> + spin_lock_irqsave(&priv->lock, flags);
> +
> + /* Start from the next BD that should be filled */
> + bd = priv->curtx_bd;
> + bd_status = ioread16be(&bd->status);
> + /* Save the skb pointer so we can free it later */
> + priv->tx_skbuff[priv->skb_curtx] = skb;
> +
> + /* Update the current skb pointer (wrapping if this was the last) */
> + priv->skb_curtx =
> + (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
> +
> + /* copy skb data to tx buffer for sdma processing */
> + memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
> + skb->data, skb->len);
> +
> + /* set bd status and length */
> + bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
> +
> + iowrite16be(bd_status, &bd->status);
> + iowrite16be(skb->len, &bd->length);
Shouldn't the status write come after the length write, since writing the status is what triggers the transmit?
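Roughly (untested, just to illustrate the ordering against this patch's code):

	/* Fill in the length (and anything else) first; setting T_R_S in the
	 * status word hands the BD to the QE, which may start sending at once.
	 */
	iowrite16be(skb->len, &bd->length);
	iowrite16be(bd_status, &bd->status);
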
Also, the RX/TX error handling is very basic. Only overall RX/TX error counts are
kept, with no detail such as overrun, frame, carrier, CRC etc.
These details are very useful when trying to track down the cause of an RX/TX error,
especially during development.
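Something along these lines in the RX path would already help a lot (an untested
sketch; R_CR_S/R_OV_S/R_LG_S are placeholders for whatever the receive BD error
bits end up being named in fsl_ucc_hdlc.h):

	if (bd_status & (R_CR_S | R_OV_S | R_LG_S)) {
		dev->stats.rx_errors++;
		if (bd_status & R_CR_S)		/* CRC error */
			dev->stats.rx_crc_errors++;
		if (bd_status & R_OV_S)		/* receiver overrun */
			dev->stats.rx_over_errors++;
		if (bd_status & R_LG_S)		/* frame longer than MFLR */
			dev->stats.rx_length_errors++;
		/* drop the frame rather than passing it up the stack */
	}
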
Jocke