[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <aSSMxU6iNdAGheSc@lizhi-Precision-Tower-5810>
Date: Mon, 24 Nov 2025 11:50:13 -0500
From: Frank Li <Frank.li@....com>
To: Haibo Chen <haibo.chen@....com>
Cc: Han Xu <han.xu@....com>, Mark Brown <broonie@...nel.org>,
Rob Herring <robh@...nel.org>,
Krzysztof Kozlowski <krzk+dt@...nel.org>,
Conor Dooley <conor+dt@...nel.org>, Shawn Guo <shawnguo@...nel.org>,
Sascha Hauer <s.hauer@...gutronix.de>,
Pengutronix Kernel Team <kernel@...gutronix.de>,
Fabio Estevam <festevam@...il.com>, linux-spi@...r.kernel.org,
imx@...ts.linux.dev, devicetree@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-arm-kernel@...ts.infradead.org
Subject: Re: [PATCH v5 2/2] spi: add driver for NXP XSPI controller
On Mon, Nov 24, 2025 at 05:25:22PM +0800, Haibo Chen wrote:
> Add driver support for NXP XSPI controller.
>
> XSPI is a flexible SPI host controller which supports up to
> 2 external devices (2 CS). It supports Single/Dual/Quad/Octal
> mode data transfer.
>
> The difference between XSPI and Flexspi:
> 1. The register layout is totally different.
> 2.XSPI support multiple independent execution environments
> (EENVs) for HW virtualization with some limitations. Each EENV
> has its own interrupt and its own set of programming registers
> that exists in a specific offset range in the XSPI memory map.
> The main environment (EENV0) address space contains all of the
> registers for controlling EENV0 plus all of the general XSPI
> control and programming registers. The register mnemonics for
> the user environments (EENV1 to EENV4) have "_SUB_n" appended
> to the mnemonic for the corresponding main-environment register.
Nit: please wrap at 75 chars if you need to respin the patch.
>
> The current driver is based on EENV0, which means the system has
> already given Linux the rights to EENV0.
>
...
> + /* Wait for the CLR_RXF clear */
> + ret = readl_poll_timeout(base + XSPI_MCR, reg,
> + !(reg & XSPI_MCR_CLR_RXF), 1, POLL_TOUT_US);
> + WARN_ON(ret);
> +}
> +
> +static int nxp_xspi_do_op(struct nxp_xspi *xspi, const struct spi_mem_op *op)
> +{
> + void __iomem *base = xspi->iobase;
> + int watermark, err = 0;
> + u32 reg, len;
> +
> + len = op->data.nbytes;
> + if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT) {
> + /* Clear the TX FIFO. */
> + reg = readl(base + XSPI_MCR);
> + reg |= XSPI_MCR_CLR_TXF;
> + writel(reg, base + XSPI_MCR);
> + /* Wait for the CLR_TXF clear */
> + err = readl_poll_timeout(base + XSPI_MCR, reg,
> + !(reg & XSPI_MCR_CLR_TXF), 1, POLL_TOUT_US);
> + WARN_ON(err);
Is it enough to just print a warning? If a timeout happens, should the
operation below still continue? The same applies to the other void helper
functions (such as the WARN_ON(ret); above). If clearing the FIFO fails,
garbage data is presumably still in the FIFO. I think it should return an
error to user space to indicate the op failed.
Or are you sure our hardware can tolerate this error?
> + /* Cover the no 4bytes alignment data length */
> + watermark = (xspi->devtype_data->txfifo - ALIGN(op->data.nbytes, 4)) / 4 + 1;
> + reg = FIELD_PREP(XSPI_TBCT_WMRK_MASK, watermark);
> + writel(reg, base + XSPI_TBCT);
> + /*
> + * According to the RM, for TBDR register, a write transaction on the
> + * flash memory with data size of less than 32 bits leads to the removal
> + * of one data entry from the TX buffer. The valid bits are used and the
> + * rest of the bits are discarded.
> + * But for data sizes larger than 32 bits, according to tests, data that
> + * is not 4-byte aligned loses its last 1~3 bytes, because the TX buffer
> + * uses 4-byte entries.
> + * So here adjust the transfer data length to make it 4bytes alignment.
> + * then will meet the upper watermark setting, trigger the 4bytes entries
> + * pop out.
> + * Will use extra 0xff to append, refer to nxp_xspi_fill_txfifo().
> + */
> + if (len > 4)
> + len = ALIGN(op->data.nbytes, 4);
> +
> + } else if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN) {
> + /* Invalid RXFIFO first */
> + reg = readl(base + XSPI_MCR);
> + reg |= XSPI_MCR_CLR_RXF;
> + writel(reg, base + XSPI_MCR);
> + /* Wait for the CLR_RXF clear */
> + err = readl_poll_timeout(base + XSPI_MCR, reg,
> + !(reg & XSPI_MCR_CLR_RXF), 1, POLL_TOUT_US);
> + WARN_ON(err);
> + reg = FIELD_PREP(XSPI_RBCT_WMRK_MASK, 31);
> + writel(reg, base + XSPI_RBCT);
> + }
> +
> + init_completion(&xspi->c);
> +
> + /* Config the data address */
> + writel(op->addr.val + xspi->memmap_phy, base + XSPI_SFP_TG_SFAR);
> +
> + /* Config the data size and lut id, trigger the transfer */
> + reg = FIELD_PREP(XSPI_SFP_TG_IPCR_SEQID_MASK, XSPI_SEQID_LUT) |
> + FIELD_PREP(XSPI_SFP_TG_IPCR_IDATSZ_MASK, len);
> + writel(reg, base + XSPI_SFP_TG_IPCR);
> +
> + if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
> + nxp_xspi_fill_txfifo(xspi, op);
> +
> + /* Wait for the interrupt. */
> + if (!wait_for_completion_timeout(&xspi->c, msecs_to_jiffies(1000)))
> + err = -ETIMEDOUT;
> +
> + /* Invoke IP data read, if request is of data read. */
> + if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
> + nxp_xspi_read_rxfifo(xspi, op);
> +
> + return err;
> +}
...
> +static int nxp_xspi_probe(struct platform_device *pdev)
> +{
> + struct device *dev = &pdev->dev;
> + struct spi_controller *ctlr;
> + struct nxp_xspi *xspi;
> + struct resource *res;
> + int ret, irq;
> +
> + ctlr = devm_spi_alloc_host(dev, sizeof(*xspi));
> + if (!ctlr)
> + return -ENOMEM;
> +
> + ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL |
> + SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL;
> +
> + xspi = spi_controller_get_devdata(ctlr);
> + xspi->dev = dev;
> + xspi->devtype_data = device_get_match_data(dev);
> + if (!xspi->devtype_data)
> + return -ENODEV;
> +
> + platform_set_drvdata(pdev, xspi);
> +
> + /* Find the resources - configuration register address space */
> + xspi->iobase = devm_platform_ioremap_resource_byname(pdev, "base");
> + if (IS_ERR(xspi->iobase))
> + return PTR_ERR(xspi->iobase);
> +
> + /* Find the resources - controller memory mapped space */
> + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmap");
> + if (!res)
> + return -ENODEV;
> +
> + /* Assign memory mapped starting address and mapped size. */
> + xspi->memmap_phy = res->start;
> + xspi->memmap_phy_size = resource_size(res);
> +
> + /* Find the clocks */
> + xspi->clk = devm_clk_get(dev, "per");
> + if (IS_ERR(xspi->clk))
> + return PTR_ERR(xspi->clk);
> +
> + /* Find the irq */
> + irq = platform_get_irq(pdev, 0);
> + if (irq < 0)
> + return dev_err_probe(dev, irq, "Failed to get irq source");
> +
> + pm_runtime_set_autosuspend_delay(dev, XSPI_RPM_TIMEOUT_MS);
> + pm_runtime_use_autosuspend(dev);
> + devm_pm_runtime_enable(dev);
Need to check the return value of devm_pm_runtime_enable() here.
Frank
> +
> + PM_RUNTIME_ACQUIRE_AUTOSUSPEND(dev, pm);
> + ret = PM_RUNTIME_ACQUIRE_ERR(&pm);
> + if (ret)
> + return dev_err_probe(dev, ret, "Failed to enable clock");
> +
> + /* Clear potential interrupt by write xspi errstat */
> + writel(0xFFFFFFFF, xspi->iobase + XSPI_ERRSTAT);
> + writel(0xFFFFFFFF, xspi->iobase + XSPI_FR);
> +
> + nxp_xspi_default_setup(xspi);
> +
> + ret = devm_request_irq(dev, irq,
> + nxp_xspi_irq_handler, 0, pdev->name, xspi);
> + if (ret)
> + return dev_err_probe(dev, ret, "failed to request irq");
> +
> + ret = devm_mutex_init(dev, &xspi->lock);
> + if (ret)
> + return ret;
> +
> + ret = devm_add_action_or_reset(dev, nxp_xspi_cleanup, xspi);
> + if (ret)
> + return ret;
> +
> + ctlr->bus_num = -1;
> + ctlr->num_chipselect = NXP_XSPI_MAX_CHIPSELECT;
> + ctlr->mem_ops = &nxp_xspi_mem_ops;
> + ctlr->mem_caps = &nxp_xspi_mem_caps;
> + ctlr->dev.of_node = dev->of_node;
> +
> + return devm_spi_register_controller(dev, ctlr);
> +}
> +
...
> +MODULE_DESCRIPTION("NXP xSPI Controller Driver");
> +MODULE_AUTHOR("NXP Semiconductor");
> +MODULE_AUTHOR("Haibo Chen <haibo.chen@....com>");
> +MODULE_LICENSE("GPL");
>
> --
> 2.34.1
>
Powered by blists - more mailing lists