Message-ID: <20200917130523.GM3956970@smile.fi.intel.com>
Date: Thu, 17 Sep 2020 16:05:23 +0300
From: Andy Shevchenko <andriy.shevchenko@...el.com>
To: "Ramuthevar,Vadivel MuruganX"
<vadivel.muruganx.ramuthevar@...ux.intel.com>
Cc: miquel.raynal@...tlin.com, linux-kernel@...r.kernel.org,
linux-mtd@...ts.infradead.org, richard@....at, vigneshr@...com,
boris.brezillon@...labora.com, christophe.kerello@...com,
piotrs@...ence.com, robert.jarzmik@...e.fr,
brendanhiggins@...gle.com, devicetree@...r.kernel.org,
tglx@...utronix.de, hauke.mehrtens@...el.com, robh+dt@...nel.org,
linux-mips@...r.kernel.org, arnd@...db.de,
cheol.yong.kim@...el.com, qi-ming.wu@...el.com
Subject: Re: [PATCH v13 2/2] mtd: rawnand: Add NAND controller support on
Intel LGM SoC
On Thu, Sep 17, 2020 at 08:33:08AM +0800, Ramuthevar,Vadivel MuruganX wrote:
> From: Ramuthevar Vadivel Murugan <vadivel.muruganx.ramuthevar@...ux.intel.com>
>
> This patch adds support for the new NAND Flash Controller (NFC) IP
> on Intel's Lightning Mountain (LGM) SoC.
>
> DMA is used for burst data transfers; the DMA hardware supports
> 32-bit aligned memory addresses and aligned data accesses by default,
> with a DMA burst size of 8. The data register is used for read/write
> operations from/to the device.
>
> The NAND controller driver implements ->exec_op(), replacing the legacy
> hooks; this callback is used to execute NAND operations.
...
> +#include <linux/clk.h>
> +#include <linux/completion.h>
> +#include <linux/dmaengine.h>
> +#include <linux/dma-direction.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/err.h>
> +#include <linux/init.h>
> +#include <linux/io.h>
> +#include <linux/iopoll.h>
io.h is guaranteed to be included by iopoll.h.
> +#include <linux/kernel.h>
> +#include <linux/platform_device.h>
> +#include <linux/module.h>
> +#include <linux/mtd/mtd.h>
> +#include <linux/mtd/rawnand.h>
> +#include <linux/mtd/nand_ecc.h>
> +#include <linux/mtd/nand.h>
Since mtd is a hosting framework for this driver, I would move this group of headers after more generic ones with a blank line in between.
> +#include <linux/resource.h>
And this I think is guaranteed to be included by io.h.
> +#include <linux/sched.h>
> +#include <linux/slab.h>
> +#include <linux/types.h>
> +#include <linux/platform_device.h>
Duplicate? This is exactly why alphabetical ordering of the includes helps.
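For illustration, a possible result (generic headers first, alphabetized and
de-duplicated, then the mtd group after a blank line; io.h and resource.h
dropped per the notes above; untested):

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/rawnand.h>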
...
> +#define EBU_ADDR_SEL(n) (0x20 + (n) * 4)
I think 0x20 is a register offset here; it would be better to write it as
0x020 to be consistent with all the other offsets.
...
> +#define EBU_BUSCON(n) (0x60 + (n) * 4)
Ditto.
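I.e. (and similarly for any other offset macros in between):

#define EBU_ADDR_SEL(n) (0x020 + (n) * 4)
#define EBU_BUSCON(n) (0x060 + (n) * 4)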
...
> +static void ebu_nand_setup_timing(struct ebu_nand_controller *ctrl,
> + const struct nand_sdr_timings *timings)
> +{
> + unsigned int rate = clk_get_rate(ctrl->clk) / 1000000;
HZ_PER_MHZ?
> + unsigned int period = DIV_ROUND_UP(1000000, rate);
USEC_PER_SEC?
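I.e. something like the below (untested; assumes HZ_PER_MHZ and USEC_PER_SEC
are available, e.g. via linux/units.h and linux/time64.h):

	unsigned int rate = clk_get_rate(ctrl->clk) / HZ_PER_MHZ;
	unsigned int period = DIV_ROUND_UP(USEC_PER_SEC, rate);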
> + u32 trecov, thold, twrwait, trdwait;
> + u32 reg = 0;
> +
> + trecov = DIV_ROUND_UP(max(timings->tREA_max, timings->tREH_min),
> + period);
> + reg |= EBU_BUSCON_RECOVC(trecov);
> +
> + thold = DIV_ROUND_UP(max(timings->tDH_min, timings->tDS_min), period);
> + reg |= EBU_BUSCON_HOLDC(thold);
> +
> + trdwait = DIV_ROUND_UP(max(timings->tRC_min, timings->tREH_min),
> + period);
> + reg |= EBU_BUSCON_WAITRDC(trdwait);
> +
> + twrwait = DIV_ROUND_UP(max(timings->tWC_min, timings->tWH_min), period);
> + reg |= EBU_BUSCON_WAITWRC(twrwait);
> +
> + reg |= EBU_BUSCON_CMULT_V4 | EBU_BUSCON_BCGEN_CS | EBU_BUSCON_ALEC |
> + EBU_BUSCON_SETUP_EN;
> +
> + writel(reg, ctrl->ebu + EBU_BUSCON(ctrl->cs_num));
> +}
...
> + if (oob_required) {
> + reg = (chip->oob_poi[3] << 24) | (chip->oob_poi[2] << 16) |
> + (chip->oob_poi[1] << 8) | chip->oob_poi[0];
get_unaligned_le32()?
...
> + reg = (chip->oob_poi[7] << 24) | (chip->oob_poi[6] << 16) |
> + (chip->oob_poi[5] << 8) | chip->oob_poi[4];
Ditto.
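I.e. with asm/unaligned.h included (untested):

	reg = get_unaligned_le32(&chip->oob_poi[0]);
	...
	reg = get_unaligned_le32(&chip->oob_poi[4]);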
...
> + ret = readl_poll_timeout_atomic(int_sta, val,
> + !(val & HSNAND_INT_STA_WR_C), 10, 1000);
Slightly better (logically split between lines):
ret = readl_poll_timeout_atomic(int_sta, val, !(val & HSNAND_INT_STA_WR_C),
10, 1000);
> + if (ret)
> + return ret;
...
> + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ebunand");
> + ebu_host->ebu = devm_ioremap_resource(&pdev->dev, res);
Why not use
ebu_host->ebu = devm_platform_ioremap_resource_byname(pdev, "ebunand");
?
> + if (IS_ERR(ebu_host->ebu))
> + return PTR_ERR(ebu_host->ebu);
> +
> + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hsnand");
> + ebu_host->hsnand = devm_ioremap_resource(&pdev->dev, res);
Ditto.
> + if (IS_ERR(ebu_host->hsnand))
> + return PTR_ERR(ebu_host->hsnand);
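I.e. here as well (untested):

	ebu_host->hsnand = devm_platform_ioremap_resource_byname(pdev, "hsnand");
	if (IS_ERR(ebu_host->hsnand))
		return PTR_ERR(ebu_host->hsnand);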
...
> + for (i = 0; i < MAX_CS; i++) {
> + resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", i);
> + res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
> + resname);
> + if (!res)
> + return -EINVAL;
Redundant check.
> + ebu_host->cs[i].chipaddr = devm_ioremap_resource(dev, res);
Ditto (see above).
> + ebu_host->cs[i].nand_pa = res->start;
> + if (IS_ERR(ebu_host->cs[i].chipaddr))
> + return PTR_ERR(ebu_host->cs[i].chipaddr);
> + }
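With the redundant checks dropped (devm_ioremap_resource() already validates
the resource), the loop could look roughly like this (untested; res->start is
read only after devm_ioremap_resource() succeeded, so res is known to be valid):

	for (i = 0; i < MAX_CS; i++) {
		resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", i);
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
		ebu_host->cs[i].chipaddr = devm_ioremap_resource(dev, res);
		if (IS_ERR(ebu_host->cs[i].chipaddr))
			return PTR_ERR(ebu_host->cs[i].chipaddr);
		ebu_host->cs[i].nand_pa = res->start;
	}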
...
> + ebu_host->clk = devm_clk_get(dev, NULL);
> + if (IS_ERR(ebu_host->clk)) {
> + ret = PTR_ERR(ebu_host->clk);
> + dev_err(dev, "failed to get clock: %d\n", ret);
> + return ret;
return dev_err_probe() ?
> + }
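I.e. (untested):

	ebu_host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(ebu_host->clk))
		return dev_err_probe(dev, PTR_ERR(ebu_host->clk),
				     "failed to get clock\n");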
...
> + ebu_host->dma_tx = dma_request_chan(dev, "tx");
> + if (IS_ERR(ebu_host->dma_tx)) {
> + ret = PTR_ERR(ebu_host->dma_tx);
> + dev_err(dev, "DMA tx channel request fail!.\n");
> + goto err_cleanup_dma;
Ditto. On top of that, why the "!." in the message?
> + }
> +
> + ebu_host->dma_rx = dma_request_chan(dev, "rx");
> + if (IS_ERR(ebu_host->dma_rx)) {
> + ret = PTR_ERR(ebu_host->dma_rx);
> + dev_err(dev, "DMA rx channel request fail!.\n");
> + goto err_cleanup_dma;
Ditto.
> + }
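For the DMA channels dev_err_probe() also silences the -EPROBE_DEFER case,
e.g. (untested, same pattern for "rx"):

	ebu_host->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(ebu_host->dma_tx)) {
		ret = dev_err_probe(dev, PTR_ERR(ebu_host->dma_tx),
				    "failed to request DMA tx channel\n");
		goto err_cleanup_dma;
	}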
--
With Best Regards,
Andy Shevchenko