Message-ID: <1588031768-23677-5-git-send-email-chun-hung.wu@mediatek.com>
Date: Tue, 28 Apr 2020 07:56:07 +0800
From: Chun-Hung Wu <chun-hung.wu@...iatek.com>
To: <mirq-linux@...e.qmqm.pl>, Jonathan Hunter <jonathanh@...dia.com>,
Al Cooper <alcooperx@...il.com>,
Adrian Hunter <adrian.hunter@...el.com>,
Florian Fainelli <f.fainelli@...il.com>,
<bcm-kernel-feedback-list@...adcom.com>,
Andy Gross <agross@...nel.org>,
Bjorn Andersson <bjorn.andersson@...aro.org>,
Michal Simek <michal.simek@...inx.com>,
Thierry Reding <thierry.reding@...il.com>,
Chaotian Jing <chaotian.jing@...iatek.com>,
Ulf Hansson <ulf.hansson@...aro.org>,
Rob Herring <robh+dt@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Matthias Brugger <matthias.bgg@...il.com>,
Linus Walleij <linus.walleij@...aro.org>,
Pavel Machek <pavel@....cz>,
Kate Stewart <kstewart@...uxfoundation.org>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Martin Blumenstingl <martin.blumenstingl@...glemail.com>,
Pan Bian <bianpan2016@....com>,
Thomas Gleixner <tglx@...utronix.de>,
Allison Randal <allison@...utok.net>,
Mathieu Malaterre <malat@...ian.org>,
Stanley Chu <stanley.chu@...iatek.com>,
Kuohong Wang <kuohong.wang@...iatek.com>,
Yong Mao <yong.mao@...iatek.com>
CC: <kernel-team@...roid.com>, <linux-kernel@...r.kernel.org>,
<linux-mmc@...r.kernel.org>, <linux-mediatek@...ts.infradead.org>,
<devicetree@...r.kernel.org>, <wsd_upstream@...iatek.com>,
<linux-arm-kernel@...ts.infradead.org>,
<linux-arm-msm@...r.kernel.org>, <linux-tegra@...r.kernel.org>,
Chun-Hung Wu <chun-hung.wu@...iatek.com>
Subject: [PATCH v5 4/5] mmc: mediatek: command queue support
Support command queue for the mt6779 platform.
a. Add msdc_set_busy_timeout() to calculate the eMMC write-data/busy timeout.
b. Connect the MediaTek msdc driver to the cqhci driver through
host->cq_host->ops = &msdc_cmdq_ops;
c. msdc_cmdq_irq() links up with cqhci_irq(). In addition, it reports
more detailed IRQ errors such as RSPCRCERR/CMDTO/DATACRCERR/DATTMO.
d. Guard the CQHCI-specific code with the pattern below, because some
of our platforms do not support CQHCI and therefore do not enable the
CONFIG_MMC_CQHCI kernel option (a condensed sketch of the resulting
probe hookup follows this list):
#if IS_ENABLED(CONFIG_MMC_CQHCI)
XXX //CQHCI supported
#else
XXX //CQHCI not supported
#endif
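For reviewers, this is roughly how the probe path wires the controller into
cqhci under that guard (condensed from the msdc_drv_probe() hunk in the diff
below; allocation-failure handling is omitted here for brevity):

#if IS_ENABLED(CONFIG_MMC_CQHCI)
	if (mmc->caps2 & MMC_CAP2_CQE) {
		/* allocate the cqhci host and point it at the CQE register window */
		host->cq_host = devm_kzalloc(host->mmc->parent,
					     sizeof(*host->cq_host), GFP_KERNEL);
		host->cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
		host->cq_host->mmio = host->base + 0x800;  /* CQE registers live at base + 0x800 */
		host->cq_host->ops = &msdc_cmdq_ops;       /* msdc_cqe_enable()/msdc_cqe_disable() */
		cqhci_init(host->cq_host, mmc, true);      /* dma64 = true */

		/* cqhci descriptors carry a 16-bit transfer length (0 means 65536) */
		mmc->max_segs = 128;
		mmc->max_seg_size = 64 * 1024;
	}
#endif

msdc_cmdq_ops only needs .enable and .disable; request dispatch and completion
go through the generic cqhci code, with msdc_cmdq_irq() forwarding controller
interrupts to cqhci_irq().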
Signed-off-by: Chun-Hung Wu <chun-hung.wu@...iatek.com>
---
drivers/mmc/host/mtk-sd.c | 119 ++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 119 insertions(+)
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index a2328fb..8516888 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -31,6 +31,8 @@
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
+#include "cqhci.h"
+
#define MAX_BD_NUM 1024
/*--------------------------------------------------------------------------*/
@@ -151,6 +153,7 @@
#define MSDC_INT_DMA_BDCSERR (0x1 << 17) /* W1C */
#define MSDC_INT_DMA_GPDCSERR (0x1 << 18) /* W1C */
#define MSDC_INT_DMA_PROTECT (0x1 << 19) /* W1C */
+#define MSDC_INT_CMDQ (0x1 << 28) /* W1C */
/* MSDC_INTEN mask */
#define MSDC_INTEN_MMCIRQ (0x1 << 0) /* RW */
@@ -181,6 +184,7 @@
/* SDC_CFG mask */
#define SDC_CFG_SDIOINTWKUP (0x1 << 0) /* RW */
#define SDC_CFG_INSWKUP (0x1 << 1) /* RW */
+#define SDC_CFG_WRDTOC (0x1fff << 2) /* RW */
#define SDC_CFG_BUSWIDTH (0x3 << 16) /* RW */
#define SDC_CFG_SDIO (0x1 << 19) /* RW */
#define SDC_CFG_SDIOIDE (0x1 << 20) /* RW */
@@ -229,6 +233,7 @@
#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */
#define MSDC_PATCH_BIT1_CMDTA (0x7 << 3) /* RW */
+#define MSDC_PB1_BUSY_CHECK_SEL (0x1 << 7) /* RW */
#define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */
#define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */
@@ -432,6 +437,7 @@ struct msdc_host {
struct msdc_save_para save_para; /* used when gate HCLK */
struct msdc_tune_para def_tune_para; /* default tune setting */
struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */
+ struct cqhci_host *cq_host;
};
static const struct mtk_mmc_compatible mt8135_compat = {
@@ -528,6 +534,18 @@ static const struct mtk_mmc_compatible mt7620_compat = {
.use_internal_cd = true,
};
+static const struct mtk_mmc_compatible mt6779_compat = {
+ .clk_div_bits = 12,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE0,
+ .async_fifo = true,
+ .data_tune = true,
+ .busy_check = true,
+ .stop_clk_fix = true,
+ .enhance_rx = true,
+ .support_64g = true,
+};
+
static const struct of_device_id msdc_of_ids[] = {
{ .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat},
{ .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat},
@@ -537,6 +555,7 @@ static const struct of_device_id msdc_of_ids[] = {
{ .compatible = "mediatek,mt7622-mmc", .data = &mt7622_compat},
{ .compatible = "mediatek,mt8516-mmc", .data = &mt8516_compat},
{ .compatible = "mediatek,mt7620-mmc", .data = &mt7620_compat},
+ { .compatible = "mediatek,mt6779-mmc", .data = &mt6779_compat},
{}
};
MODULE_DEVICE_TABLE(of, msdc_of_ids);
@@ -740,6 +759,15 @@ static void msdc_set_timeout(struct msdc_host *host, u64 ns, u64 clks)
(u32)(timeout > 255 ? 255 : timeout));
}
+static void msdc_set_busy_timeout(struct msdc_host *host, u64 ns, u64 clks)
+{
+ u64 timeout;
+
+ timeout = msdc_timeout_cal(host, ns, clks);
+ sdr_set_field(host->base + SDC_CFG, SDC_CFG_WRDTOC,
+ (u32)(timeout > 8191 ? 8191 : timeout));
+}
+
static void msdc_gate_clock(struct msdc_host *host)
{
clk_disable_unprepare(host->src_clk_cg);
@@ -1426,6 +1454,36 @@ static void msdc_enable_sdio_irq(struct mmc_host *mmc, int enb)
pm_runtime_put_noidle(host->dev);
}
+#if IS_ENABLED(CONFIG_MMC_CQHCI)
+static irqreturn_t msdc_cmdq_irq(struct msdc_host *host, u32 intsts)
+{
+ int cmd_err = 0, dat_err = 0;
+
+ if (intsts & MSDC_INT_RSPCRCERR) {
+ cmd_err = -EILSEQ;
+ dev_err(host->dev, "%s: CMD CRC ERR", __func__);
+ } else if (intsts & MSDC_INT_CMDTMO) {
+ cmd_err = -ETIMEDOUT;
+ dev_err(host->dev, "%s: CMD TIMEOUT ERR", __func__);
+ }
+
+ if (intsts & MSDC_INT_DATCRCERR) {
+ dat_err = -EILSEQ;
+ dev_err(host->dev, "%s: DATA CRC ERR", __func__);
+ } else if (intsts & MSDC_INT_DATTMO) {
+ dat_err = -ETIMEDOUT;
+ dev_err(host->dev, "%s: DATA TIMEOUT ERR", __func__);
+ }
+
+ if (cmd_err || dat_err) {
+ dev_err(host->dev, "cmd_err = %d, dat_err = %d, intsts = 0x%x",
+ cmd_err, dat_err, intsts);
+ }
+
+ return cqhci_irq(host->mmc, 0, cmd_err, dat_err);
+}
+#endif
+
static irqreturn_t msdc_irq(int irq, void *dev_id)
{
struct msdc_host *host = (struct msdc_host *) dev_id;
@@ -1462,6 +1520,16 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
if (!(events & (event_mask & ~MSDC_INT_SDIOIRQ)))
break;
+#if IS_ENABLED(CONFIG_MMC_CQHCI)
+ if ((host->mmc->caps2 & MMC_CAP2_CQE) &&
+ (events & MSDC_INT_CMDQ)) {
+ msdc_cmdq_irq(host, events);
+ /* clear interrupts */
+ writel(events, host->base + MSDC_INT);
+ return IRQ_HANDLED;
+ }
+#endif
+
if (!mrq) {
dev_err(host->dev,
"%s: MRQ=NULL; events=%08X; event_mask=%08X\n",
@@ -2146,6 +2214,36 @@ static int msdc_get_cd(struct mmc_host *mmc)
return !val;
}
+static void msdc_cqe_enable(struct mmc_host *mmc)
+{
+ struct msdc_host *host = mmc_priv(mmc);
+
+ /* enable cmdq irq */
+ writel(MSDC_INT_CMDQ, host->base + MSDC_INTEN);
+ /* enable busy check */
+ sdr_set_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
+ /* default write data / busy timeout 20s */
+ msdc_set_busy_timeout(host, 20 * 1000000000ULL, 0);
+ /* default read data timeout 1s */
+ msdc_set_timeout(host, 1000000000ULL, 0);
+}
+
+static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
+{
+ struct msdc_host *host = mmc_priv(mmc);
+
+ /* disable cmdq irq */
+ sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INT_CMDQ);
+ /* disable busy check */
+ sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
+
+ if (recovery) {
+ sdr_set_field(host->base + MSDC_DMA_CTRL,
+ MSDC_DMA_CTRL_STOP, 1);
+ msdc_reset_hw(host);
+ }
+}
+
static const struct mmc_host_ops mt_msdc_ops = {
.post_req = msdc_post_req,
.pre_req = msdc_pre_req,
@@ -2162,6 +2260,11 @@ static const struct mmc_host_ops mt_msdc_ops = {
.hw_reset = msdc_hw_reset,
};
+static const struct cqhci_host_ops msdc_cmdq_ops = {
+ .enable = msdc_cqe_enable,
+ .disable = msdc_cqe_disable,
+};
+
static void msdc_of_property_parse(struct platform_device *pdev,
struct msdc_host *host)
{
@@ -2312,6 +2415,22 @@ static int msdc_drv_probe(struct platform_device *pdev)
host->dma_mask = DMA_BIT_MASK(32);
mmc_dev(mmc)->dma_mask = &host->dma_mask;
+#if IS_ENABLED(CONFIG_MMC_CQHCI)
+ if (mmc->caps2 & MMC_CAP2_CQE) {
+ host->cq_host = devm_kzalloc(host->mmc->parent,
+ sizeof(*host->cq_host),
+ GFP_KERNEL);
+ host->cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+ host->cq_host->mmio = host->base + 0x800;
+ host->cq_host->ops = &msdc_cmdq_ops;
+ cqhci_init(host->cq_host, mmc, true);
+ mmc->max_segs = 128;
+ /* cqhci uses a 16-bit transfer length field; */
+ /* a length of 0 means 65536, so no -1 is needed here */
+ mmc->max_seg_size = 64 * 1024;
+ }
+#endif
+
host->timeout_clks = 3 * 1048576;
host->dma.gpd = dma_alloc_coherent(&pdev->dev,
2 * sizeof(struct mt_gpdma_desc),
--
2.6.4