[<prev] [next>] [day] [month] [year] [list]
Message-ID: <20251224101050.3497746-1-quic_mdalam@quicinc.com>
Date: Wed, 24 Dec 2025 15:40:50 +0530
From: Md Sadre Alam <quic_mdalam@...cinc.com>
To: <adrian.hunter@...el.com>, <quic_asutoshd@...cinc.com>,
<ulf.hansson@...aro.org>, <linux-mmc@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <linux-arm-msm@...r.kernel.org>,
<ebiggers@...nel.org>
CC: <quic_mdalam@...cinc.com>
Subject: [PATCH] mmc: sdhci-msm: Add quirk to disable CQE for ICE legacy mode
Some hosts require the Inline Crypto Engine (ICE) to operate in legacy mode
instead of Command Queue Engine (CQE) mode, due to platform-specific
requirements or for compatibility reasons. Introduce a host-level quirk
`host_disable_cqe` to forcibly disable CQE negotiation and allow ICE
to function through the legacy request path.
When the device tree omits the "supports-cqe" property, the driver sets
`host_disable_cqe = true` and avoids enabling MMC_CAP2_CQE during card
initialization. This ensures that even CQE-capable hardware falls back
to legacy SDHCI request handling. A minimal `cqhci_disable_ops` is
provided with `.cqe_enable = cqhci_host_disable` returning -EINVAL to
force the fallback. Other ops are left NULL for safe defaults.
For builds without CONFIG_MMC_CRYPTO, the driver uses standard
sdhci_add_host() to avoid unnecessary CQE infrastructure initialization.
This allows platforms to forcibly opt out of CQE usage and ensures that
ICE operates reliably in legacy mode, providing stable crypto operations
without the complexity of command queuing.
Signed-off-by: Md Sadre Alam <quic_mdalam@...cinc.com>
---
drivers/mmc/host/cqhci-core.c | 28 +++++++++++++++++++++++++++-
drivers/mmc/host/sdhci-msm.c | 20 +++++++++++++++-----
include/linux/mmc/host.h | 1 +
3 files changed, 43 insertions(+), 6 deletions(-)
diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
index 178277d90c31..32da3b856db1 100644
--- a/drivers/mmc/host/cqhci-core.c
+++ b/drivers/mmc/host/cqhci-core.c
@@ -334,6 +334,12 @@ int cqhci_resume(struct mmc_host *mmc)
}
EXPORT_SYMBOL(cqhci_resume);
+static int cqhci_host_disable(struct mmc_host *mmc, struct mmc_card *card)
+{
+ pr_info("%s: Host does not want to use CMDQ\n", mmc_hostname(mmc));
+ return -EINVAL;
+}
+
static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
struct cqhci_host *cq_host = mmc->cqe_private;
@@ -1135,6 +1141,18 @@ static const struct mmc_cqe_ops cqhci_cqe_ops = {
.cqe_recovery_finish = cqhci_recovery_finish,
};
+static const struct mmc_cqe_ops cqhci_disable_ops = {
+ .cqe_enable = cqhci_host_disable,
+ .cqe_disable = NULL,
+ .cqe_request = NULL,
+ .cqe_post_req = NULL,
+ .cqe_off = NULL,
+ .cqe_wait_for_idle = NULL,
+ .cqe_timeout = NULL,
+ .cqe_recovery_start = NULL,
+ .cqe_recovery_finish = NULL,
+};
+
struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
{
struct cqhci_host *cq_host;
@@ -1188,7 +1206,15 @@ int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
cq_host->num_slots = NUM_SLOTS;
cq_host->dcmd_slot = DCMD_SLOT;
- mmc->cqe_ops = &cqhci_cqe_ops;
+ /*
+ * Some platforms may not support CQE reliably.
+ * Use host_disable_cqe to force fallback to
+ * legacy request path.
+ */
+ if (mmc->host_disable_cqe)
+ mmc->cqe_ops = &cqhci_disable_ops;
+ else
+ mmc->cqe_ops = &cqhci_cqe_ops;
mmc->cqe_qdepth = NUM_SLOTS;
if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index da356627d9de..3295e8c9650b 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2200,6 +2200,7 @@ static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
#endif
};
+#ifdef CONFIG_MMC_CRYPTO
static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
struct platform_device *pdev)
{
@@ -2228,7 +2229,8 @@ static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
goto cleanup;
}
- msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+ if (!msm_host->mmc->host_disable_cqe)
+ msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
cq_host->ops = &sdhci_msm_cqhci_ops;
dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
@@ -2270,6 +2272,7 @@ static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
sdhci_cleanup_host(host);
return ret;
}
+#endif
/*
* Platform specific register write functions. This is so that, if any
@@ -2852,10 +2855,17 @@ static int sdhci_msm_probe(struct platform_device *pdev)
host->mmc_host_ops.start_signal_voltage_switch =
sdhci_msm_start_signal_voltage_switch;
host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
- if (of_property_read_bool(node, "supports-cqe"))
- ret = sdhci_msm_cqe_add_host(host, pdev);
- else
- ret = sdhci_add_host(host);
+ /*
+ * If "supports-cqe" is not set in DT, disable CQE at host level.
+ * This allows ICE to operate in legacy mode.
+ */
+ msm_host->mmc->host_disable_cqe = !of_property_read_bool(node,
+ "supports-cqe");
+#ifdef CONFIG_MMC_CRYPTO
+ ret = sdhci_msm_cqe_add_host(host, pdev);
+#else
+ ret = sdhci_add_host(host);
+#endif
if (ret)
goto pm_runtime_disable;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index e0e2c265e5d1..8b963ccbda19 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -565,6 +565,7 @@ struct mmc_host {
int cqe_qdepth;
bool cqe_enabled;
bool cqe_on;
+ bool host_disable_cqe;
/* Inline encryption support */
#ifdef CONFIG_MMC_CRYPTO
--
2.34.1
Powered by blists - more mailing lists