Message-ID: <2888130d-3910-49fe-95ef-3864b1e2ff2d@intel.com>
Date: Thu, 20 Nov 2025 08:40:52 +0200
From: Adrian Hunter <adrian.hunter@...el.com>
To: Md Sadre Alam <quic_mdalam@...cinc.com>, <ulf.hansson@...aro.org>,
<abel.vesa@...aro.org>, <ebiggers@...gle.com>,
<linux-arm-msm@...r.kernel.org>, <linux-mmc@...r.kernel.org>,
<linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v5] mmc: sdhci-msm: Enable ICE for CQE-capable controllers
with non-CQE cards
On 19/11/2025 13:46, Md Sadre Alam wrote:
> Enable Inline Crypto Engine (ICE) support for CQE-capable sdhci-msm
> controllers when used with eMMC cards that do not support CQE.
>
> This addresses the scenario where:
> - The host controller supports CQE (and has CQHCI crypto infrastructure)
> - The eMMC card does not support CQE
> - Standard (non-CMDQ) requests need crypto support
>
> This allows hardware-accelerated encryption and decryption for standard
> requests on CQE-capable hardware by utilizing the existing CQHCI crypto
> register space even when CQE functionality is not available due to card
> limitations.
>
> The implementation:
> - Adds ICE register definitions for non-CQE crypto configuration
> - Implements per-request crypto setup via sdhci_msm_ice_cfg()
> - Hooks into the request path via mmc_host_ops.request for non-CQE requests
> - Uses CQHCI register space (NONCQ_CRYPTO_PARM/DUN) for crypto configuration
>
> With this, CQE-capable controllers can benefit from inline encryption
> when paired with non-CQE cards, improving performance for encrypted I/O
> while maintaining compatibility with existing CQE crypto support.
>
> Signed-off-by: Md Sadre Alam <quic_mdalam@...cinc.com>
> Acked-by: Adrian Hunter <adrian.hunter@...el.com>
> ---
>
> Change in [v5]
>
> * Removed unused variable
>
> * Added proper comment for sdhci_msm_request()
>
> * Removed sdhci_msm_ice_enable(); it is already invoked during resume
>
> Change in [v4]
>
> * Moved ICE initialization for non-CMDQ into sdhci_msm_ice_cfg() and made
> it conditional on mrq->crypto_ctx to enable lazy setup.
>
> * Added msm_host->ice_init_done guard to prevent redundant initialization.
>
> * Updated commit message
>
> Change in [v3]
>
> * Refactored logic to use separate code paths for crypto_ctx != NULL and
> crypto_ctx == NULL to improve readability.
>
> * Renamed bypass to crypto_enable to align with bitfield semantics.
>
> * Removed slot variable
>
> * Added ICE initialization sequence for non-CMDQ eMMC devices before
> __sdhci_add_host()
>
> Change in [v2]
>
> * Moved NONCQ_CRYPTO_PARM and NONCQ_CRYPTO_DUN register definitions into
> sdhci-msm.c
>
> * Introduced use of GENMASK() and FIELD_PREP() macros for cleaner and more
> maintainable bitfield handling in ICE configuration.
>
> * Removed redundant if (!mrq || !cq_host) check from sdhci_msm_ice_cfg()
> as both are guaranteed to be valid in the current call path.
>
> * Added assignment of host->mmc_host_ops.request = sdhci_msm_request; to
> integrate ICE configuration into the standard request path for non-CMDQ
> eMMC devices.
>
> * Removed sdhci_crypto_cfg() from sdhci.c and its invocation in sdhci_request()
>
> Change in [v1]
>
> * Added initial support for Inline Crypto Engine (ICE) on non-CMDQ eMMC
> devices.
>
> drivers/mmc/host/sdhci-msm.c | 101 +++++++++++++++++++++++++++++++++++
> 1 file changed, 101 insertions(+)
>
> diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
> index 4e5edbf2fc9b..69c67242519c 100644
> --- a/drivers/mmc/host/sdhci-msm.c
> +++ b/drivers/mmc/host/sdhci-msm.c
> @@ -157,6 +157,18 @@
> #define CQHCI_VENDOR_CFG1 0xA00
> #define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13)
>
> +/* Non-command-queue crypto enable registers */
> +#define NONCQ_CRYPTO_PARM 0x70
> +#define NONCQ_CRYPTO_DUN 0x74
> +
> +#define DISABLE_CRYPTO BIT(15)
> +#define CRYPTO_GENERAL_ENABLE BIT(1)
> +#define HC_VENDOR_SPECIFIC_FUNC4 0x260
> +#define ICE_HCI_SUPPORT BIT(28)
> +
> +#define ICE_HCI_PARAM_CCI GENMASK(7, 0)
> +#define ICE_HCI_PARAM_CE GENMASK(8, 8)
> +
> struct sdhci_msm_offset {
> u32 core_hc_mode;
> u32 core_mci_data_cnt;
> @@ -300,6 +312,7 @@ struct sdhci_msm_host {
> u32 dll_config;
> u32 ddr_config;
> bool vqmmc_enabled;
> + bool ice_init_done;
> };
>
> static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
> @@ -2009,6 +2022,91 @@ static int sdhci_msm_ice_keyslot_evict(struct blk_crypto_profile *profile,
> return qcom_ice_evict_key(msm_host->ice, slot);
> }
>
> +static void sdhci_msm_non_cqe_ice_init(struct sdhci_host *host)
> +{
> + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
> + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
> + struct mmc_host *mmc = msm_host->mmc;
> + struct cqhci_host *cq_host = mmc->cqe_private;
> + u32 config;
> + u32 ice_cap;
> +
> + config = sdhci_readl(host, HC_VENDOR_SPECIFIC_FUNC4);
> + config &= ~DISABLE_CRYPTO;
> + sdhci_writel(host, config, HC_VENDOR_SPECIFIC_FUNC4);
> + ice_cap = cqhci_readl(cq_host, CQHCI_CAP);
> + if (ice_cap & ICE_HCI_SUPPORT) {
> + config = cqhci_readl(cq_host, CQHCI_CFG);
> + config |= CRYPTO_GENERAL_ENABLE;
> + cqhci_writel(cq_host, config, CQHCI_CFG);
> + }
> +}
> +
> +static int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq)
> +{
> + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
> + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
> + struct mmc_host *mmc = msm_host->mmc;
> + struct cqhci_host *cq_host = mmc->cqe_private;
> + unsigned int crypto_params = 0;
> + int key_index;
> + bool crypto_enable;
> + u64 dun = 0;
> +
> + if (mrq->crypto_ctx) {
> + if (!msm_host->ice_init_done) {
> + sdhci_msm_non_cqe_ice_init(host);
> + msm_host->ice_init_done = true;
> + }
> +
> + crypto_enable = true;
> + dun = mrq->crypto_ctx->bc_dun[0];
> + key_index = mrq->crypto_key_slot;
> + crypto_params = FIELD_PREP(ICE_HCI_PARAM_CE, crypto_enable) |
> + FIELD_PREP(ICE_HCI_PARAM_CCI, key_index);
> +
> + cqhci_writel(cq_host, crypto_params, NONCQ_CRYPTO_PARM);
> + cqhci_writel(cq_host, lower_32_bits(dun), NONCQ_CRYPTO_DUN);
> + } else {
> + cqhci_writel(cq_host, crypto_params, NONCQ_CRYPTO_PARM);
> + }
> +
> + /* Ensure crypto configuration is written before proceeding */
> + wmb();
> +
> + return 0;
> +}
> +
> +/*
> + * sdhci_msm_request - Handle non-CQE MMC requests with crypto support
> + * @mmc: MMC host
> + * @mrq: MMC request
> + *
> + * This function is called for non-CQE requests only. The MMC block layer
> + * routes requests as follows:
> + *
> + * if (host->cqe_enabled)
> + * ret = mmc_blk_cqe_issue_rw_rq(mq, req); // → cqhci_request()
> + * else
> + * ret = mmc_blk_mq_issue_rw_rq(mq, req); // → sdhci_msm_request()
> + *
> + * For CQE requests, crypto is handled in cqhci_request() in
> + * drivers/mmc/host/cqhci-core.c using the existing CQE crypto infrastructure.
> + *
> + * For non-CQE requests, this function provides crypto support by configuring
> + * the ICE (Inline Crypto Engine) registers before passing the request to
> + * the standard SDHCI request handler.
> + */
Kernel style is not to put kernel-doc-like comments on callback
functions, since the functionality is defined by the upper layer,
and there is no point duplicating that information for every single
implementation.
> +static void sdhci_msm_request(struct mmc_host *mmc, struct mmc_request *mrq)
> +{
> + struct sdhci_host *host = mmc_priv(mmc);
> +
A simple comment here would suffice, say something like:

	/* Only need to handle non-CQE crypto requests in this path */

(full sketch below the function)
> + if (mmc->caps2 & MMC_CAP2_CRYPTO)
> + sdhci_msm_ice_cfg(host, mrq);
> +
> + sdhci_request(mmc, mrq);
> +}
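I.e. with the kernel-doc block dropped, the end result could look something
like this (untested, just to illustrate the two points above):

static void sdhci_msm_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);

	/* Only need to handle non-CQE crypto requests in this path */
	if (mmc->caps2 & MMC_CAP2_CRYPTO)
		sdhci_msm_ice_cfg(host, mrq);

	sdhci_request(mmc, mrq);
}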
> +
> static const struct blk_crypto_ll_ops sdhci_msm_crypto_ops = {
> .keyslot_program = sdhci_msm_ice_keyslot_program,
> .keyslot_evict = sdhci_msm_ice_keyslot_evict,
> @@ -2759,6 +2857,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
>
> msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
>
> +#ifdef CONFIG_MMC_CRYPTO
> + host->mmc_host_ops.request = sdhci_msm_request;
> +#endif
> /* Set the timeout value to max possible */
> host->max_timeout_count = 0xF;
>