Message-Id: <1631554694-9599-19-git-send-email-abel.vesa@nxp.com>
Date: Mon, 13 Sep 2021 20:38:13 +0300
From: Abel Vesa <abel.vesa@....com>
To: Rob Herring <robh@...nel.org>, Dong Aisheng <aisheng.dong@....com>,
Shawn Guo <shawnguo@...nel.org>,
Sascha Hauer <s.hauer@...gutronix.de>,
Fabio Estevam <festevam@...il.com>,
"catalin.marinas@....com" <catalin.marinas@....com>,
Will Deacon <will.deacon@....com>,
MyungJoo Ham <myungjoo.ham@...sung.com>,
Kyungmin Park <kyungmin.park@...sung.com>,
Chanwoo Choi <cw00.choi@...sung.com>,
Georgi Djakov <djakov@...nel.org>,
Adrian Hunter <adrian.hunter@...el.com>,
Ulf Hansson <ulf.hansson@...aro.org>,
Ahmad Fatoum <a.fatoum@...gutronix.de>
Cc: Pengutronix Kernel Team <kernel@...gutronix.de>,
linux-serial@...r.kernel.org, NXP Linux Team <linux-imx@....com>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
devicetree@...r.kernel.org, linux-pm@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org, Abel Vesa <abel.vesa@....com>
Subject: [RFC 18/19] mmc: sdhci-esdhc-imx: Add interconnect support
On probe, if the device tree node contains a valid icc path, look up the
fsl,icc-rate property to get the rate and set the icc bandwidth for that
path to the nominal rate the controller needs to function properly. The
path is then enabled whenever the controller is in use and disabled when
it is not (on system and runtime suspend/resume). This allows the clock
speeds along the icc path (each pl301 and the NoC) to be reduced while
still meeting the requirements of all the other icc consumers.
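For reference, a consumer node would hook up to the path roughly as
sketched below, using the generic interconnect consumer binding plus the
new fsl,icc-rate property. The node label, provider phandle, specifier
IDs and the rate value are placeholders only; the driver relies solely
on an interconnect named "path" and on fsl,icc-rate:

	&usdhc1 {
		/* placeholder provider phandle and master/slave IDs */
		interconnects = <&noc MASTER_ID &noc SLAVE_ID>;
		interconnect-names = "path";
		/* nominal bandwidth requested at probe (placeholder value) */
		fsl,icc-rate = <400000000>;
	};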
Signed-off-by: Abel Vesa <abel.vesa@....com>
---
drivers/mmc/host/sdhci-esdhc-imx.c | 27 +++++++++++++++++++++++++++
1 file changed, 27 insertions(+)
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index f18d169bc8ff..9773a9efaae1 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/bitfield.h>
+#include <linux/interconnect.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
 #include <linux/delay.h>
@@ -324,6 +325,9 @@ struct pltfm_imx_data {
 	struct clk *clk_ahb;
 	struct clk *clk_per;
 	unsigned int actual_clock;
+	struct icc_path *bus_path;
+	unsigned int bus_rate;
+
 	enum {
 		NO_CMD_PENDING,      /* no multiblock command pending */
 		MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */
@@ -1573,6 +1577,19 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
 	if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
 		cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0);
 
+	imx_data->bus_path = devm_of_icc_get(&pdev->dev, "path");
+	if (IS_ERR(imx_data->bus_path)) {
+		err = PTR_ERR(imx_data->bus_path);
+		goto free_sdhci;
+	} else if (imx_data->bus_path) {
+		if (of_property_read_u32(pdev->dev.of_node, "fsl,icc-rate", &imx_data->bus_rate)) {
+			dev_err(&pdev->dev, "icc-rate missing\n");
+			err = -EINVAL;
+			goto free_sdhci;
+		}
+		err = icc_set_bw(imx_data->bus_path, 0, imx_data->bus_rate);
+	}
+
 	imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
 	if (IS_ERR(imx_data->clk_ipg)) {
 		err = PTR_ERR(imx_data->clk_ipg);
@@ -1762,14 +1779,20 @@ static int sdhci_esdhc_suspend(struct device *dev)
 
 	ret = mmc_gpio_set_cd_wake(host->mmc, true);
 
+	icc_disable(imx_data->bus_path);
+
 	return ret;
 }
 
 static int sdhci_esdhc_resume(struct device *dev)
 {
 	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
 	int ret;
 
+	icc_enable(imx_data->bus_path);
+
 	ret = pinctrl_pm_select_default_state(dev);
 	if (ret)
 		return ret;
@@ -1821,6 +1844,8 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev)
 
 	if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
 		cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
 
+	icc_disable(imx_data->bus_path);
+
 	return ret;
 }
@@ -1831,6 +1856,8 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
 	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
 	int err;
 
+	icc_enable(imx_data->bus_path);
+
 	if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
 		cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0);
 
--
2.31.1