Message-ID: <20240518-opp_support-v13-6-78c73edf50de@quicinc.com>
Date: Sat, 18 May 2024 19:01:47 +0530
From: Krishna chaitanya chundru <quic_krichai@...cinc.com>
To: Bjorn Andersson <andersson@...nel.org>,
Konrad Dybcio <konrad.dybcio@...aro.org>,
Rob Herring <robh@...nel.org>,
Krzysztof Kozlowski <krzysztof.kozlowski+dt@...aro.org>,
Conor Dooley <conor+dt@...nel.org>,
Manivannan Sadhasivam <manivannan.sadhasivam@...aro.org>,
Lorenzo Pieralisi <lpieralisi@...nel.org>,
Krzysztof Wilczyński <kw@...ux.com>,
Bjorn Helgaas <bhelgaas@...gle.com>, <johan+linaro@...nel.org>,
<bmasney@...hat.com>, <djakov@...nel.org>
CC: <linux-arm-msm@...r.kernel.org>, <devicetree@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <linux-pci@...r.kernel.org>,
<vireshk@...nel.org>, <quic_vbadigan@...cinc.com>,
<quic_skananth@...cinc.com>, <quic_nitegupt@...cinc.com>,
<quic_parass@...cinc.com>, <quic_krichai@...cinc.com>,
<krzysztof.kozlowski@...aro.org>
Subject: [PATCH v13 6/6] PCI: qcom: Add OPP support to scale performance

QCOM Resource Power Manager-hardened (RPMh) is a hardware block which
maintains the hardware state of a regulator by performing max aggregation
of the requests made by all of its clients.

The PCIe controller can operate at different RPMh performance states of
the power domain, based on the link speed. This performance state varies
from target to target: some controllers support GEN3 at the NOM (Nominal)
voltage corner, while others support GEN3 at the low SVS (Static Voltage
Scaling) corner.

The SoC can be more power efficient if we scale the performance state
based on the aggregate PCIe link bandwidth.

Add Operating Performance Points (OPP) support to vote for the RPMh
performance state based on the aggregate link bandwidth.

OPP can also handle ICC bandwidth voting, so move the ICC bandwidth
votes into the OPP framework if OPP entries are present.

Since ICC voting is then handled through OPP, don't initialize ICC if
OPP is supported.

Before the PCIe link is initialized, vote for the highest OPP in the OPP
table, so that the maximum voltage corner is requested and the link can
come up at the maximum supported speed.

Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@...aro.org>
Signed-off-by: Krishna chaitanya chundru <quic_krichai@...cinc.com>
---
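A note for reviewers (below the cut line, not part of the commit message):
the sketch that follows condenses the OPP vote that
qcom_pcie_icc_opp_update() performs once the link is up. It is
illustrative only; example_vote_for_link_bw() is a hypothetical helper,
and pcie_link_speed_to_mbps() plus the pcie_link_speed[] lookup are
assumed from earlier patches in this series.

  #include <linux/pci.h>
  #include <linux/pm_opp.h>

  /* Illustrative sketch: vote for the OPP matching the aggregate link bandwidth. */
  static int example_vote_for_link_bw(struct device *dev, int speed, int width)
  {
          unsigned long freq_kbps;
          struct dev_pm_opp *opp;
          int freq_mbps, ret;

          /* Convert the negotiated link speed (LNKSTA CLS field) to Mbps. */
          freq_mbps = pcie_link_speed_to_mbps(pcie_link_speed[speed]);
          if (freq_mbps < 0)
                  return freq_mbps;

          /* The OPP "frequency" key encodes the aggregate bandwidth in kbps. */
          freq_kbps = freq_mbps * 1000;
          opp = dev_pm_opp_find_freq_exact(dev, freq_kbps * width, true);
          if (IS_ERR(opp))
                  return PTR_ERR(opp);

          /*
           * Selecting the OPP sets the RPMh performance state and, when
           * opp-peak-kBps is present, the interconnect bandwidth vote too.
           */
          ret = dev_pm_opp_set_opp(dev, opp);
          dev_pm_opp_put(opp);

          return ret;
  }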
drivers/pci/controller/dwc/pcie-qcom.c | 93 +++++++++++++++++++++++++++-------
1 file changed, 75 insertions(+), 18 deletions(-)
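Similarly, a minimal sketch (again illustrative, with a hypothetical
example_vote_max_opp() helper) of the probe-time vote for the highest
available OPP before link training, so the link can come up at the
maximum supported speed:

  #include <linux/limits.h>
  #include <linux/pm_opp.h>

  static int example_vote_max_opp(struct device *dev)
  {
          /*
           * Any key at or above the largest table entry works for the
           * floor search; the driver seeds it with INT_MAX.
           */
          unsigned long max_freq = INT_MAX;
          struct dev_pm_opp *opp;
          int ret;

          opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
          if (IS_ERR(opp))
                  return PTR_ERR(opp);

          ret = dev_pm_opp_set_opp(dev, opp);
          dev_pm_opp_put(opp);

          return ret;
  }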
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index ed4453db3e56..9896545edc90 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -22,6 +22,7 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
+#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/pcie.h>
@@ -1444,15 +1445,13 @@ static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
return 0;
}
-static void qcom_pcie_icc_update(struct qcom_pcie *pcie)
+static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie)
{
+ int speed, width, ret, freq_mbps;
struct dw_pcie *pci = pcie->pci;
+ unsigned long freq_kbps;
+ struct dev_pm_opp *opp;
u32 offset, status;
- int speed, width;
- int ret;
-
- if (!pcie->icc_mem)
- return;
offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
@@ -1464,10 +1463,26 @@ static void qcom_pcie_icc_update(struct qcom_pcie *pcie)
speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);
- ret = icc_set_bw(pcie->icc_mem, 0, width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
- if (ret) {
- dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
- ret);
+ if (pcie->icc_mem) {
+ ret = icc_set_bw(pcie->icc_mem, 0, width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
+ ret);
+ }
+ } else {
+ freq_mbps = pcie_link_speed_to_mbps(pcie_link_speed[speed]);
+ if (freq_mbps < 0)
+ return;
+
+ freq_kbps = freq_mbps * 1000;
+ opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width, true);
+ if (!IS_ERR(opp)) {
+ ret = dev_pm_opp_set_opp(pci->dev, opp);
+ if (ret)
+ dev_err(pci->dev, "Failed to set OPP for freq (%lu): %d\n",
+ freq_kbps * width, ret);
+ dev_pm_opp_put(opp);
+ }
}
}
@@ -1511,7 +1526,9 @@ static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
static int qcom_pcie_probe(struct platform_device *pdev)
{
const struct qcom_pcie_cfg *pcie_cfg;
+ unsigned long max_freq = INT_MAX;
struct device *dev = &pdev->dev;
+ struct dev_pm_opp *opp;
struct qcom_pcie *pcie;
struct dw_pcie_rp *pp;
struct resource *res;
@@ -1579,9 +1596,42 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_pm_runtime_put;
}
- ret = qcom_pcie_icc_init(pcie);
- if (ret)
+ /* OPP table is optional */
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret && ret != -ENODEV) {
+ dev_err_probe(dev, ret, "Failed to add OPP table\n");
goto err_pm_runtime_put;
+ }
+
+ /*
+ * Before the PCIe link is initialized, vote for the highest OPP in the
+ * OPP table, so that the maximum voltage corner is requested and the
+ * link can come up at the maximum supported speed. At the end of probe(),
+ * the OPP will be updated using qcom_pcie_icc_opp_update().
+ */
+ if (!ret) {
+ opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
+ if (IS_ERR(opp)) {
+ ret = dev_err_probe(pci->dev, PTR_ERR(opp),
+ "Unable to find max freq OPP\n");
+ goto err_pm_runtime_put;
+ } else {
+ ret = dev_pm_opp_set_opp(dev, opp);
+ }
+
+ dev_pm_opp_put(opp);
+ if (ret) {
+ dev_err_probe(pci->dev, ret,
+ "Failed to set OPP for freq %ld\n",
+ max_freq);
+ goto err_pm_runtime_put;
+ }
+ } else {
+ /* Skip ICC init if OPP is supported as it is handled by OPP */
+ ret = qcom_pcie_icc_init(pcie);
+ if (ret)
+ goto err_pm_runtime_put;
+ }
ret = pcie->cfg->ops->get_resources(pcie);
if (ret)
@@ -1601,7 +1651,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_phy_exit;
}
- qcom_pcie_icc_update(pcie);
+ qcom_pcie_icc_opp_update(pcie);
if (pcie->mhi)
qcom_pcie_init_debugfs(pcie);
@@ -1626,10 +1676,14 @@ static int qcom_pcie_suspend_noirq(struct device *dev)
* Set minimum bandwidth required to keep data path functional during
* suspend.
*/
- ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
- if (ret) {
- dev_err(dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n", ret);
- return ret;
+ if (pcie->icc_mem) {
+ ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
+ if (ret) {
+ dev_err(dev,
+ "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
+ ret);
+ return ret;
+ }
}
/*
@@ -1662,6 +1716,9 @@ static int qcom_pcie_suspend_noirq(struct device *dev)
ret = icc_disable(pcie->icc_cpu);
if (ret)
dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret);
+
+ if (!pcie->icc_mem)
+ dev_pm_opp_set_opp(pcie->pci->dev, NULL);
}
return ret;
}
@@ -1687,7 +1744,7 @@ static int qcom_pcie_resume_noirq(struct device *dev)
pcie->suspended = false;
}
- qcom_pcie_icc_update(pcie);
+ qcom_pcie_icc_opp_update(pcie);
return 0;
}
--
2.42.0