Message-Id: <20230417-topic-dpu_regbus-v1-4-06fbdc1643c0@linaro.org>
Date: Mon, 17 Apr 2023 17:30:18 +0200
From: Konrad Dybcio <konrad.dybcio@...aro.org>
To: Rob Clark <robdclark@...il.com>,
Abhinav Kumar <quic_abhinavk@...cinc.com>,
Dmitry Baryshkov <dmitry.baryshkov@...aro.org>,
Sean Paul <sean@...rly.run>, David Airlie <airlied@...il.com>,
Daniel Vetter <daniel@...ll.ch>,
Rob Herring <robh+dt@...nel.org>,
Krzysztof Kozlowski <krzysztof.kozlowski+dt@...aro.org>,
Krishna Manikandan <quic_mkrishn@...cinc.com>
Cc: Marijn Suijten <marijn.suijten@...ainline.org>,
linux-arm-msm@...r.kernel.org, dri-devel@...ts.freedesktop.org,
freedreno@...ts.freedesktop.org, devicetree@...r.kernel.org,
linux-kernel@...r.kernel.org,
Konrad Dybcio <konrad.dybcio@...aro.org>
Subject: [PATCH 4/5] drm/msm/mdss: Handle the reg bus ICC path

Apart from the already handled data bus (MAS_MDP_Pn<->DDR), there's
another path that needs to be handled to ensure MDSS functions properly,
namely the "reg bus", a.k.a. the CPU-MDSS interconnect.

Gating that path may have a variety of effects, from none at all to
otherwise inexplicable DSI timeouts.

On the MDSS side, we only have to ensure that it's on at what Qualcomm
downstream calls "77 MHz", a.k.a. 76.8 MHz, while active and turn it off
at suspend. To achieve that, make msm_mdss_icc_request_bw() accept a
boolean indicating whether we want the buses to be on or off, as this
function's only use is to vote for either the minimum required bandwidth
or no bandwidth at all.
Signed-off-by: Konrad Dybcio <konrad.dybcio@...aro.org>
---
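For reviewers, a minimal usage sketch (not part of the patch; the helper
names and standalone form are illustrative assumptions only) of how the
optional "cpu-cfg" reg bus path is looked up and how the on/off vote is
expected to behave, mirroring the diff below. icc_set_bw() takes kBps
values, and 76800 is the same number the patch votes while active.

#include <linux/err.h>
#include <linux/interconnect.h>

/*
 * Hypothetical helper, for illustration only: look up the optional
 * "cpu-cfg" (CPU-MDSS reg bus) path. Older DTs simply don't describe
 * it, so a missing path is not treated as an error.
 */
static struct icc_path *example_get_reg_bus_path(struct device *dev)
{
	struct icc_path *path = of_icc_get(dev, "cpu-cfg");

	return IS_ERR_OR_NULL(path) ? NULL : path;
}

/*
 * Hypothetical helper, for illustration only: vote the reg bus on at
 * the minimum operating point while MDSS is active and drop the vote
 * entirely at suspend.
 */
static void example_reg_bus_vote(struct icc_path *reg_bus_path, bool enable)
{
	if (!reg_bus_path)
		return;

	icc_set_bw(reg_bus_path, 0, enable ? 76800 : 0);
}
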
drivers/gpu/drm/msm/msm_mdss.c | 17 +++++++++++++----
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 9e2ce7f22677..4d126d20d661 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -50,6 +50,7 @@ struct msm_mdss {
const struct msm_mdss_data *mdss_data;
struct icc_path *mdp_path[2];
u32 num_mdp_paths;
+ struct icc_path *reg_bus_path;
};

static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
@@ -57,6 +58,7 @@ static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
{
struct icc_path *path0;
struct icc_path *path1;
+ struct icc_path *reg_bus_path;
path0 = of_icc_get(dev, "mdp0-mem");
if (IS_ERR_OR_NULL(path0))
@@ -71,6 +73,10 @@ static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
msm_mdss->num_mdp_paths++;
}
+ reg_bus_path = of_icc_get(dev, "cpu-cfg");
+ if (!IS_ERR_OR_NULL(reg_bus_path))
+ msm_mdss->reg_bus_path = reg_bus_path;
+
return 0;
}
@@ -83,12 +89,15 @@ static void msm_mdss_put_icc_path(void *data)
icc_put(msm_mdss->mdp_path[i]);
}

-static void msm_mdss_icc_request_bw(struct msm_mdss *msm_mdss, unsigned long bw)
+static void msm_mdss_icc_request_bw(struct msm_mdss *msm_mdss, bool enable)
{
int i;
for (i = 0; i < msm_mdss->num_mdp_paths; i++)
- icc_set_bw(msm_mdss->mdp_path[i], 0, Bps_to_icc(bw));
+ icc_set_bw(msm_mdss->mdp_path[i], 0, enable ? Bps_to_icc(MIN_IB_BW) : 0);
+
+ if (msm_mdss->reg_bus_path)
+ icc_set_bw(msm_mdss->reg_bus_path, 0, enable ? 76800 : 0);
}

static void msm_mdss_irq(struct irq_desc *desc)
@@ -241,7 +250,7 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
* the interconnect is enabled (non-zero bandwidth). Let's make sure
* that the interconnects are at least at a minimum amount.
*/
- msm_mdss_icc_request_bw(msm_mdss, MIN_IB_BW);
+ msm_mdss_icc_request_bw(msm_mdss, true);
ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
if (ret) {
@@ -289,7 +298,7 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
static int msm_mdss_disable(struct msm_mdss *msm_mdss)
{
clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);
- msm_mdss_icc_request_bw(msm_mdss, 0);
+ msm_mdss_icc_request_bw(msm_mdss, false);
return 0;
}
--
2.40.0