Message-Id: <20241128-topic-sm8x50-gpu-bw-vote-v3-3-81d60c10fb73@linaro.org>
Date: Thu, 28 Nov 2024 11:25:43 +0100
From: Neil Armstrong <neil.armstrong@...aro.org>
To: Rob Clark <robdclark@...il.com>, Sean Paul <sean@...rly.run>,
Konrad Dybcio <konradybcio@...nel.org>,
Abhinav Kumar <quic_abhinavk@...cinc.com>,
Dmitry Baryshkov <dmitry.baryshkov@...aro.org>,
Marijn Suijten <marijn.suijten@...ainline.org>,
David Airlie <airlied@...il.com>, Simona Vetter <simona@...ll.ch>,
Bjorn Andersson <andersson@...nel.org>, Rob Herring <robh@...nel.org>,
Krzysztof Kozlowski <krzk+dt@...nel.org>,
Conor Dooley <conor+dt@...nel.org>,
Akhil P Oommen <quic_akhilpo@...cinc.com>
Cc: linux-arm-msm@...r.kernel.org, dri-devel@...ts.freedesktop.org,
freedreno@...ts.freedesktop.org, linux-kernel@...r.kernel.org,
devicetree@...r.kernel.org, Neil Armstrong <neil.armstrong@...aro.org>
Subject: [PATCH v3 3/7] drm/msm: adreno: dynamically generate GMU bw table

The Adreno GPU Management Unit (GMU) can also scale the DDR
bandwidth along with the frequency and power domain level, but
for now we statically fill the bw_table with values from the
downstream driver.

Only the first entry is used, which is a disable vote, so we
currently rely on scaling via the Linux interconnect paths.
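
For reference, a "disable" level is simply a set of BCM votes with
the commit bit set and zero bandwidth requested, as in this minimal
sketch loosely modelled on the existing a740_build_bw_table() (the
exact command values are illustrative, not the verbatim downstream
ones):

	/* Single level: the disable vote */
	msg->bw_level_num = 1;
	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x7;

	msg->ddr_cmds_addrs[0] = cmd_db_read_addr("SH0");
	msg->ddr_cmds_addrs[1] = cmd_db_read_addr("MC0");
	msg->ddr_cmds_addrs[2] = cmd_db_read_addr("ACV");

	/* 0x40000000 = commit bit set, zero bandwidth requested */
	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;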

Let's dynamically generate the bw_table with the vote values
previously calculated from the OPPs.

Those entries will then be used by the GMU when passing the
appropriate bandwidth level while voting for a GPU frequency.
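
As a sketch of how such a level could be consumed when voting for a
frequency (the current code hardcodes a zero bw index; the bw_index
parameter below is an assumption about how a follow-up change might
plumb it through):

	struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };

	msg.ack_type = 1; /* blocking */
	msg.freq = perf_index;
	/* Index into the generated ddr_cmds_data[] levels */
	msg.bw = bw_index;

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE,
				 &msg, sizeof(msg), NULL, 0);
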
Signed-off-by: Neil Armstrong <neil.armstrong@...aro.org>
---
drivers/gpu/drm/msm/adreno/a6xx_hfi.c | 39 ++++++++++++++++++++++++++++++++---
1 file changed, 36 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index cb8844ed46b29c4569d05eb7a24f7b27e173190f..fe1946650425b749bad483dad1e630bc8be83abc 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -621,6 +621,35 @@ static void a740_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
msg->cnoc_cmds_data[1][0] = 0x60000001;
}
+static void a740_generate_bw_table(const struct a6xx_info *info, struct a6xx_gmu *gmu,
+ struct a6xx_hfi_msg_bw_table *msg)
+{
+ unsigned int i, j;
+
+ msg->ddr_wait_bitmask = 0x7;
+
+ for (i = 0; i < GMU_MAX_BCMS; i++) {
+ if (!info->bcms[i].name)
+ break;
+ msg->ddr_cmds_addrs[i] = cmd_db_read_addr(info->bcms[i].name);
+ }
+ msg->ddr_cmds_num = i;
+
+ for (i = 0; i < gmu->nr_gpu_bws; ++i)
+ for (j = 0; j < msg->ddr_cmds_num; j++)
+ msg->ddr_cmds_data[i][j] = gmu->gpu_ib_votes[i][j];
+ msg->bw_level_num = gmu->nr_gpu_bws;
+
+ /* TODO also generate CNOC commands */
+
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x1;
+
+ msg->cnoc_cmds_addrs[0] = cmd_db_read_addr("CN0");
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
@@ -664,6 +693,7 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
struct a6xx_hfi_msg_bw_table *msg;
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ const struct a6xx_info *info = adreno_gpu->info->a6xx;
if (gmu->bw_table)
goto send;
@@ -690,9 +720,12 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
a690_build_bw_table(msg);
else if (adreno_is_a730(adreno_gpu))
a730_build_bw_table(msg);
- else if (adreno_is_a740_family(adreno_gpu))
- a740_build_bw_table(msg);
- else
+ else if (adreno_is_a740_family(adreno_gpu)) {
+ if (info->bcms && gmu->nr_gpu_bws > 1)
+ a740_generate_bw_table(info, gmu, msg);
+ else
+ a740_build_bw_table(msg);
+ } else
a6xx_build_bw_table(msg);
gmu->bw_table = msg;
--
2.34.1