Message-ID: <CAF6AEGv4z=XBuiNdnga2LofubRLjZ40O6chpjGorqeZJz2YQXw@mail.gmail.com>
Date: Wed, 5 Feb 2020 13:18:33 -0800
From: Rob Clark <robdclark@...il.com>
To: Jordan Crouse <jcrouse@...eaurora.org>
Cc: linux-arm-msm <linux-arm-msm@...r.kernel.org>,
Sharat Masetty <smasetty@...eaurora.org>,
Douglas Anderson <dianders@...omium.org>,
Sean Paul <sean@...rly.run>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
dri-devel <dri-devel@...ts.freedesktop.org>,
David Airlie <airlied@...ux.ie>,
freedreno <freedreno@...ts.freedesktop.org>,
Daniel Vetter <daniel@...ll.ch>,
Stanimir Varbanov <stanimir.varbanov@...aro.org>
Subject: Re: [PATCH] drm/msm/a6xx: Update the GMU bus tables for sc7180

On Wed, Feb 5, 2020 at 9:01 AM Jordan Crouse <jcrouse@...eaurora.org> wrote:
>
> Fixup the GMU bus table values for the sc7180 target.
>
> Signed-off-by: Jordan Crouse <jcrouse@...eaurora.org>

I suspect that we'll need to figure out a better way to get these
values from the interconnect driver in the long run, esp. since there
are several different SoCs with an a618... but for now, this looks
reasonable.
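
To sketch the sort of thing I mean (purely illustrative, not part of
this patch: the "gfx-mem" path name and the helper are hypothetical,
but of_icc_get()/icc_set_bw()/icc_put() are the existing interconnect
API):

        #include <linux/err.h>
        #include <linux/interconnect.h>

        /* Hypothetical helper: vote GPU->DDR bandwidth via icc */
        static int a6xx_gmu_vote_bw(struct device *dev,
                        u32 avg_kbps, u32 peak_kbps)
        {
                struct icc_path *path;
                int ret;

                /* "gfx-mem" would name the GPU->memory path in DT */
                path = of_icc_get(dev, "gfx-mem");
                if (IS_ERR(path))
                        return PTR_ERR(path);

                /* bandwidth is in kBps; the provider aggregates votes */
                ret = icc_set_bw(path, avg_kbps, peak_kbps);

                icc_put(path);
                return ret;
        }

That would let the per-SoC numbers live with the interconnect provider
instead of being hard-coded per target in the GMU code.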
Reviewed-by: Rob Clark <robdclark@...il.com>
Fixes: e812744c5f95 ("drm: msm: a6xx: Add support for A618")
> ---
>
> drivers/gpu/drm/msm/adreno/a6xx_hfi.c | 85 ++++++++++++++++++++++++-----------
> 1 file changed, 60 insertions(+), 25 deletions(-)
>
> diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
> index eda11ab..e450e0b 100644
> --- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
> +++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
> @@ -7,6 +7,7 @@
>
> #include "a6xx_gmu.h"
> #include "a6xx_gmu.xml.h"
> +#include "a6xx_gpu.h"
>
> #define HFI_MSG_ID(val) [val] = #val
>
> @@ -216,48 +217,82 @@ static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
> NULL, 0);
> }
>
> -static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
> +static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
> {
> - struct a6xx_hfi_msg_bw_table msg = { 0 };
> + /* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
> + msg->bw_level_num = 1;
> +
> + msg->ddr_cmds_num = 3;
> + msg->ddr_wait_bitmask = 0x01;
> +
> + msg->ddr_cmds_addrs[0] = 0x50000;
> + msg->ddr_cmds_addrs[1] = 0x5003c;
> + msg->ddr_cmds_addrs[2] = 0x5000c;
> +
> + msg->ddr_cmds_data[0][0] = 0x40000000;
> + msg->ddr_cmds_data[0][1] = 0x40000000;
> + msg->ddr_cmds_data[0][2] = 0x40000000;
>
> /*
> - * The sdm845 GMU doesn't do bus frequency scaling on its own but it
> - * does need at least one entry in the list because it might be accessed
> - * when the GMU is shutting down. Send a single "off" entry.
> + * These are the CX (CNOC) votes - these are used by the GMU but the
> + * votes are known and fixed for the target
> */
> + msg->cnoc_cmds_num = 1;
> + msg->cnoc_wait_bitmask = 0x01;
> +
> + msg->cnoc_cmds_addrs[0] = 0x5007c;
> + msg->cnoc_cmds_data[0][0] = 0x40000000;
> + msg->cnoc_cmds_data[1][0] = 0x60000001;
> +}
>
> - msg.bw_level_num = 1;
> +static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
> +{
> + /* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
> + msg->bw_level_num = 1;
>
> - msg.ddr_cmds_num = 3;
> - msg.ddr_wait_bitmask = 0x07;
> + msg->ddr_cmds_num = 3;
> + msg->ddr_wait_bitmask = 0x07;
>
> - msg.ddr_cmds_addrs[0] = 0x50000;
> - msg.ddr_cmds_addrs[1] = 0x5005c;
> - msg.ddr_cmds_addrs[2] = 0x5000c;
> + msg->ddr_cmds_addrs[0] = 0x50000;
> + msg->ddr_cmds_addrs[1] = 0x5005c;
> + msg->ddr_cmds_addrs[2] = 0x5000c;
>
> - msg.ddr_cmds_data[0][0] = 0x40000000;
> - msg.ddr_cmds_data[0][1] = 0x40000000;
> - msg.ddr_cmds_data[0][2] = 0x40000000;
> + msg->ddr_cmds_data[0][0] = 0x40000000;
> + msg->ddr_cmds_data[0][1] = 0x40000000;
> + msg->ddr_cmds_data[0][2] = 0x40000000;
>
> /*
> * These are the CX (CNOC) votes. This is used but the values for the
> * sdm845 GMU are known and fixed so we can hard code them.
> */
>
> - msg.cnoc_cmds_num = 3;
> - msg.cnoc_wait_bitmask = 0x05;
> + msg->cnoc_cmds_num = 3;
> + msg->cnoc_wait_bitmask = 0x05;
>
> - msg.cnoc_cmds_addrs[0] = 0x50034;
> - msg.cnoc_cmds_addrs[1] = 0x5007c;
> - msg.cnoc_cmds_addrs[2] = 0x5004c;
> + msg->cnoc_cmds_addrs[0] = 0x50034;
> + msg->cnoc_cmds_addrs[1] = 0x5007c;
> + msg->cnoc_cmds_addrs[2] = 0x5004c;
>
> - msg.cnoc_cmds_data[0][0] = 0x40000000;
> - msg.cnoc_cmds_data[0][1] = 0x00000000;
> - msg.cnoc_cmds_data[0][2] = 0x40000000;
> + msg->cnoc_cmds_data[0][0] = 0x40000000;
> + msg->cnoc_cmds_data[0][1] = 0x00000000;
> + msg->cnoc_cmds_data[0][2] = 0x40000000;
> +
> + msg->cnoc_cmds_data[1][0] = 0x60000001;
> + msg->cnoc_cmds_data[1][1] = 0x20000001;
> + msg->cnoc_cmds_data[1][2] = 0x60000001;
> +}
> +
> +
> +static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
> +{
> + struct a6xx_hfi_msg_bw_table msg = { 0 };
> + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
> + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
>
> - msg.cnoc_cmds_data[1][0] = 0x60000001;
> - msg.cnoc_cmds_data[1][1] = 0x20000001;
> - msg.cnoc_cmds_data[1][2] = 0x60000001;
> + if (adreno_is_a618(adreno_gpu))
> + a618_build_bw_table(&msg);
> + else
> + a6xx_build_bw_table(&msg);
>
> return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
> NULL, 0);
> --
> 2.7.4