Message-ID: <202108040604.fZj0f9S3-lkp@intel.com>
Date: Wed, 4 Aug 2021 06:56:13 +0800
From: kernel test robot <lkp@...el.com>
To: Rob Clark <robdclark@...omium.org>
Cc: kbuild-all@...ts.01.org, linux-kernel@...r.kernel.org,
Emma Anholt <emma@...olt.net>, Eric Anholt <eric@...olt.net>
Subject: [anholt:v5.14-rc3-for-mesa-ci 2/12]
drivers/gpu/drm/msm/adreno/a6xx_gpu.c:1084:4: error: implicit declaration of
function 'writeq'; did you mean 'writeb'?
tree: https://github.com/anholt/linux v5.14-rc3-for-mesa-ci
head: 3580bd58f2892026b425c56b90ca0d20f5a60fb0
commit: a1f113ba11daa45d43c04268a194a2b63314ff4d [2/12] drm/msm/a6xx: make GPUs SMMU context bank available in it's aperture.
config: arm-defconfig (attached as .config)
compiler: arm-linux-gnueabi-gcc (GCC) 10.3.0
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# https://github.com/anholt/linux/commit/a1f113ba11daa45d43c04268a194a2b63314ff4d
git remote add anholt https://github.com/anholt/linux
git fetch --no-tags anholt v5.14-rc3-for-mesa-ci
git checkout a1f113ba11daa45d43c04268a194a2b63314ff4d
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-10.3.0 make.cross ARCH=arm
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp@...el.com>
All errors (new ones prefixed by >>):
drivers/gpu/drm/msm/adreno/a6xx_gpu.c: In function 'a6xx_hw_init':
>> drivers/gpu/drm/msm/adreno/a6xx_gpu.c:1084:4: error: implicit declaration of function 'writeq'; did you mean 'writeb'? [-Werror=implicit-function-declaration]
1084 | writeq(0x48000, reg); /* offset of cb0 from gpu's base */
| ^~~~~~
| writeb
cc1: some warnings being treated as errors
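
Editor's note on the error itself: this is an ISA-coverage problem rather
than a logic error in the flagged statement. <asm-generic/io.h> only
declares writeq() on 64-bit targets, so a 32-bit arm build (as with this
defconfig) has no such symbol and the call becomes an implicit declaration,
promoted to an error by -Werror. The usual way to keep a 64-bit MMIO write
building on both word sizes is to include one of the io-64-nonatomic
headers, which define writeq() as two 32-bit writes where no native one
exists. A minimal sketch follows; it is an illustration, not part of the
reported patch, and the low-word-first ordering is an assumption:

	/* Provides writeq() on 32-bit builds as two writel() calls,
	 * low word first; on 64-bit the native single store is used. */
	#include <linux/io-64-nonatomic-lo-hi.h>

	static void example_write64(void __iomem *reg, u64 val)
	{
		writeq(val, reg);	/* now builds on arm and arm64 alike */
	}

(<linux/io-64-nonatomic-hi-lo.h> is the high-word-first variant; which
ordering a given register tolerates is hardware-specific.)
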
vim +1084 drivers/gpu/drm/msm/adreno/a6xx_gpu.c
849
850 #define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
851 A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
852 A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
853 A6XX_RBBM_INT_0_MASK_CP_IB2 | \
854 A6XX_RBBM_INT_0_MASK_CP_IB1 | \
855 A6XX_RBBM_INT_0_MASK_CP_RB | \
856 A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
857 A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
858 A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
859 A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
860 A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
861
862 static int a6xx_hw_init(struct msm_gpu *gpu)
863 {
864 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
865 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
866 int ret;
867
868 /* Make sure the GMU keeps the GPU on while we set it up */
869 a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
870
871 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
872
873 /*
 874	 * Disable the trusted memory range - we don't actually support secure
875 * memory rendering at this point in time and we don't want to block off
876 * part of the virtual memory space.
877 */
878 gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
879 REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
880 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
881
882 /* Turn on 64 bit addressing for all blocks */
883 gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
884 gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
885 gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
886 gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
887 gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
888 gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
889 gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
890 gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
891 gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
892 gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
893 gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
894 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
895
896 /* enable hardware clockgating */
897 a6xx_set_hwcg(gpu, true);
898
 899	/* VBIF/GBIF start */
900 if (adreno_is_a640(adreno_gpu) || adreno_is_a650_family(adreno_gpu)) {
901 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
902 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
903 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
904 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
905 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
906 gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3);
907 } else {
908 gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
909 }
910
911 if (adreno_is_a630(adreno_gpu))
912 gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
913
914 /* Make all blocks contribute to the GPU BUSY perf counter */
915 gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
916
917 /* Disable L2 bypass in the UCHE */
918 gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
919 gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
920 gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
921 gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
922 gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
923 gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);
924
925 if (!adreno_is_a650_family(adreno_gpu)) {
926 /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
927 gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
928 REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
929
930 gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
931 REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
932 0x00100000 + adreno_gpu->gmem - 1);
933 }
934
935 gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
936 gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
937
938 if (adreno_is_a640(adreno_gpu) || adreno_is_a650_family(adreno_gpu))
939 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
940 else
941 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
942 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
943
944 if (adreno_is_a660(adreno_gpu))
945 gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020);
946
947 /* Setting the mem pool size */
948 gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
949
950 /* Setting the primFifo thresholds default values,
951 * and vccCacheSkipDis=1 bit (0x200) for A640 and newer
952 */
953 if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
954 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
955 else if (adreno_is_a640(adreno_gpu))
956 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200);
957 else
958 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000);
959
960 /* Set the AHB default slave response to "ERROR" */
961 gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);
962
963 /* Turn on performance counters */
964 gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);
965
966 /* Select CP0 to always count cycles */
967 gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL(0), PERF_CP_ALWAYS_COUNT);
968
969 a6xx_set_ubwc_config(gpu);
970
971 /* Enable fault detection */
972 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
973 (1 << 30) | 0x1fffff);
974
975 gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
976
977 /* Set weights for bicubic filtering */
978 if (adreno_is_a650_family(adreno_gpu)) {
979 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
980 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
981 0x3fe05ff4);
982 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2,
983 0x3fa0ebee);
984 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3,
985 0x3f5193ed);
986 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4,
987 0x3f0243f0);
988 }
989
990 /* Protect registers from the CP */
991 a6xx_set_cp_protect(gpu);
992
993 if (adreno_is_a660(adreno_gpu)) {
994 gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1);
995 gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0);
996 /* Set dualQ + disable afull for A660 GPU but not for A635 */
997 gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
998 }
999
1000 /* Enable expanded apriv for targets that support it */
1001 if (gpu->hw_apriv) {
1002 gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
1003 (1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
1004 }
1005
1006 /* Enable interrupts */
1007 gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
1008
1009 ret = adreno_hw_init(gpu);
1010 if (ret)
1011 goto out;
1012
1013 ret = a6xx_ucode_init(gpu);
1014 if (ret)
1015 goto out;
1016
1017 /* Set the ringbuffer address */
1018 gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
1019 gpu->rb[0]->iova);
1020
1021 /* Targets that support extended APRIV can use the RPTR shadow from
1022 * hardware but all the other ones need to disable the feature. Targets
1023 * that support the WHERE_AM_I opcode can use that instead
1024 */
1025 if (adreno_gpu->base.hw_apriv)
1026 gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT);
1027 else
1028 gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
1029 MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
1030
1031 /*
1032 * Expanded APRIV and targets that support WHERE_AM_I both need a
1033 * privileged buffer to store the RPTR shadow
1034 */
1035
1036 if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) {
1037 if (!a6xx_gpu->shadow_bo) {
1038 a6xx_gpu->shadow = msm_gem_kernel_new_locked(gpu->dev,
1039 sizeof(u32) * gpu->nr_rings,
1040 MSM_BO_WC | MSM_BO_MAP_PRIV,
1041 gpu->aspace, &a6xx_gpu->shadow_bo,
1042 &a6xx_gpu->shadow_iova);
1043
1044 if (IS_ERR(a6xx_gpu->shadow))
1045 return PTR_ERR(a6xx_gpu->shadow);
1046 }
1047
1048 gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO,
1049 REG_A6XX_CP_RB_RPTR_ADDR_HI,
1050 shadowptr(a6xx_gpu, gpu->rb[0]));
1051 }
1052
1053 /* Always come up on rb 0 */
1054 a6xx_gpu->cur_ring = gpu->rb[0];
1055
1056 a6xx_gpu->cur_ctx = NULL;
1057
 1058	/* Enable the SQE to start the CP engine */
1059 gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
1060
1061 ret = a6xx_cp_init(gpu);
1062 if (ret)
1063 goto out;
1064
1065 /*
1066 * Try to load a zap shader into the secure world. If successful
1067 * we can use the CP to switch out of secure mode. If not then we
 1068	 * have no recourse but to try to switch ourselves out manually. If we
1069 * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
1070 * be blocked and a permissions violation will soon follow.
1071 */
1072 ret = a6xx_zap_shader_init(gpu);
1073 if (!ret) {
1074 OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
1075 OUT_RING(gpu->rb[0], 0x00000000);
1076
1077 a6xx_flush(gpu, gpu->rb[0]);
1078 if (!a6xx_idle(gpu, gpu->rb[0]))
1079 return -EINVAL;
1080 } else if (ret == -ENODEV) {
1081 static bool first = true;
1082 if (first) {
1083 void __iomem *reg = ioremap(0x05060000, 0x1000);
> 1084 writeq(0x48000, reg); /* offset of cb0 from gpu's base */
1085 iounmap(reg);
1086 }
1087 /*
1088 * This device does not use zap shader (but print a warning
1089 * just in case someone got their dt wrong.. hopefully they
1090 * have a debug UART to realize the error of their ways...
1091 * if you mess this up you are about to crash horribly)
1092 */
1093 dev_warn_once(gpu->dev->dev,
1094 "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
1095 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
1096 ret = 0;
1097 } else {
1098 return ret;
1099 }
1100
1101 out:
1102 /*
1103 * Tell the GMU that we are done touching the GPU and it can start power
1104 * management
1105 */
1106 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
1107
1108 if (a6xx_gpu->gmu.legacy) {
1109 /* Take the GMU out of its special boot mode */
1110 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
1111 }
1112
1113 return ret;
1114 }
1115
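
Editor's note on the flagged hunk (kernel line 1084): a portable rework
could call lo_hi_writeq() from <linux/io-64-nonatomic-lo-hi.h> instead of
the bare writeq(). The sketch below is an illustration, not the author's
fix, and assumes a non-atomic, low-word-first store is acceptable for this
SMMU aperture register; it also checks the ioremap() return value and
clears the `first` latch, neither of which the quoted hunk does:

		static bool first = true;
		if (first) {
			/* needs <linux/io-64-nonatomic-lo-hi.h> at the
			 * top of the file */
			void __iomem *reg = ioremap(0x05060000, 0x1000);

			if (reg) {
				/* offset of cb0 from gpu's base */
				lo_hi_writeq(0x48000, reg);
				iounmap(reg);
			}
			first = false;
		}

Alternatively, two explicit writel() calls to reg and reg + 4 would
sidestep the 64-bit accessor entirely, which is exactly what
lo_hi_writeq() expands to.
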
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org