cz_smc.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "smu8.h"
#include "smu8_fusion.h"
#include "cz_ppsmc.h"
#include "cz_smumgr.h"
#include "smu_ucode_xfer_cz.h"
#include "amdgpu_ucode.h"
#include "smu/smu_8_0_d.h"
#include "smu/smu_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
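
/*
 * SMU firmware-loading support for Carrizo (CZ) and Stoney (ST) APUs.
 * The driver builds a table of contents (TOC) in GTT describing each
 * ucode image, hands its address to the SMU, and then asks the SMU to
 * run "jobs" that load the images into the owning IP blocks.  The
 * helpers below wrap the SMU message mailbox and the indirect SRAM
 * window used for that handshake.
 */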
uint32_t cz_get_argument(struct amdgpu_device *adev)
{
        return RREG32(mmSMU_MP1_SRBM2P_ARG_0);
}
static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev)
{
        struct cz_smu_private_data *priv =
                        (struct cz_smu_private_data *)(adev->smu.priv);

        return priv;
}
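
/*
 * Driver-to-SMU mailbox: the message ID is written to
 * SMU_MP1_SRBM2P_MSG_0, an optional argument to SMU_MP1_SRBM2P_ARG_0,
 * and the SMU posts its result in SMU_MP1_SRBM2P_RESP_0.  The "async"
 * variant below only waits for the previous transaction to be
 * acknowledged before posting the new message; the synchronous variant
 * also waits for this message's own response.
 */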
int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
{
        int i;
        u32 content = 0, tmp;

        /* wait until the response to the previous message is posted */
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
                                SMU_MP1_SRBM2P_RESP_0, CONTENT);
                if (content != tmp)
                        break;
                udelay(1);
        }

        /* a timeout here means the previous message was never acknowledged */
        if (i == adev->usec_timeout)
                return -EINVAL;

        WREG32(mmSMU_MP1_SRBM2P_RESP_0, 0);
        WREG32(mmSMU_MP1_SRBM2P_MSG_0, msg);

        return 0;
}
int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg)
{
        int i;
        u32 content = 0, tmp = 0;

        if (cz_send_msg_to_smc_async(adev, msg))
                return -EINVAL;

        /* wait for the SMU to post a response to this message */
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
                                SMU_MP1_SRBM2P_RESP_0, CONTENT);
                if (content != tmp)
                        break;
                udelay(1);
        }

        /* a timeout here means the SMU never responded */
        if (i == adev->usec_timeout)
                return -EINVAL;

        if (tmp != PPSMC_Result_OK) {
                dev_err(adev->dev, "SMC failed to process message 0x%x\n", msg);
                return -EINVAL;
        }

        return 0;
}
int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device *adev,
                                                u16 msg, u32 parameter)
{
        WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
        return cz_send_msg_to_smc_async(adev, msg);
}

int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
                                                u16 msg, u32 parameter)
{
        WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
        return cz_send_msg_to_smc(adev, msg);
}
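
/*
 * A typical synchronous transaction, as used throughout this file
 * (illustrative sketch only):
 *
 *	ret = cz_send_msg_to_smc_with_parameter(adev,
 *			PPSMC_MSG_DriverDramAddrHi,
 *			priv->toc_buffer.mc_addr_high);
 *	if (ret)
 *		return ret;
 *
 * For the few messages that return a value, the result is read back
 * afterwards with cz_get_argument().
 */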
static int cz_set_smc_sram_address(struct amdgpu_device *adev,
                                                u32 smc_address, u32 limit)
{
        if (smc_address & 3)
                return -EINVAL;

        if ((smc_address + 3) > limit)
                return -EINVAL;

        WREG32(mmMP0PUB_IND_INDEX_0, SMN_MP1_SRAM_START_ADDR + smc_address);

        return 0;
}
int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
                                                u32 *value, u32 limit)
{
        int ret;

        ret = cz_set_smc_sram_address(adev, smc_address, limit);
        if (ret)
                return ret;

        *value = RREG32(mmMP0PUB_IND_DATA_0);

        return 0;
}

int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
                                                u32 value, u32 limit)
{
        int ret;

        ret = cz_set_smc_sram_address(adev, smc_address, limit);
        if (ret)
                return ret;

        WREG32(mmMP0PUB_IND_DATA_0, value);

        return 0;
}
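
/*
 * Example: reading the firmware load-status word out of SMU SRAM, the
 * read counterpart of the clear that cz_smu_request_load_fw() performs
 * below (illustrative sketch only):
 *
 *	u32 status;
 *	u32 addr = SMU8_FIRMWARE_HEADER_LOCATION +
 *		offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
 *
 *	ret = cz_read_smc_sram_dword(adev, addr, &status, addr + 4);
 *
 * Addresses must be dword aligned and (address + 3) must not exceed the
 * caller-supplied limit.
 */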
static int cz_smu_request_load_fw(struct amdgpu_device *adev)
{
        struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
        uint32_t smc_addr = SMU8_FIRMWARE_HEADER_LOCATION +
                        offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

        /* clear the load-status word before kicking off the jobs */
        cz_write_smc_sram_dword(adev, smc_addr, 0, smc_addr + 4);

        /* hand the SMU the TOC buffer address */
        cz_send_msg_to_smc_with_parameter(adev,
                                PPSMC_MSG_DriverDramAddrHi,
                                priv->toc_buffer.mc_addr_high);
        cz_send_msg_to_smc_with_parameter(adev,
                                PPSMC_MSG_DriverDramAddrLo,
                                priv->toc_buffer.mc_addr_low);
        cz_send_msg_to_smc(adev, PPSMC_MSG_InitJobs);

        /* execute the jobs */
        cz_send_msg_to_smc_with_parameter(adev,
                                PPSMC_MSG_ExecuteJob,
                                priv->toc_entry_aram);
        cz_send_msg_to_smc_with_parameter(adev,
                                PPSMC_MSG_ExecuteJob,
                                priv->toc_entry_power_profiling_index);
        cz_send_msg_to_smc_with_parameter(adev,
                                PPSMC_MSG_ExecuteJob,
                                priv->toc_entry_initialize_index);

        return 0;
}
/*
 * Check whether the firmware has finished loading.  The SMU does not
 * set all of the requested bits in UcodeLoadStatus until loading is
 * complete, so poll the status word until every bit in fw_mask is set.
 */
static int cz_smu_check_fw_load_finish(struct amdgpu_device *adev,
                                                uint32_t fw_mask)
{
        int i;
        uint32_t index = SMN_MP1_SRAM_START_ADDR +
                        SMU8_FIRMWARE_HEADER_LOCATION +
                        offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

        WREG32(mmMP0PUB_IND_INDEX, index);

        for (i = 0; i < adev->usec_timeout; i++) {
                if (fw_mask == (RREG32(mmMP0PUB_IND_DATA) & fw_mask))
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout) {
                dev_err(adev->dev,
                        "SMU firmware load check failed, expected 0x%x, got 0x%x\n",
                        fw_mask, RREG32(mmMP0PUB_IND_DATA));
                return -EINVAL;
        }

        return 0;
}
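
/*
 * Note that the poll above goes through the MP0PUB_IND_INDEX/DATA
 * indirect window directly instead of cz_read_smc_sram_dword(): the
 * index register is programmed once and MP0PUB_IND_DATA is simply
 * re-read on every iteration.
 */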
/*
 * Interface for the IP blocks to check firmware loading status.
 * Returns 0 on success, nonzero if the firmware is not loaded.
 */
static int cz_smu_check_finished(struct amdgpu_device *adev,
                                enum AMDGPU_UCODE_ID id)
{
        switch (id) {
        case AMDGPU_UCODE_ID_SDMA0:
                if (adev->smu.fw_flags & AMDGPU_SDMA0_UCODE_LOADED)
                        return 0;
                break;
        case AMDGPU_UCODE_ID_SDMA1:
                if (adev->smu.fw_flags & AMDGPU_SDMA1_UCODE_LOADED)
                        return 0;
                break;
        case AMDGPU_UCODE_ID_CP_CE:
                if (adev->smu.fw_flags & AMDGPU_CPCE_UCODE_LOADED)
                        return 0;
                break;
        case AMDGPU_UCODE_ID_CP_PFP:
                if (adev->smu.fw_flags & AMDGPU_CPPFP_UCODE_LOADED)
                        return 0;
                break;
        case AMDGPU_UCODE_ID_CP_ME:
                if (adev->smu.fw_flags & AMDGPU_CPME_UCODE_LOADED)
                        return 0;
                break;
        case AMDGPU_UCODE_ID_CP_MEC1:
                if (adev->smu.fw_flags & AMDGPU_CPMEC1_UCODE_LOADED)
                        return 0;
                break;
        case AMDGPU_UCODE_ID_CP_MEC2:
                if (adev->smu.fw_flags & AMDGPU_CPMEC2_UCODE_LOADED)
                        return 0;
                break;
        case AMDGPU_UCODE_ID_RLC_G:
                if (adev->smu.fw_flags & AMDGPU_CPRLC_UCODE_LOADED)
                        return 0;
                break;
        case AMDGPU_UCODE_ID_MAXIMUM:
        default:
                break;
        }

        return 1;
}
static int cz_load_mec_firmware(struct amdgpu_device *adev)
{
        struct amdgpu_firmware_info *ucode =
                        &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
        uint32_t reg_data;
        uint32_t tmp;

        if (ucode->fw == NULL)
                return -EINVAL;

        /* Disable MEC parsing/prefetching */
        tmp = RREG32(mmCP_MEC_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
        WREG32(mmCP_MEC_CNTL, tmp);

        tmp = RREG32(mmCP_CPC_IC_BASE_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
        tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
        tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
        tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
        WREG32(mmCP_CPC_IC_BASE_CNTL, tmp);

        reg_data = lower_32_bits(ucode->mc_addr) &
                        REG_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
        WREG32(mmCP_CPC_IC_BASE_LO, reg_data);

        reg_data = upper_32_bits(ucode->mc_addr) &
                        REG_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
        WREG32(mmCP_CPC_IC_BASE_HI, reg_data);

        return 0;
}
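
/*
 * The MEC image is not copied anywhere by this helper: the two MEC
 * micro-engines are halted and the compute instruction cache is pointed
 * straight at the MEC1 ucode image in GTT via CP_CPC_IC_BASE_LO/HI, so
 * the MEC fetches its instructions from that buffer directly.
 */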
int cz_smu_start(struct amdgpu_device *adev)
{
        int ret = 0;
        uint32_t fw_to_check = UCODE_ID_RLC_G_MASK |
                                UCODE_ID_SDMA0_MASK |
                                UCODE_ID_SDMA1_MASK |
                                UCODE_ID_CP_CE_MASK |
                                UCODE_ID_CP_ME_MASK |
                                UCODE_ID_CP_PFP_MASK |
                                UCODE_ID_CP_MEC_JT1_MASK |
                                UCODE_ID_CP_MEC_JT2_MASK;

        if (adev->asic_type == CHIP_STONEY)
                fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);

        cz_smu_request_load_fw(adev);
        ret = cz_smu_check_fw_load_finish(adev, fw_to_check);
        if (ret)
                return ret;

        /* manually load MEC firmware for CZ */
        if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) {
                ret = cz_load_mec_firmware(adev);
                if (ret) {
                        dev_err(adev->dev, "(%d) MEC firmware load failed\n", ret);
                        return ret;
                }
        }

        /* set up the firmware-loaded flags */
        adev->smu.fw_flags = AMDGPU_SDMA0_UCODE_LOADED |
                                AMDGPU_SDMA1_UCODE_LOADED |
                                AMDGPU_CPCE_UCODE_LOADED |
                                AMDGPU_CPPFP_UCODE_LOADED |
                                AMDGPU_CPME_UCODE_LOADED |
                                AMDGPU_CPMEC1_UCODE_LOADED |
                                AMDGPU_CPMEC2_UCODE_LOADED |
                                AMDGPU_CPRLC_UCODE_LOADED;

        if (adev->asic_type == CHIP_STONEY)
                adev->smu.fw_flags &= ~(AMDGPU_SDMA1_UCODE_LOADED |
                                        AMDGPU_CPMEC2_UCODE_LOADED);

        return ret;
}
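
/*
 * Stoney has a single SDMA engine and a single MEC, so the SDMA1 and
 * MEC JT2/MEC2 bits are masked out of both the load check and the
 * resulting fw_flags on that ASIC.
 */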
static uint32_t cz_convert_fw_type(uint32_t fw_type)
{
        enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

        switch (fw_type) {
        case UCODE_ID_SDMA0:
                result = AMDGPU_UCODE_ID_SDMA0;
                break;
        case UCODE_ID_SDMA1:
                result = AMDGPU_UCODE_ID_SDMA1;
                break;
        case UCODE_ID_CP_CE:
                result = AMDGPU_UCODE_ID_CP_CE;
                break;
        case UCODE_ID_CP_PFP:
                result = AMDGPU_UCODE_ID_CP_PFP;
                break;
        case UCODE_ID_CP_ME:
                result = AMDGPU_UCODE_ID_CP_ME;
                break;
        case UCODE_ID_CP_MEC_JT1:
        case UCODE_ID_CP_MEC_JT2:
                result = AMDGPU_UCODE_ID_CP_MEC1;
                break;
        case UCODE_ID_RLC_G:
                result = AMDGPU_UCODE_ID_RLC_G;
                break;
        default:
                DRM_ERROR("UCode type is out of range!");
        }

        return result;
}
static uint8_t cz_smu_translate_firmware_enum_to_arg(
                        enum cz_scratch_entry firmware_enum)
{
        uint8_t ret = 0;

        switch (firmware_enum) {
        case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
                ret = UCODE_ID_SDMA0;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
                ret = UCODE_ID_SDMA1;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
                ret = UCODE_ID_CP_CE;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
                ret = UCODE_ID_CP_PFP;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
                ret = UCODE_ID_CP_ME;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
                ret = UCODE_ID_CP_MEC_JT1;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
                ret = UCODE_ID_CP_MEC_JT2;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
                ret = UCODE_ID_GMCON_RENG;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
                ret = UCODE_ID_RLC_G;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
                ret = UCODE_ID_RLC_SCRATCH;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
                ret = UCODE_ID_RLC_SRM_ARAM;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
                ret = UCODE_ID_RLC_SRM_DRAM;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
                ret = UCODE_ID_DMCU_ERAM;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
                ret = UCODE_ID_DMCU_IRAM;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
                ret = TASK_ARG_INIT_MM_PWR_LOG;
                break;
        case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
        case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
        case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
        case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
        case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
        case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
                ret = TASK_ARG_REG_MMIO;
                break;
        case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
                ret = TASK_ARG_INIT_CLK_TABLE;
                break;
        }

        return ret;
}
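
/*
 * The scratch-entry enums cover more than ucode images: the data
 * entries (register save/restore lists, clock table, power-profiling
 * buffer) translate to TASK_ARG_* values rather than UCODE_ID_* values,
 * which is why this mapping exists alongside cz_convert_fw_type().
 */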
static int cz_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
                                enum cz_scratch_entry firmware_enum,
                                struct cz_buffer_entry *entry)
{
        uint64_t gpu_addr;
        uint32_t data_size;
        uint8_t ucode_id = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
        enum AMDGPU_UCODE_ID id = cz_convert_fw_type(ucode_id);
        struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
        const struct gfx_firmware_header_v1_0 *header;

        if (ucode->fw == NULL)
                return -EINVAL;

        gpu_addr = ucode->mc_addr;
        header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
        data_size = le32_to_cpu(header->header.ucode_size_bytes);

        /* the MEC jump tables are sub-ranges of the MEC1 image */
        if ((firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1) ||
            (firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2)) {
                gpu_addr += le32_to_cpu(header->jt_offset) << 2;
                data_size = le32_to_cpu(header->jt_size) << 2;
        }

        entry->mc_addr_low = lower_32_bits(gpu_addr);
        entry->mc_addr_high = upper_32_bits(gpu_addr);
        entry->data_size = data_size;
        entry->firmware_ID = firmware_enum;

        return 0;
}
static int cz_smu_populate_single_scratch_entry(struct amdgpu_device *adev,
                                enum cz_scratch_entry scratch_type,
                                uint32_t size_in_byte,
                                struct cz_buffer_entry *entry)
{
        struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
        uint64_t mc_addr = (((uint64_t) priv->smu_buffer.mc_addr_high) << 32) |
                                priv->smu_buffer.mc_addr_low;

        mc_addr += size_in_byte;
        priv->smu_buffer_used_bytes += size_in_byte;

        entry->data_size = size_in_byte;
        entry->kaddr = priv->smu_buffer.kaddr + priv->smu_buffer_used_bytes;
        entry->mc_addr_low = lower_32_bits(mc_addr);
        entry->mc_addr_high = upper_32_bits(mc_addr);
        entry->firmware_ID = scratch_type;

        return 0;
}
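
/*
 * Scratch entries are carved out of the single preallocated smu_buffer;
 * smu_buffer_used_bytes tracks how much of that buffer has been handed
 * out so far.
 */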
static int cz_smu_populate_single_ucode_load_task(struct amdgpu_device *adev,
                                enum cz_scratch_entry firmware_enum,
                                bool is_last)
{
        uint8_t i;
        struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
        struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
        struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];

        task->type = TASK_TYPE_UCODE_LOAD;
        task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
        task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;

        for (i = 0; i < priv->driver_buffer_length; i++)
                if (priv->driver_buffer[i].firmware_ID == firmware_enum)
                        break;

        if (i >= priv->driver_buffer_length) {
                dev_err(adev->dev, "Invalid Firmware Type\n");
                return -EINVAL;
        }

        task->addr.low = priv->driver_buffer[i].mc_addr_low;
        task->addr.high = priv->driver_buffer[i].mc_addr_high;
        task->size_bytes = priv->driver_buffer[i].data_size;

        return 0;
}
static int cz_smu_populate_single_scratch_task(struct amdgpu_device *adev,
                                enum cz_scratch_entry firmware_enum,
                                uint8_t type, bool is_last)
{
        uint8_t i;
        struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
        struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
        struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];

        task->type = type;
        task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
        task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;

        for (i = 0; i < priv->scratch_buffer_length; i++)
                if (priv->scratch_buffer[i].firmware_ID == firmware_enum)
                        break;

        if (i >= priv->scratch_buffer_length) {
                dev_err(adev->dev, "Invalid Firmware Type\n");
                return -EINVAL;
        }

        task->addr.low = priv->scratch_buffer[i].mc_addr_low;
        task->addr.high = priv->scratch_buffer[i].mc_addr_high;
        task->size_bytes = priv->scratch_buffer[i].data_size;

        if (firmware_enum == CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS) {
                struct cz_ih_meta_data *pIHReg_restore =
                        (struct cz_ih_meta_data *)priv->scratch_buffer[i].kaddr;

                pIHReg_restore->command =
                        METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
        }

        return 0;
}
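
/*
 * TOC tasks form a singly linked list inside the TOC buffer: each
 * task's "next" field holds the index of the following task, and the
 * last task of a job is terminated with END_OF_TASK_LIST.  A job entry
 * (e.g. JobList[JOB_GFX_SAVE]) is therefore just the index of the first
 * task in its chain.
 */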
static int cz_smu_construct_toc_for_rlc_aram_save(struct amdgpu_device *adev)
{
        struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

        priv->toc_entry_aram = priv->toc_entry_used_count;
        cz_smu_populate_single_scratch_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
                                TASK_TYPE_UCODE_SAVE, true);

        return 0;
}
static int cz_smu_construct_toc_for_vddgfx_enter(struct amdgpu_device *adev)
{
        struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
        struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

        toc->JobList[JOB_GFX_SAVE] = (uint8_t)priv->toc_entry_used_count;
        cz_smu_populate_single_scratch_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
                                TASK_TYPE_UCODE_SAVE, false);
        cz_smu_populate_single_scratch_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
                                TASK_TYPE_UCODE_SAVE, true);

        return 0;
}
static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device *adev)
{
        struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
        struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

        toc->JobList[JOB_GFX_RESTORE] = (uint8_t)priv->toc_entry_used_count;

        /* populate ucode */
        if (adev->firmware.smu_load) {
                cz_smu_populate_single_ucode_load_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
                cz_smu_populate_single_ucode_load_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
                cz_smu_populate_single_ucode_load_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
                cz_smu_populate_single_ucode_load_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
                if (adev->asic_type == CHIP_STONEY) {
                        cz_smu_populate_single_ucode_load_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
                } else {
                        cz_smu_populate_single_ucode_load_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
                }
                cz_smu_populate_single_ucode_load_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
        }

        /* populate scratch */
        cz_smu_populate_single_scratch_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
                                TASK_TYPE_UCODE_LOAD, false);
        cz_smu_populate_single_scratch_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
                                TASK_TYPE_UCODE_LOAD, false);
        cz_smu_populate_single_scratch_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
                                TASK_TYPE_UCODE_LOAD, true);

        return 0;
}
static int cz_smu_construct_toc_for_power_profiling(struct amdgpu_device *adev)
{
        struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

        priv->toc_entry_power_profiling_index = priv->toc_entry_used_count;
        cz_smu_populate_single_scratch_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
                                TASK_TYPE_INITIALIZE, true);

        return 0;
}
static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev)
{
        struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

        priv->toc_entry_initialize_index = priv->toc_entry_used_count;

        if (adev->firmware.smu_load) {
                cz_smu_populate_single_ucode_load_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
                if (adev->asic_type == CHIP_STONEY) {
                        cz_smu_populate_single_ucode_load_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
                } else {
                        cz_smu_populate_single_ucode_load_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
                }
                cz_smu_populate_single_ucode_load_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
                cz_smu_populate_single_ucode_load_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
                cz_smu_populate_single_ucode_load_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
                cz_smu_populate_single_ucode_load_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
                if (adev->asic_type == CHIP_STONEY) {
                        cz_smu_populate_single_ucode_load_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
                } else {
                        cz_smu_populate_single_ucode_load_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
                }
                cz_smu_populate_single_ucode_load_task(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
        }

        return 0;
}
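
/*
 * On Stoney the SDMA0 and MEC JT1 images are queued twice in place of
 * the missing SDMA1 and MEC JT2 images, presumably so that the job
 * keeps the same task count regardless of ASIC.
 */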
static int cz_smu_construct_toc_for_clock_table(struct amdgpu_device *adev)
{
        struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

        priv->toc_entry_clock_table = priv->toc_entry_used_count;
        cz_smu_populate_single_scratch_task(adev,
                                CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
                                TASK_TYPE_INITIALIZE, true);

        return 0;
}
static int cz_smu_initialize_toc_empty_job_list(struct amdgpu_device *adev)
{
        int i;
        struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
        struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

        for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
                toc->JobList[i] = (uint8_t)IGNORE_JOB;

        return 0;
}
/*
 * cz smu uninitialization
 */
int cz_smu_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_unref(&adev->smu.toc_buf);
        amdgpu_bo_unref(&adev->smu.smu_buf);
        kfree(adev->smu.priv);
        adev->smu.priv = NULL;
        if (adev->firmware.smu_load)
                amdgpu_ucode_fini_bo(adev);

        return 0;
}
int cz_smu_download_pptable(struct amdgpu_device *adev, void **table)
{
        uint8_t i;
        struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

        for (i = 0; i < priv->scratch_buffer_length; i++)
                if (priv->scratch_buffer[i].firmware_ID ==
                                CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
                        break;

        if (i >= priv->scratch_buffer_length) {
                dev_err(adev->dev, "Invalid Scratch Type\n");
                return -EINVAL;
        }

        *table = (struct SMU8_Fusion_ClkTable *)priv->scratch_buffer[i].kaddr;

        /* prepare buffer for pptable */
        cz_send_msg_to_smc_with_parameter(adev,
                                PPSMC_MSG_SetClkTableAddrHi,
                                priv->scratch_buffer[i].mc_addr_high);
        cz_send_msg_to_smc_with_parameter(adev,
                                PPSMC_MSG_SetClkTableAddrLo,
                                priv->scratch_buffer[i].mc_addr_low);
        cz_send_msg_to_smc_with_parameter(adev,
                                PPSMC_MSG_ExecuteJob,
                                priv->toc_entry_clock_table);

        /* actual downloading */
        cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToDram);

        return 0;
}
int cz_smu_upload_pptable(struct amdgpu_device *adev)
{
        uint8_t i;
        struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

        for (i = 0; i < priv->scratch_buffer_length; i++)
                if (priv->scratch_buffer[i].firmware_ID ==
                                CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
                        break;

        if (i >= priv->scratch_buffer_length) {
                dev_err(adev->dev, "Invalid Scratch Type\n");
                return -EINVAL;
        }

        /* prepare SMU */
        cz_send_msg_to_smc_with_parameter(adev,
                                PPSMC_MSG_SetClkTableAddrHi,
                                priv->scratch_buffer[i].mc_addr_high);
        cz_send_msg_to_smc_with_parameter(adev,
                                PPSMC_MSG_SetClkTableAddrLo,
                                priv->scratch_buffer[i].mc_addr_low);
        cz_send_msg_to_smc_with_parameter(adev,
                                PPSMC_MSG_ExecuteJob,
                                priv->toc_entry_clock_table);

        /* actual uploading */
        cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToSmu);

        return 0;
}
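
/*
 * Download and upload share the same setup: both point the SMU at the
 * clock-table scratch buffer and run the clock-table job; only the
 * final transfer message differs (PPSMC_MSG_ClkTableXferToDram for
 * download vs. PPSMC_MSG_ClkTableXferToSmu for upload).
 */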
/*
 * cz smumgr functions initialization
 */
static const struct amdgpu_smumgr_funcs cz_smumgr_funcs = {
        .check_fw_load_finish = cz_smu_check_finished,
        .request_smu_load_fw = NULL,
        .request_smu_specific_fw = NULL,
};
/*
 * cz smu initialization
 */
int cz_smu_init(struct amdgpu_device *adev)
{
        int ret = -EINVAL;
        uint64_t mc_addr = 0;
        struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
        struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
        void *toc_buf_ptr = NULL;
        void *smu_buf_ptr = NULL;
        struct cz_smu_private_data *priv =
                kzalloc(sizeof(struct cz_smu_private_data), GFP_KERNEL);

        if (priv == NULL)
                return -ENOMEM;

        /* allocate firmware buffers */
        if (adev->firmware.smu_load)
                amdgpu_ucode_init_bo(adev);

        adev->smu.priv = priv;
        adev->smu.fw_flags = 0;
        priv->toc_buffer.data_size = 4096;

        priv->smu_buffer.data_size =
                ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
                ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
                ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
                ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
                ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);

        /* prepare toc buffer and smu buffer:
         * 1. create amdgpu_bo for toc buffer and smu buffer
         * 2. pin mc address
         * 3. map kernel virtual address
         */
        ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
                               true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
                               toc_buf);
        if (ret) {
                dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
                return ret;
        }

        ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
                               true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
                               smu_buf);
        if (ret) {
                dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
                return ret;
        }

        /* toc buffer reserve/pin/map */
        ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
        if (ret) {
                amdgpu_bo_unref(&adev->smu.toc_buf);
                dev_err(adev->dev, "(%d) SMC TOC buffer reserve failed\n", ret);
                return ret;
        }

        ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
        if (ret) {
                amdgpu_bo_unreserve(adev->smu.toc_buf);
                amdgpu_bo_unref(&adev->smu.toc_buf);
                dev_err(adev->dev, "(%d) SMC TOC buffer pin failed\n", ret);
                return ret;
        }

        ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
        if (ret)
                goto smu_init_failed;

        amdgpu_bo_unreserve(adev->smu.toc_buf);

        priv->toc_buffer.mc_addr_low = lower_32_bits(mc_addr);
        priv->toc_buffer.mc_addr_high = upper_32_bits(mc_addr);
        priv->toc_buffer.kaddr = toc_buf_ptr;

        /* smu buffer reserve/pin/map */
        ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
        if (ret) {
                amdgpu_bo_unref(&adev->smu.smu_buf);
                dev_err(adev->dev, "(%d) SMC Internal buffer reserve failed\n", ret);
                return ret;
        }

        ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
        if (ret) {
                amdgpu_bo_unreserve(adev->smu.smu_buf);
                amdgpu_bo_unref(&adev->smu.smu_buf);
                dev_err(adev->dev, "(%d) SMC Internal buffer pin failed\n", ret);
                return ret;
        }

        ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
        if (ret)
                goto smu_init_failed;

        amdgpu_bo_unreserve(adev->smu.smu_buf);

        priv->smu_buffer.mc_addr_low = lower_32_bits(mc_addr);
        priv->smu_buffer.mc_addr_high = upper_32_bits(mc_addr);
        priv->smu_buffer.kaddr = smu_buf_ptr;

        if (adev->firmware.smu_load) {
                if (cz_smu_populate_single_firmware_entry(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
                                &priv->driver_buffer[priv->driver_buffer_length++]))
                        goto smu_init_failed;

                if (adev->asic_type == CHIP_STONEY) {
                        if (cz_smu_populate_single_firmware_entry(adev,
                                        CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
                                        &priv->driver_buffer[priv->driver_buffer_length++]))
                                goto smu_init_failed;
                } else {
                        if (cz_smu_populate_single_firmware_entry(adev,
                                        CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
                                        &priv->driver_buffer[priv->driver_buffer_length++]))
                                goto smu_init_failed;
                }
                if (cz_smu_populate_single_firmware_entry(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
                                &priv->driver_buffer[priv->driver_buffer_length++]))
                        goto smu_init_failed;
                if (cz_smu_populate_single_firmware_entry(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
                                &priv->driver_buffer[priv->driver_buffer_length++]))
                        goto smu_init_failed;
                if (cz_smu_populate_single_firmware_entry(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
                                &priv->driver_buffer[priv->driver_buffer_length++]))
                        goto smu_init_failed;
                if (cz_smu_populate_single_firmware_entry(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
                                &priv->driver_buffer[priv->driver_buffer_length++]))
                        goto smu_init_failed;
                if (adev->asic_type == CHIP_STONEY) {
                        if (cz_smu_populate_single_firmware_entry(adev,
                                        CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
                                        &priv->driver_buffer[priv->driver_buffer_length++]))
                                goto smu_init_failed;
                } else {
                        if (cz_smu_populate_single_firmware_entry(adev,
                                        CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
                                        &priv->driver_buffer[priv->driver_buffer_length++]))
                                goto smu_init_failed;
                }
                if (cz_smu_populate_single_firmware_entry(adev,
                                CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
                                &priv->driver_buffer[priv->driver_buffer_length++]))
                        goto smu_init_failed;
        }

        if (cz_smu_populate_single_scratch_entry(adev,
                        CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
                        UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
                        &priv->scratch_buffer[priv->scratch_buffer_length++]))
                goto smu_init_failed;
        if (cz_smu_populate_single_scratch_entry(adev,
                        CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
                        UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
                        &priv->scratch_buffer[priv->scratch_buffer_length++]))
                goto smu_init_failed;
        if (cz_smu_populate_single_scratch_entry(adev,
                        CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
                        UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
                        &priv->scratch_buffer[priv->scratch_buffer_length++]))
                goto smu_init_failed;
        if (cz_smu_populate_single_scratch_entry(adev,
                        CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
                        sizeof(struct SMU8_MultimediaPowerLogData),
                        &priv->scratch_buffer[priv->scratch_buffer_length++]))
                goto smu_init_failed;
        if (cz_smu_populate_single_scratch_entry(adev,
                        CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
                        sizeof(struct SMU8_Fusion_ClkTable),
                        &priv->scratch_buffer[priv->scratch_buffer_length++]))
                goto smu_init_failed;

        cz_smu_initialize_toc_empty_job_list(adev);
        cz_smu_construct_toc_for_rlc_aram_save(adev);
        cz_smu_construct_toc_for_vddgfx_enter(adev);
        cz_smu_construct_toc_for_vddgfx_exit(adev);
        cz_smu_construct_toc_for_power_profiling(adev);
        cz_smu_construct_toc_for_bootup(adev);
        cz_smu_construct_toc_for_clock_table(adev);

        /* init the smumgr functions */
        adev->smu.smumgr_funcs = &cz_smumgr_funcs;

        return 0;

smu_init_failed:
        amdgpu_bo_unref(toc_buf);
        amdgpu_bo_unref(smu_buf);

        return ret;
}