/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "cikd.h"
#include "ppsmc.h"
#include "amdgpu_ucode.h"
#include "ci_dpm.h"
#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"
  33. static int ci_set_smc_sram_address(struct amdgpu_device *adev,
  34. u32 smc_address, u32 limit)
  35. {
  36. if (smc_address & 3)
  37. return -EINVAL;
  38. if ((smc_address + 3) > limit)
  39. return -EINVAL;
  40. WREG32(mmSMC_IND_INDEX_0, smc_address);
  41. WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
  42. return 0;
  43. }
  44. int amdgpu_ci_copy_bytes_to_smc(struct amdgpu_device *adev,
  45. u32 smc_start_address,
  46. const u8 *src, u32 byte_count, u32 limit)
  47. {
  48. unsigned long flags;
  49. u32 data, original_data;
  50. u32 addr;
  51. u32 extra_shift;
  52. int ret = 0;
  53. if (smc_start_address & 3)
  54. return -EINVAL;
  55. if ((smc_start_address + byte_count) > limit)
  56. return -EINVAL;
  57. addr = smc_start_address;
  58. spin_lock_irqsave(&adev->smc_idx_lock, flags);
  59. while (byte_count >= 4) {
  60. /* SMC address space is BE */
  61. data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
  62. ret = ci_set_smc_sram_address(adev, addr, limit);
  63. if (ret)
  64. goto done;
  65. WREG32(mmSMC_IND_DATA_0, data);
  66. src += 4;
  67. byte_count -= 4;
  68. addr += 4;
  69. }
  70. /* RMW for the final bytes */
  71. if (byte_count > 0) {
  72. data = 0;
  73. ret = ci_set_smc_sram_address(adev, addr, limit);
  74. if (ret)
  75. goto done;
  76. original_data = RREG32(mmSMC_IND_DATA_0);
  77. extra_shift = 8 * (4 - byte_count);
  78. while (byte_count > 0) {
  79. data = (data << 8) + *src++;
  80. byte_count--;
  81. }
  82. data <<= extra_shift;
  83. data |= (original_data & ~((~0UL) << extra_shift));
  84. ret = ci_set_smc_sram_address(adev, addr, limit);
  85. if (ret)
  86. goto done;
  87. WREG32(mmSMC_IND_DATA_0, data);
  88. }
  89. done:
  90. spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
  91. return ret;
  92. }
  93. void amdgpu_ci_start_smc(struct amdgpu_device *adev)
  94. {
  95. u32 tmp = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
  96. tmp &= ~SMC_SYSCON_RESET_CNTL__rst_reg_MASK;
  97. WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, tmp);
  98. }
  99. void amdgpu_ci_reset_smc(struct amdgpu_device *adev)
  100. {
  101. u32 tmp = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
  102. tmp |= SMC_SYSCON_RESET_CNTL__rst_reg_MASK;
  103. WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, tmp);
  104. }
  105. int amdgpu_ci_program_jump_on_start(struct amdgpu_device *adev)
  106. {
  107. static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
  108. return amdgpu_ci_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
  109. }
  110. void amdgpu_ci_stop_smc_clock(struct amdgpu_device *adev)
  111. {
  112. u32 tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
  113. tmp |= SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK;
  114. WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, tmp);
  115. }
  116. void amdgpu_ci_start_smc_clock(struct amdgpu_device *adev)
  117. {
  118. u32 tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
  119. tmp &= ~SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK;
  120. WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, tmp);
  121. }
  122. bool amdgpu_ci_is_smc_running(struct amdgpu_device *adev)
  123. {
  124. u32 clk = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
  125. u32 pc_c = RREG32_SMC(ixSMC_PC_C);
  126. if (!(clk & SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK) && (0x20100 <= pc_c))
  127. return true;
  128. return false;
  129. }
  130. PPSMC_Result amdgpu_ci_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
  131. {
  132. u32 tmp;
  133. int i;
  134. if (!amdgpu_ci_is_smc_running(adev))
  135. return PPSMC_Result_Failed;
  136. WREG32(mmSMC_MESSAGE_0, msg);
  137. for (i = 0; i < adev->usec_timeout; i++) {
  138. tmp = RREG32(mmSMC_RESP_0);
  139. if (tmp != 0)
  140. break;
  141. udelay(1);
  142. }
  143. tmp = RREG32(mmSMC_RESP_0);
  144. return (PPSMC_Result)tmp;
  145. }
  146. PPSMC_Result amdgpu_ci_wait_for_smc_inactive(struct amdgpu_device *adev)
  147. {
  148. u32 tmp;
  149. int i;
  150. if (!amdgpu_ci_is_smc_running(adev))
  151. return PPSMC_Result_OK;
  152. for (i = 0; i < adev->usec_timeout; i++) {
  153. tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
  154. if ((tmp & SMC_SYSCON_CLOCK_CNTL_0__cken_MASK) == 0)
  155. break;
  156. udelay(1);
  157. }
  158. return PPSMC_Result_OK;
  159. }
  160. int amdgpu_ci_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
  161. {
  162. const struct smc_firmware_header_v1_0 *hdr;
  163. unsigned long flags;
  164. u32 ucode_start_address;
  165. u32 ucode_size;
  166. const u8 *src;
  167. u32 data;
  168. if (!adev->pm.fw)
  169. return -EINVAL;
  170. hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
  171. amdgpu_ucode_print_smc_hdr(&hdr->header);
  172. adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
  173. ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
  174. ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
  175. src = (const u8 *)
  176. (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
  177. if (ucode_size & 3)
  178. return -EINVAL;
  179. spin_lock_irqsave(&adev->smc_idx_lock, flags);
  180. WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
  181. WREG32_P(mmSMC_IND_ACCESS_CNTL, SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK,
  182. ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
  183. while (ucode_size >= 4) {
  184. /* SMC address space is BE */
  185. data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
  186. WREG32(mmSMC_IND_DATA_0, data);
  187. src += 4;
  188. ucode_size -= 4;
  189. }
  190. WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
  191. spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
  192. return 0;
  193. }
  194. int amdgpu_ci_read_smc_sram_dword(struct amdgpu_device *adev,
  195. u32 smc_address, u32 *value, u32 limit)
  196. {
  197. unsigned long flags;
  198. int ret;
  199. spin_lock_irqsave(&adev->smc_idx_lock, flags);
  200. ret = ci_set_smc_sram_address(adev, smc_address, limit);
  201. if (ret == 0)
  202. *value = RREG32(mmSMC_IND_DATA_0);
  203. spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
  204. return ret;
  205. }
  206. int amdgpu_ci_write_smc_sram_dword(struct amdgpu_device *adev,
  207. u32 smc_address, u32 value, u32 limit)
  208. {
  209. unsigned long flags;
  210. int ret;
  211. spin_lock_irqsave(&adev->smc_idx_lock, flags);
  212. ret = ci_set_smc_sram_address(adev, smc_address, limit);
  213. if (ret == 0)
  214. WREG32(mmSMC_IND_DATA_0, value);
  215. spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
  216. return ret;
  217. }