rv740_dpm.c

/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "drmP.h"
#include "radeon.h"
#include "rv740d.h"
#include "r600_dpm.h"
#include "rv770_dpm.h"
#include "atom.h"

struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
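
/*
 * Decode the encoded reference divider returned in the ATOM memory PLL
 * dividers into an actual divide ratio.  Only the encodings handled below
 * are expected; anything else is reported and decoded as 0.
 */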
u32 rv740_get_decoded_reference_divider(u32 encoded_ref)
{
	u32 ref = 0;

	switch (encoded_ref) {
	case 0:
		ref = 1;
		break;
	case 16:
		ref = 2;
		break;
	case 17:
		ref = 3;
		break;
	case 18:
		ref = 2;
		break;
	case 19:
		ref = 3;
		break;
	case 20:
		ref = 4;
		break;
	case 21:
		ref = 5;
		break;
	default:
		DRM_ERROR("Invalid encoded Reference Divider\n");
		ref = 0;
		break;
	}

	return ref;
}
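
/*
 * Each entry maps a band of memory data rates (min, max] to the DLL speed
 * code programmed into MCLK_PWRMGT_CNTL.  The data rate is the value
 * computed in rv740_get_dll_speed() below; assuming the 10 kHz clock units
 * used elsewhere in the radeon dpm code, one unit is roughly 10 MHz of
 * data rate.
 */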
struct dll_speed_setting {
	u16 min;
	u16 max;
	u32 dll_speed;
};

static struct dll_speed_setting dll_speed_table[16] =
{
	{ 270, 320, 0x0f },
	{ 240, 270, 0x0e },
	{ 200, 240, 0x0d },
	{ 180, 200, 0x0c },
	{ 160, 180, 0x0b },
	{ 140, 160, 0x0a },
	{ 120, 140, 0x09 },
	{ 110, 120, 0x08 },
	{  95, 110, 0x07 },
	{  85,  95, 0x06 },
	{  78,  85, 0x05 },
	{  70,  78, 0x04 },
	{  65,  70, 0x03 },
	{  60,  65, 0x02 },
	{  42,  60, 0x01 },
	{  00,  42, 0x00 }
};
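
/*
 * Convert a target memory clock into a DLL speed code.  GDDR5 transfers
 * four data words per clock, other memory types two, so the data rate is
 * memory_clock * 4 or * 2, scaled down by 1000.  As a rough example
 * (again assuming 10 kHz clock units), a 500 MHz GDDR5 clock of 50000
 * gives a data rate of 200, which falls in the (180, 200] band and selects
 * DLL speed 0x0c.  Rates at or above the top of the table fall back to the
 * fastest setting, 0x0f.
 */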
u32 rv740_get_dll_speed(bool is_gddr5, u32 memory_clock)
{
	int i;
	u32 factor;
	u16 data_rate;

	if (is_gddr5)
		factor = 4;
	else
		factor = 2;

	data_rate = (u16)(memory_clock * factor / 1000);

	if (data_rate < dll_speed_table[0].max) {
		for (i = 0; i < 16; i++) {
			if (data_rate > dll_speed_table[i].min &&
			    data_rate <= dll_speed_table[i].max)
				return dll_speed_table[i].dll_speed;
		}
	}

	DRM_DEBUG_KMS("Target MCLK greater than largest MCLK in DLL speed table\n");

	return 0x0f;
}
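
/*
 * Fill in the SMC SCLK entry for a given engine clock.  The ATOM tables
 * provide the reference and post dividers; the feedback divider is then
 * derived from them.  Since 16384 is 2^14, fbdiv effectively carries the
 * feedback ratio with 14 fractional bits, which is presumably why
 * SPLL_DITHEN (fractional dithering) is enabled below.  Spread spectrum
 * parameters are layered on top when engine spread spectrum is enabled.
 */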
int rv740_populate_sclk_value(struct radeon_device *rdev, u32 engine_clock,
			      RV770_SMC_SCLK_VALUE *sclk)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct atom_clock_dividers dividers;
	u32 spll_func_cntl = pi->clk_regs.rv770.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = pi->clk_regs.rv770.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 = pi->clk_regs.rv770.cg_spll_func_cntl_3;
	u32 cg_spll_spread_spectrum = pi->clk_regs.rv770.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = pi->clk_regs.rv770.cg_spll_spread_spectrum_2;
	u64 tmp;
	u32 reference_clock = rdev->clock.spll.reference_freq;
	u32 reference_divider;
	u32 fbdiv;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;

	tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
	do_div(tmp, reference_clock);
	fbdiv = (u32) tmp;

	spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
	spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
	spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(2);

	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
	spll_func_cntl_3 |= SPLL_DITHEN;
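
	/*
	 * When engine spread spectrum is enabled, look up the spread
	 * parameters for the VCO frequency and convert them into the
	 * CLK_S and CLK_V register fields (roughly the spread rate and
	 * the spread amount).
	 */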
	if (pi->sclk_ss) {
		struct radeon_atom_ss ss;
		u32 vco_freq = engine_clock * dividers.post_div;

		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~CLK_S_MASK;
			cg_spll_spread_spectrum |= CLK_S(clk_s);
			cg_spll_spread_spectrum |= SSEN;

			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
		}
	}

	sclk->sclk_value = cpu_to_be32(engine_clock);
	sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
	sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
	sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
	sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(cg_spll_spread_spectrum);
	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(cg_spll_spread_spectrum_2);

	return 0;
}
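
/*
 * Fill in the SMC MCLK entry for a given memory clock.  The AD MPLL is
 * always programmed; on GDDR5 boards the DQ MPLL is programmed with the
 * same dividers as well.  The register images are stored big-endian, as
 * the SMC firmware expects, hence the cpu_to_be32() conversions.
 */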
int rv740_populate_mclk_value(struct radeon_device *rdev,
			      u32 engine_clock, u32 memory_clock,
			      RV7XX_SMC_MCLK_VALUE *mclk)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 mpll_ad_func_cntl = pi->clk_regs.rv770.mpll_ad_func_cntl;
	u32 mpll_ad_func_cntl_2 = pi->clk_regs.rv770.mpll_ad_func_cntl_2;
	u32 mpll_dq_func_cntl = pi->clk_regs.rv770.mpll_dq_func_cntl;
	u32 mpll_dq_func_cntl_2 = pi->clk_regs.rv770.mpll_dq_func_cntl_2;
	u32 mclk_pwrmgt_cntl = pi->clk_regs.rv770.mclk_pwrmgt_cntl;
	u32 dll_cntl = pi->clk_regs.rv770.dll_cntl;
	u32 mpll_ss1 = pi->clk_regs.rv770.mpll_ss1;
	u32 mpll_ss2 = pi->clk_regs.rv770.mpll_ss2;
	struct atom_clock_dividers dividers;
	u32 ibias;
	u32 dll_speed;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
					     memory_clock, false, &dividers);
	if (ret)
		return ret;

	ibias = rv770_map_clkf_to_ibias(rdev, dividers.whole_fb_div);

	mpll_ad_func_cntl &= ~(CLKR_MASK |
			       YCLK_POST_DIV_MASK |
			       CLKF_MASK |
			       CLKFRAC_MASK |
			       IBIAS_MASK);
	mpll_ad_func_cntl |= CLKR(dividers.ref_div);
	mpll_ad_func_cntl |= YCLK_POST_DIV(dividers.post_div);
	mpll_ad_func_cntl |= CLKF(dividers.whole_fb_div);
	mpll_ad_func_cntl |= CLKFRAC(dividers.frac_fb_div);
	mpll_ad_func_cntl |= IBIAS(ibias);

	if (dividers.vco_mode)
		mpll_ad_func_cntl_2 |= VCO_MODE;
	else
		mpll_ad_func_cntl_2 &= ~VCO_MODE;

	if (pi->mem_gddr5) {
		mpll_dq_func_cntl &= ~(CLKR_MASK |
				       YCLK_POST_DIV_MASK |
				       CLKF_MASK |
				       CLKFRAC_MASK |
				       IBIAS_MASK);
		mpll_dq_func_cntl |= CLKR(dividers.ref_div);
		mpll_dq_func_cntl |= YCLK_POST_DIV(dividers.post_div);
		mpll_dq_func_cntl |= CLKF(dividers.whole_fb_div);
		mpll_dq_func_cntl |= CLKFRAC(dividers.frac_fb_div);
		mpll_dq_func_cntl |= IBIAS(ibias);

		if (dividers.vco_mode)
			mpll_dq_func_cntl_2 |= VCO_MODE;
		else
			mpll_dq_func_cntl_2 &= ~VCO_MODE;
	}
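
	/*
	 * When memory spread spectrum is enabled, translate the BIOS spread
	 * parameters into the MPLL_SS1/MPLL_SS2 fields.  CLK_S is derived
	 * from the reference clock and the decoded reference divider, and
	 * CLK_V scales the spread percentage by the effective feedback
	 * divider (whole plus fractional part).
	 */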
	if (pi->mclk_ss) {
		struct radeon_atom_ss ss;
		u32 vco_freq = memory_clock * dividers.post_div;

		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
			u32 reference_clock = rdev->clock.mpll.reference_freq;
			u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
			u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
			u32 clk_v = 0x40000 * ss.percentage *
				(dividers.whole_fb_div + (dividers.frac_fb_div / 8)) / (clk_s * 10000);

			mpll_ss1 &= ~CLKV_MASK;
			mpll_ss1 |= CLKV(clk_v);

			mpll_ss2 &= ~CLKS_MASK;
			mpll_ss2 |= CLKS(clk_s);
		}
	}

	dll_speed = rv740_get_dll_speed(pi->mem_gddr5,
					memory_clock);

	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= DLL_SPEED(dll_speed);

	mclk->mclk770.mclk_value = cpu_to_be32(memory_clock);
	mclk->mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
	mclk->mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
	mclk->mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
	mclk->mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
	mclk->mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
	mclk->mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
	mclk->mclk770.vMPLL_SS = cpu_to_be32(mpll_ss1);
	mclk->mclk770.vMPLL_SS2 = cpu_to_be32(mpll_ss2);

	return 0;
}
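
/*
 * Cache the current (VBIOS-programmed) clock register values.  These serve
 * as the starting point for every SCLK/MCLK entry built above, so fields
 * the driver does not touch keep their power-on settings.
 */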
void rv740_read_clock_registers(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);

	pi->clk_regs.rv770.cg_spll_func_cntl =
		RREG32(CG_SPLL_FUNC_CNTL);
	pi->clk_regs.rv770.cg_spll_func_cntl_2 =
		RREG32(CG_SPLL_FUNC_CNTL_2);
	pi->clk_regs.rv770.cg_spll_func_cntl_3 =
		RREG32(CG_SPLL_FUNC_CNTL_3);
	pi->clk_regs.rv770.cg_spll_spread_spectrum =
		RREG32(CG_SPLL_SPREAD_SPECTRUM);
	pi->clk_regs.rv770.cg_spll_spread_spectrum_2 =
		RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
	pi->clk_regs.rv770.mpll_ad_func_cntl =
		RREG32(MPLL_AD_FUNC_CNTL);
	pi->clk_regs.rv770.mpll_ad_func_cntl_2 =
		RREG32(MPLL_AD_FUNC_CNTL_2);
	pi->clk_regs.rv770.mpll_dq_func_cntl =
		RREG32(MPLL_DQ_FUNC_CNTL);
	pi->clk_regs.rv770.mpll_dq_func_cntl_2 =
		RREG32(MPLL_DQ_FUNC_CNTL_2);
	pi->clk_regs.rv770.mclk_pwrmgt_cntl =
		RREG32(MCLK_PWRMGT_CNTL);
	pi->clk_regs.rv770.dll_cntl = RREG32(DLL_CNTL);
	pi->clk_regs.rv770.mpll_ss1 = RREG32(MPLL_SS1);
	pi->clk_regs.rv770.mpll_ss2 = RREG32(MPLL_SS2);
}
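
/*
 * Build the ACPI (lowest power) state in the SMC state table.  It is
 * cloned from the initial state, the voltage is dropped to the ACPI (or
 * minimum table) VDDC, and the clocks are effectively parked: the memory
 * DLL clocks are held in reset and bypassed, and the engine SPLL is put
 * into reset/sleep/bypass with a zero clock value.
 */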
int rv740_populate_smc_acpi_state(struct radeon_device *rdev,
				  RV770_SMC_STATETABLE *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 mpll_ad_func_cntl = pi->clk_regs.rv770.mpll_ad_func_cntl;
	u32 mpll_ad_func_cntl_2 = pi->clk_regs.rv770.mpll_ad_func_cntl_2;
	u32 mpll_dq_func_cntl = pi->clk_regs.rv770.mpll_dq_func_cntl;
	u32 mpll_dq_func_cntl_2 = pi->clk_regs.rv770.mpll_dq_func_cntl_2;
	u32 spll_func_cntl = pi->clk_regs.rv770.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = pi->clk_regs.rv770.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 = pi->clk_regs.rv770.cg_spll_func_cntl_3;
	u32 mclk_pwrmgt_cntl = pi->clk_regs.rv770.mclk_pwrmgt_cntl;
	u32 dll_cntl = pi->clk_regs.rv770.dll_cntl;

	table->ACPIState = table->initialState;

	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (pi->acpi_vddc) {
		rv770_populate_vddc_value(rdev, pi->acpi_vddc,
					  &table->ACPIState.levels[0].vddc);
		table->ACPIState.levels[0].gen2PCIE =
			pi->pcie_gen2 ?
			pi->acpi_pcie_gen2 : 0;
		table->ACPIState.levels[0].gen2XSP =
			pi->acpi_pcie_gen2;
	} else {
		rv770_populate_vddc_value(rdev, pi->min_vddc_in_table,
					  &table->ACPIState.levels[0].vddc);
		table->ACPIState.levels[0].gen2PCIE = 0;
	}
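
	/*
	 * Park the memory and engine PLLs for the ACPI state: the MRDCK
	 * memory clocks are held in reset and bypassed, and the SPLL is put
	 * into reset, sleep and bypass so this state draws as little
	 * clocking power as possible.
	 */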
	mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;

	mpll_dq_func_cntl_2 |= BYPASS | BIAS_GEN_PDNB | RESET_EN;

	mclk_pwrmgt_cntl |= (MRDCKA0_RESET |
			     MRDCKA1_RESET |
			     MRDCKB0_RESET |
			     MRDCKB1_RESET |
			     MRDCKC0_RESET |
			     MRDCKC1_RESET |
			     MRDCKD0_RESET |
			     MRDCKD1_RESET);

	dll_cntl |= (MRDCKA0_BYPASS |
		     MRDCKA1_BYPASS |
		     MRDCKB0_BYPASS |
		     MRDCKB1_BYPASS |
		     MRDCKC0_BYPASS |
		     MRDCKC1_BYPASS |
		     MRDCKD0_BYPASS |
		     MRDCKD1_BYPASS);

	spll_func_cntl |= SPLL_RESET | SPLL_SLEEP | SPLL_BYPASS_EN;

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(4);

	table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
	table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
	table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
	table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
	table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
	table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
	table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0;

	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
	table->ACPIState.levels[0].sclk.sclk_value = 0;

	table->ACPIState.levels[1] = table->ACPIState.levels[0];
	table->ACPIState.levels[2] = table->ACPIState.levels[0];

	rv770_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);

	return 0;
}
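
/*
 * Toggle memory PLL spread spectrum by setting or clearing SS_SSEN in
 * MPLL_CNTL_MODE; WREG32_P() leaves the other bits of the register intact.
 */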
void rv740_enable_mclk_spread_spectrum(struct radeon_device *rdev,
				       bool enable)
{
	if (enable)
		WREG32_P(MPLL_CNTL_MODE, SS_SSEN, ~SS_SSEN);
	else
		WREG32_P(MPLL_CNTL_MODE, 0, ~SS_SSEN);
}
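
/*
 * Convert a memory clock into a coarse frequency-ratio index.  Assuming
 * the driver's 10 kHz clock units, clocks between 100 MHz and 475 MHz map
 * to an index in 25 MHz steps, e.g. a clock of 30000 (300 MHz) yields
 * (30000 - 10000) / 2500 = 8; anything outside that range uses index 0.
 */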
u8 rv740_get_mclk_frequency_ratio(u32 memory_clock)
{
	u8 mc_para_index;

	if ((memory_clock < 10000) || (memory_clock > 47500))
		mc_para_index = 0x00;
	else
		mc_para_index = (u8)((memory_clock - 10000) / 2500);

	return mc_para_index;
}