cik.c
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "cikd.h"
#include "atom.h"
#include "cik.h"
#include "gmc_v7_0.h"
#include "cik_ih.h"
#include "dce_v8_0.h"
#include "gfx_v7_0.h"
#include "cik_sdma.h"
#include "uvd_v4_2.h"
#include "vce_v2_0.h"
#include "cik_dpm.h"
#include "uvd/uvd_4_2_d.h"
#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"
#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"
#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "amdgpu_amdkfd.h"

/*
 * Indirect registers accessor
 */
static u32 cik_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void cik_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 cik_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, (reg));
	r = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cik_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, (reg));
	WREG32(mmSMC_IND_DATA_0, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 cik_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void cik_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 cik_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void cik_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

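/*
 * The golden/init tables below are flat lists of {register offset, AND mask,
 * OR value} triplets, applied three values at a time by
 * amdgpu_program_register_sequence() in cik_init_golden_registers() below:
 * the bits in the AND mask are cleared from the current register value and
 * the OR value is written on top (a mask of 0xffffffff writes the value
 * directly).
 */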
static const u32 bonaire_golden_spm_registers[] =
{
	0xc200, 0xe0ffffff, 0xe0000000
};

static const u32 bonaire_golden_common_registers[] =
{
	0x31dc, 0xffffffff, 0x00000800,
	0x31dd, 0xffffffff, 0x00000800,
	0x31e6, 0xffffffff, 0x00007fbf,
	0x31e7, 0xffffffff, 0x00007faf
};

static const u32 bonaire_golden_registers[] =
{
	0xcd5, 0x00000333, 0x00000333,
	0xcd4, 0x000c0fc0, 0x00040200,
	0x2684, 0x00010000, 0x00058208,
	0xf000, 0xffff1fff, 0x00140000,
	0xf080, 0xfdfc0fff, 0x00000100,
	0xf08d, 0x40000000, 0x40000200,
	0x260c, 0xffffffff, 0x00000000,
	0x260d, 0xf00fffff, 0x00000400,
	0x260e, 0x0002021c, 0x00020200,
	0x31e, 0x00000080, 0x00000000,
	0x16ec, 0x000000f0, 0x00000070,
	0x16f0, 0xf0311fff, 0x80300000,
	0x263e, 0x73773777, 0x12010001,
	0xd43, 0x00810000, 0x408af000,
	0x1c0c, 0x31000111, 0x00000011,
	0xbd2, 0x73773777, 0x12010001,
	0x883, 0x00007fb6, 0x0021a1b1,
	0x884, 0x00007fb6, 0x002021b1,
	0x860, 0x00007fb6, 0x00002191,
	0x886, 0x00007fb6, 0x002121b1,
	0x887, 0x00007fb6, 0x002021b1,
	0x877, 0x00007fb6, 0x00002191,
	0x878, 0x00007fb6, 0x00002191,
	0xd8a, 0x0000003f, 0x0000000a,
	0xd8b, 0x0000003f, 0x0000000a,
	0xab9, 0x00073ffe, 0x000022a2,
	0x903, 0x000007ff, 0x00000000,
	0x2285, 0xf000003f, 0x00000007,
	0x22fc, 0x00002001, 0x00000001,
	0x22c9, 0xffffffff, 0x00ffffff,
	0xc281, 0x0000ff0f, 0x00000000,
	0xa293, 0x07ffffff, 0x06000000,
	0x136, 0x00000fff, 0x00000100,
	0xf9e, 0x00000001, 0x00000002,
	0x2440, 0x03000000, 0x0362c688,
	0x2300, 0x000000ff, 0x00000001,
	0x390, 0x00001fff, 0x00001fff,
	0x2418, 0x0000007f, 0x00000020,
	0x2542, 0x00010000, 0x00010000,
	0x2b05, 0x000003ff, 0x000000f3,
	0x2b03, 0xffffffff, 0x00001032
};

static const u32 bonaire_mgcg_cgcg_init[] =
{
	0x3108, 0xffffffff, 0xfffffffc,
	0xc200, 0xffffffff, 0xe0000000,
	0xf0a8, 0xffffffff, 0x00000100,
	0xf082, 0xffffffff, 0x00000100,
	0xf0b0, 0xffffffff, 0xc0000100,
	0xf0b2, 0xffffffff, 0xc0000100,
	0xf0b1, 0xffffffff, 0xc0000100,
	0x1579, 0xffffffff, 0x00600100,
	0xf0a0, 0xffffffff, 0x00000100,
	0xf085, 0xffffffff, 0x06000100,
	0xf088, 0xffffffff, 0x00000100,
	0xf086, 0xffffffff, 0x06000100,
	0xf081, 0xffffffff, 0x00000100,
	0xf0b8, 0xffffffff, 0x00000100,
	0xf089, 0xffffffff, 0x00000100,
	0xf080, 0xffffffff, 0x00000100,
	0xf08c, 0xffffffff, 0x00000100,
	0xf08d, 0xffffffff, 0x00000100,
	0xf094, 0xffffffff, 0x00000100,
	0xf095, 0xffffffff, 0x00000100,
	0xf096, 0xffffffff, 0x00000100,
	0xf097, 0xffffffff, 0x00000100,
	0xf098, 0xffffffff, 0x00000100,
	0xf09f, 0xffffffff, 0x00000100,
	0xf09e, 0xffffffff, 0x00000100,
	0xf084, 0xffffffff, 0x06000100,
	0xf0a4, 0xffffffff, 0x00000100,
	0xf09d, 0xffffffff, 0x00000100,
	0xf0ad, 0xffffffff, 0x00000100,
	0xf0ac, 0xffffffff, 0x00000100,
	0xf09c, 0xffffffff, 0x00000100,
	0xc200, 0xffffffff, 0xe0000000,
	0xf008, 0xffffffff, 0x00010000,
	0xf009, 0xffffffff, 0x00030002,
	0xf00a, 0xffffffff, 0x00040007,
	0xf00b, 0xffffffff, 0x00060005,
	0xf00c, 0xffffffff, 0x00090008,
	0xf00d, 0xffffffff, 0x00010000,
	0xf00e, 0xffffffff, 0x00030002,
	0xf00f, 0xffffffff, 0x00040007,
	0xf010, 0xffffffff, 0x00060005,
	0xf011, 0xffffffff, 0x00090008,
	0xf012, 0xffffffff, 0x00010000,
	0xf013, 0xffffffff, 0x00030002,
	0xf014, 0xffffffff, 0x00040007,
	0xf015, 0xffffffff, 0x00060005,
	0xf016, 0xffffffff, 0x00090008,
	0xf017, 0xffffffff, 0x00010000,
	0xf018, 0xffffffff, 0x00030002,
	0xf019, 0xffffffff, 0x00040007,
	0xf01a, 0xffffffff, 0x00060005,
	0xf01b, 0xffffffff, 0x00090008,
	0xf01c, 0xffffffff, 0x00010000,
	0xf01d, 0xffffffff, 0x00030002,
	0xf01e, 0xffffffff, 0x00040007,
	0xf01f, 0xffffffff, 0x00060005,
	0xf020, 0xffffffff, 0x00090008,
	0xf021, 0xffffffff, 0x00010000,
	0xf022, 0xffffffff, 0x00030002,
	0xf023, 0xffffffff, 0x00040007,
	0xf024, 0xffffffff, 0x00060005,
	0xf025, 0xffffffff, 0x00090008,
	0xf026, 0xffffffff, 0x00010000,
	0xf027, 0xffffffff, 0x00030002,
	0xf028, 0xffffffff, 0x00040007,
	0xf029, 0xffffffff, 0x00060005,
	0xf02a, 0xffffffff, 0x00090008,
	0xf000, 0xffffffff, 0x96e00200,
	0x21c2, 0xffffffff, 0x00900100,
	0x3109, 0xffffffff, 0x0020003f,
	0xe, 0xffffffff, 0x0140001c,
	0xf, 0x000f0000, 0x000f0000,
	0x88, 0xffffffff, 0xc060000c,
	0x89, 0xc0000fff, 0x00000100,
	0x3e4, 0xffffffff, 0x00000100,
	0x3e6, 0x00000101, 0x00000000,
	0x82a, 0xffffffff, 0x00000104,
	0x1579, 0xff000fff, 0x00000100,
	0xc33, 0xc0000fff, 0x00000104,
	0x3079, 0x00000001, 0x00000001,
	0x3403, 0xff000ff0, 0x00000100,
	0x3603, 0xff000ff0, 0x00000100
};

static const u32 spectre_golden_spm_registers[] =
{
	0xc200, 0xe0ffffff, 0xe0000000
};

static const u32 spectre_golden_common_registers[] =
{
	0x31dc, 0xffffffff, 0x00000800,
	0x31dd, 0xffffffff, 0x00000800,
	0x31e6, 0xffffffff, 0x00007fbf,
	0x31e7, 0xffffffff, 0x00007faf
};

static const u32 spectre_golden_registers[] =
{
	0xf000, 0xffff1fff, 0x96940200,
	0xf003, 0xffff0001, 0xff000000,
	0xf080, 0xfffc0fff, 0x00000100,
	0x1bb6, 0x00010101, 0x00010000,
	0x260d, 0xf00fffff, 0x00000400,
	0x260e, 0xfffffffc, 0x00020200,
	0x16ec, 0x000000f0, 0x00000070,
	0x16f0, 0xf0311fff, 0x80300000,
	0x263e, 0x73773777, 0x12010001,
	0x26df, 0x00ff0000, 0x00fc0000,
	0xbd2, 0x73773777, 0x12010001,
	0x2285, 0xf000003f, 0x00000007,
	0x22c9, 0xffffffff, 0x00ffffff,
	0xa0d4, 0x3f3f3fff, 0x00000082,
	0xa0d5, 0x0000003f, 0x00000000,
	0xf9e, 0x00000001, 0x00000002,
	0x244f, 0xffff03df, 0x00000004,
	0x31da, 0x00000008, 0x00000008,
	0x2300, 0x000008ff, 0x00000800,
	0x2542, 0x00010000, 0x00010000,
	0x2b03, 0xffffffff, 0x54763210,
	0x853e, 0x01ff01ff, 0x00000002,
	0x8526, 0x007ff800, 0x00200000,
	0x8057, 0xffffffff, 0x00000f40,
	0xc24d, 0xffffffff, 0x00000001
};

static const u32 spectre_mgcg_cgcg_init[] =
{
	0x3108, 0xffffffff, 0xfffffffc,
	0xc200, 0xffffffff, 0xe0000000,
	0xf0a8, 0xffffffff, 0x00000100,
	0xf082, 0xffffffff, 0x00000100,
	0xf0b0, 0xffffffff, 0x00000100,
	0xf0b2, 0xffffffff, 0x00000100,
	0xf0b1, 0xffffffff, 0x00000100,
	0x1579, 0xffffffff, 0x00600100,
	0xf0a0, 0xffffffff, 0x00000100,
	0xf085, 0xffffffff, 0x06000100,
	0xf088, 0xffffffff, 0x00000100,
	0xf086, 0xffffffff, 0x06000100,
	0xf081, 0xffffffff, 0x00000100,
	0xf0b8, 0xffffffff, 0x00000100,
	0xf089, 0xffffffff, 0x00000100,
	0xf080, 0xffffffff, 0x00000100,
	0xf08c, 0xffffffff, 0x00000100,
	0xf08d, 0xffffffff, 0x00000100,
	0xf094, 0xffffffff, 0x00000100,
	0xf095, 0xffffffff, 0x00000100,
	0xf096, 0xffffffff, 0x00000100,
	0xf097, 0xffffffff, 0x00000100,
	0xf098, 0xffffffff, 0x00000100,
	0xf09f, 0xffffffff, 0x00000100,
	0xf09e, 0xffffffff, 0x00000100,
	0xf084, 0xffffffff, 0x06000100,
	0xf0a4, 0xffffffff, 0x00000100,
	0xf09d, 0xffffffff, 0x00000100,
	0xf0ad, 0xffffffff, 0x00000100,
	0xf0ac, 0xffffffff, 0x00000100,
	0xf09c, 0xffffffff, 0x00000100,
	0xc200, 0xffffffff, 0xe0000000,
	0xf008, 0xffffffff, 0x00010000,
	0xf009, 0xffffffff, 0x00030002,
	0xf00a, 0xffffffff, 0x00040007,
	0xf00b, 0xffffffff, 0x00060005,
	0xf00c, 0xffffffff, 0x00090008,
	0xf00d, 0xffffffff, 0x00010000,
	0xf00e, 0xffffffff, 0x00030002,
	0xf00f, 0xffffffff, 0x00040007,
	0xf010, 0xffffffff, 0x00060005,
	0xf011, 0xffffffff, 0x00090008,
	0xf012, 0xffffffff, 0x00010000,
	0xf013, 0xffffffff, 0x00030002,
	0xf014, 0xffffffff, 0x00040007,
	0xf015, 0xffffffff, 0x00060005,
	0xf016, 0xffffffff, 0x00090008,
	0xf017, 0xffffffff, 0x00010000,
	0xf018, 0xffffffff, 0x00030002,
	0xf019, 0xffffffff, 0x00040007,
	0xf01a, 0xffffffff, 0x00060005,
	0xf01b, 0xffffffff, 0x00090008,
	0xf01c, 0xffffffff, 0x00010000,
	0xf01d, 0xffffffff, 0x00030002,
	0xf01e, 0xffffffff, 0x00040007,
	0xf01f, 0xffffffff, 0x00060005,
	0xf020, 0xffffffff, 0x00090008,
	0xf021, 0xffffffff, 0x00010000,
	0xf022, 0xffffffff, 0x00030002,
	0xf023, 0xffffffff, 0x00040007,
	0xf024, 0xffffffff, 0x00060005,
	0xf025, 0xffffffff, 0x00090008,
	0xf026, 0xffffffff, 0x00010000,
	0xf027, 0xffffffff, 0x00030002,
	0xf028, 0xffffffff, 0x00040007,
	0xf029, 0xffffffff, 0x00060005,
	0xf02a, 0xffffffff, 0x00090008,
	0xf02b, 0xffffffff, 0x00010000,
	0xf02c, 0xffffffff, 0x00030002,
	0xf02d, 0xffffffff, 0x00040007,
	0xf02e, 0xffffffff, 0x00060005,
	0xf02f, 0xffffffff, 0x00090008,
	0xf000, 0xffffffff, 0x96e00200,
	0x21c2, 0xffffffff, 0x00900100,
	0x3109, 0xffffffff, 0x0020003f,
	0xe, 0xffffffff, 0x0140001c,
	0xf, 0x000f0000, 0x000f0000,
	0x88, 0xffffffff, 0xc060000c,
	0x89, 0xc0000fff, 0x00000100,
	0x3e4, 0xffffffff, 0x00000100,
	0x3e6, 0x00000101, 0x00000000,
	0x82a, 0xffffffff, 0x00000104,
	0x1579, 0xff000fff, 0x00000100,
	0xc33, 0xc0000fff, 0x00000104,
	0x3079, 0x00000001, 0x00000001,
	0x3403, 0xff000ff0, 0x00000100,
	0x3603, 0xff000ff0, 0x00000100
};

static const u32 kalindi_golden_spm_registers[] =
{
	0xc200, 0xe0ffffff, 0xe0000000
};

static const u32 kalindi_golden_common_registers[] =
{
	0x31dc, 0xffffffff, 0x00000800,
	0x31dd, 0xffffffff, 0x00000800,
	0x31e6, 0xffffffff, 0x00007fbf,
	0x31e7, 0xffffffff, 0x00007faf
};

static const u32 kalindi_golden_registers[] =
{
	0xf000, 0xffffdfff, 0x6e944040,
	0x1579, 0xff607fff, 0xfc000100,
	0xf088, 0xff000fff, 0x00000100,
	0xf089, 0xff000fff, 0x00000100,
	0xf080, 0xfffc0fff, 0x00000100,
	0x1bb6, 0x00010101, 0x00010000,
	0x260c, 0xffffffff, 0x00000000,
	0x260d, 0xf00fffff, 0x00000400,
	0x16ec, 0x000000f0, 0x00000070,
	0x16f0, 0xf0311fff, 0x80300000,
	0x263e, 0x73773777, 0x12010001,
	0x263f, 0xffffffff, 0x00000010,
	0x26df, 0x00ff0000, 0x00fc0000,
	0x200c, 0x00001f0f, 0x0000100a,
	0xbd2, 0x73773777, 0x12010001,
	0x902, 0x000fffff, 0x000c007f,
	0x2285, 0xf000003f, 0x00000007,
	0x22c9, 0x3fff3fff, 0x00ffcfff,
	0xc281, 0x0000ff0f, 0x00000000,
	0xa293, 0x07ffffff, 0x06000000,
	0x136, 0x00000fff, 0x00000100,
	0xf9e, 0x00000001, 0x00000002,
	0x31da, 0x00000008, 0x00000008,
	0x2300, 0x000000ff, 0x00000003,
	0x853e, 0x01ff01ff, 0x00000002,
	0x8526, 0x007ff800, 0x00200000,
	0x8057, 0xffffffff, 0x00000f40,
	0x2231, 0x001f3ae3, 0x00000082,
	0x2235, 0x0000001f, 0x00000010,
	0xc24d, 0xffffffff, 0x00000000
};

static const u32 kalindi_mgcg_cgcg_init[] =
{
	0x3108, 0xffffffff, 0xfffffffc,
	0xc200, 0xffffffff, 0xe0000000,
	0xf0a8, 0xffffffff, 0x00000100,
	0xf082, 0xffffffff, 0x00000100,
	0xf0b0, 0xffffffff, 0x00000100,
	0xf0b2, 0xffffffff, 0x00000100,
	0xf0b1, 0xffffffff, 0x00000100,
	0x1579, 0xffffffff, 0x00600100,
	0xf0a0, 0xffffffff, 0x00000100,
	0xf085, 0xffffffff, 0x06000100,
	0xf088, 0xffffffff, 0x00000100,
	0xf086, 0xffffffff, 0x06000100,
	0xf081, 0xffffffff, 0x00000100,
	0xf0b8, 0xffffffff, 0x00000100,
	0xf089, 0xffffffff, 0x00000100,
	0xf080, 0xffffffff, 0x00000100,
	0xf08c, 0xffffffff, 0x00000100,
	0xf08d, 0xffffffff, 0x00000100,
	0xf094, 0xffffffff, 0x00000100,
	0xf095, 0xffffffff, 0x00000100,
	0xf096, 0xffffffff, 0x00000100,
	0xf097, 0xffffffff, 0x00000100,
	0xf098, 0xffffffff, 0x00000100,
	0xf09f, 0xffffffff, 0x00000100,
	0xf09e, 0xffffffff, 0x00000100,
	0xf084, 0xffffffff, 0x06000100,
	0xf0a4, 0xffffffff, 0x00000100,
	0xf09d, 0xffffffff, 0x00000100,
	0xf0ad, 0xffffffff, 0x00000100,
	0xf0ac, 0xffffffff, 0x00000100,
	0xf09c, 0xffffffff, 0x00000100,
	0xc200, 0xffffffff, 0xe0000000,
	0xf008, 0xffffffff, 0x00010000,
	0xf009, 0xffffffff, 0x00030002,
	0xf00a, 0xffffffff, 0x00040007,
	0xf00b, 0xffffffff, 0x00060005,
	0xf00c, 0xffffffff, 0x00090008,
	0xf00d, 0xffffffff, 0x00010000,
	0xf00e, 0xffffffff, 0x00030002,
	0xf00f, 0xffffffff, 0x00040007,
	0xf010, 0xffffffff, 0x00060005,
	0xf011, 0xffffffff, 0x00090008,
	0xf000, 0xffffffff, 0x96e00200,
	0x21c2, 0xffffffff, 0x00900100,
	0x3109, 0xffffffff, 0x0020003f,
	0xe, 0xffffffff, 0x0140001c,
	0xf, 0x000f0000, 0x000f0000,
	0x88, 0xffffffff, 0xc060000c,
	0x89, 0xc0000fff, 0x00000100,
	0x82a, 0xffffffff, 0x00000104,
	0x1579, 0xff000fff, 0x00000100,
	0xc33, 0xc0000fff, 0x00000104,
	0x3079, 0x00000001, 0x00000001,
	0x3403, 0xff000ff0, 0x00000100,
	0x3603, 0xff000ff0, 0x00000100
};

static const u32 hawaii_golden_spm_registers[] =
{
	0xc200, 0xe0ffffff, 0xe0000000
};

static const u32 hawaii_golden_common_registers[] =
{
	0xc200, 0xffffffff, 0xe0000000,
	0xa0d4, 0xffffffff, 0x3a00161a,
	0xa0d5, 0xffffffff, 0x0000002e,
	0x2684, 0xffffffff, 0x00018208,
	0x263e, 0xffffffff, 0x12011003
};

static const u32 hawaii_golden_registers[] =
{
	0xcd5, 0x00000333, 0x00000333,
	0x2684, 0x00010000, 0x00058208,
	0x260c, 0xffffffff, 0x00000000,
	0x260d, 0xf00fffff, 0x00000400,
	0x260e, 0x0002021c, 0x00020200,
	0x31e, 0x00000080, 0x00000000,
	0x16ec, 0x000000f0, 0x00000070,
	0x16f0, 0xf0311fff, 0x80300000,
	0xd43, 0x00810000, 0x408af000,
	0x1c0c, 0x31000111, 0x00000011,
	0xbd2, 0x73773777, 0x12010001,
	0x848, 0x0000007f, 0x0000001b,
	0x877, 0x00007fb6, 0x00002191,
	0xd8a, 0x0000003f, 0x0000000a,
	0xd8b, 0x0000003f, 0x0000000a,
	0xab9, 0x00073ffe, 0x000022a2,
	0x903, 0x000007ff, 0x00000000,
	0x22fc, 0x00002001, 0x00000001,
	0x22c9, 0xffffffff, 0x00ffffff,
	0xc281, 0x0000ff0f, 0x00000000,
	0xa293, 0x07ffffff, 0x06000000,
	0xf9e, 0x00000001, 0x00000002,
	0x31da, 0x00000008, 0x00000008,
	0x31dc, 0x00000f00, 0x00000800,
	0x31dd, 0x00000f00, 0x00000800,
	0x31e6, 0x00ffffff, 0x00ff7fbf,
	0x31e7, 0x00ffffff, 0x00ff7faf,
	0x2300, 0x000000ff, 0x00000800,
	0x390, 0x00001fff, 0x00001fff,
	0x2418, 0x0000007f, 0x00000020,
	0x2542, 0x00010000, 0x00010000,
	0x2b80, 0x00100000, 0x000ff07c,
	0x2b05, 0x000003ff, 0x0000000f,
	0x2b04, 0xffffffff, 0x7564fdec,
	0x2b03, 0xffffffff, 0x3120b9a8,
	0x2b02, 0x20000000, 0x0f9c0000
};

static const u32 hawaii_mgcg_cgcg_init[] =
{
	0x3108, 0xffffffff, 0xfffffffd,
	0xc200, 0xffffffff, 0xe0000000,
	0xf0a8, 0xffffffff, 0x00000100,
	0xf082, 0xffffffff, 0x00000100,
	0xf0b0, 0xffffffff, 0x00000100,
	0xf0b2, 0xffffffff, 0x00000100,
	0xf0b1, 0xffffffff, 0x00000100,
	0x1579, 0xffffffff, 0x00200100,
	0xf0a0, 0xffffffff, 0x00000100,
	0xf085, 0xffffffff, 0x06000100,
	0xf088, 0xffffffff, 0x00000100,
	0xf086, 0xffffffff, 0x06000100,
	0xf081, 0xffffffff, 0x00000100,
	0xf0b8, 0xffffffff, 0x00000100,
	0xf089, 0xffffffff, 0x00000100,
	0xf080, 0xffffffff, 0x00000100,
	0xf08c, 0xffffffff, 0x00000100,
	0xf08d, 0xffffffff, 0x00000100,
	0xf094, 0xffffffff, 0x00000100,
	0xf095, 0xffffffff, 0x00000100,
	0xf096, 0xffffffff, 0x00000100,
	0xf097, 0xffffffff, 0x00000100,
	0xf098, 0xffffffff, 0x00000100,
	0xf09f, 0xffffffff, 0x00000100,
	0xf09e, 0xffffffff, 0x00000100,
	0xf084, 0xffffffff, 0x06000100,
	0xf0a4, 0xffffffff, 0x00000100,
	0xf09d, 0xffffffff, 0x00000100,
	0xf0ad, 0xffffffff, 0x00000100,
	0xf0ac, 0xffffffff, 0x00000100,
	0xf09c, 0xffffffff, 0x00000100,
	0xc200, 0xffffffff, 0xe0000000,
	0xf008, 0xffffffff, 0x00010000,
	0xf009, 0xffffffff, 0x00030002,
	0xf00a, 0xffffffff, 0x00040007,
	0xf00b, 0xffffffff, 0x00060005,
	0xf00c, 0xffffffff, 0x00090008,
	0xf00d, 0xffffffff, 0x00010000,
	0xf00e, 0xffffffff, 0x00030002,
	0xf00f, 0xffffffff, 0x00040007,
	0xf010, 0xffffffff, 0x00060005,
	0xf011, 0xffffffff, 0x00090008,
	0xf012, 0xffffffff, 0x00010000,
	0xf013, 0xffffffff, 0x00030002,
	0xf014, 0xffffffff, 0x00040007,
	0xf015, 0xffffffff, 0x00060005,
	0xf016, 0xffffffff, 0x00090008,
	0xf017, 0xffffffff, 0x00010000,
	0xf018, 0xffffffff, 0x00030002,
	0xf019, 0xffffffff, 0x00040007,
	0xf01a, 0xffffffff, 0x00060005,
	0xf01b, 0xffffffff, 0x00090008,
	0xf01c, 0xffffffff, 0x00010000,
	0xf01d, 0xffffffff, 0x00030002,
	0xf01e, 0xffffffff, 0x00040007,
	0xf01f, 0xffffffff, 0x00060005,
	0xf020, 0xffffffff, 0x00090008,
	0xf021, 0xffffffff, 0x00010000,
	0xf022, 0xffffffff, 0x00030002,
	0xf023, 0xffffffff, 0x00040007,
	0xf024, 0xffffffff, 0x00060005,
	0xf025, 0xffffffff, 0x00090008,
	0xf026, 0xffffffff, 0x00010000,
	0xf027, 0xffffffff, 0x00030002,
	0xf028, 0xffffffff, 0x00040007,
	0xf029, 0xffffffff, 0x00060005,
	0xf02a, 0xffffffff, 0x00090008,
	0xf02b, 0xffffffff, 0x00010000,
	0xf02c, 0xffffffff, 0x00030002,
	0xf02d, 0xffffffff, 0x00040007,
	0xf02e, 0xffffffff, 0x00060005,
	0xf02f, 0xffffffff, 0x00090008,
	0xf030, 0xffffffff, 0x00010000,
	0xf031, 0xffffffff, 0x00030002,
	0xf032, 0xffffffff, 0x00040007,
	0xf033, 0xffffffff, 0x00060005,
	0xf034, 0xffffffff, 0x00090008,
	0xf035, 0xffffffff, 0x00010000,
	0xf036, 0xffffffff, 0x00030002,
	0xf037, 0xffffffff, 0x00040007,
	0xf038, 0xffffffff, 0x00060005,
	0xf039, 0xffffffff, 0x00090008,
	0xf03a, 0xffffffff, 0x00010000,
	0xf03b, 0xffffffff, 0x00030002,
	0xf03c, 0xffffffff, 0x00040007,
	0xf03d, 0xffffffff, 0x00060005,
	0xf03e, 0xffffffff, 0x00090008,
	0x30c6, 0xffffffff, 0x00020200,
	0xcd4, 0xffffffff, 0x00000200,
	0x570, 0xffffffff, 0x00000400,
	0x157a, 0xffffffff, 0x00000000,
	0xbd4, 0xffffffff, 0x00000902,
	0xf000, 0xffffffff, 0x96940200,
	0x21c2, 0xffffffff, 0x00900100,
	0x3109, 0xffffffff, 0x0020003f,
	0xe, 0xffffffff, 0x0140001c,
	0xf, 0x000f0000, 0x000f0000,
	0x88, 0xffffffff, 0xc060000c,
	0x89, 0xc0000fff, 0x00000100,
	0x3e4, 0xffffffff, 0x00000100,
	0x3e6, 0x00000101, 0x00000000,
	0x82a, 0xffffffff, 0x00000104,
	0x1579, 0xff000fff, 0x00000100,
	0xc33, 0xc0000fff, 0x00000104,
	0x3079, 0x00000001, 0x00000001,
	0x3403, 0xff000ff0, 0x00000100,
	0x3603, 0xff000ff0, 0x00000100
};

static const u32 godavari_golden_registers[] =
{
	0x1579, 0xff607fff, 0xfc000100,
	0x1bb6, 0x00010101, 0x00010000,
	0x260c, 0xffffffff, 0x00000000,
	0x260c0, 0xf00fffff, 0x00000400,
	0x184c, 0xffffffff, 0x00010000,
	0x16ec, 0x000000f0, 0x00000070,
	0x16f0, 0xf0311fff, 0x80300000,
	0x263e, 0x73773777, 0x12010001,
	0x263f, 0xffffffff, 0x00000010,
	0x200c, 0x00001f0f, 0x0000100a,
	0xbd2, 0x73773777, 0x12010001,
	0x902, 0x000fffff, 0x000c007f,
	0x2285, 0xf000003f, 0x00000007,
	0x22c9, 0xffffffff, 0x00ff0fff,
	0xc281, 0x0000ff0f, 0x00000000,
	0xa293, 0x07ffffff, 0x06000000,
	0x136, 0x00000fff, 0x00000100,
	0x3405, 0x00010000, 0x00810001,
	0x3605, 0x00010000, 0x00810001,
	0xf9e, 0x00000001, 0x00000002,
	0x31da, 0x00000008, 0x00000008,
	0x31dc, 0x00000f00, 0x00000800,
	0x31dd, 0x00000f00, 0x00000800,
	0x31e6, 0x00ffffff, 0x00ff7fbf,
	0x31e7, 0x00ffffff, 0x00ff7faf,
	0x2300, 0x000000ff, 0x00000001,
	0x853e, 0x01ff01ff, 0x00000002,
	0x8526, 0x007ff800, 0x00200000,
	0x8057, 0xffffffff, 0x00000f40,
	0x2231, 0x001f3ae3, 0x00000082,
	0x2235, 0x0000001f, 0x00000010,
	0xc24d, 0xffffffff, 0x00000000
};

static void cik_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		amdgpu_program_register_sequence(adev,
						 bonaire_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 bonaire_golden_registers,
						 (const u32)ARRAY_SIZE(bonaire_golden_registers));
		amdgpu_program_register_sequence(adev,
						 bonaire_golden_common_registers,
						 (const u32)ARRAY_SIZE(bonaire_golden_common_registers));
		amdgpu_program_register_sequence(adev,
						 bonaire_golden_spm_registers,
						 (const u32)ARRAY_SIZE(bonaire_golden_spm_registers));
		break;
	case CHIP_KABINI:
		amdgpu_program_register_sequence(adev,
						 kalindi_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 kalindi_golden_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_registers));
		amdgpu_program_register_sequence(adev,
						 kalindi_golden_common_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
		amdgpu_program_register_sequence(adev,
						 kalindi_golden_spm_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
		break;
	case CHIP_MULLINS:
		amdgpu_program_register_sequence(adev,
						 kalindi_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 godavari_golden_registers,
						 (const u32)ARRAY_SIZE(godavari_golden_registers));
		amdgpu_program_register_sequence(adev,
						 kalindi_golden_common_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
		amdgpu_program_register_sequence(adev,
						 kalindi_golden_spm_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
		break;
	case CHIP_KAVERI:
		amdgpu_program_register_sequence(adev,
						 spectre_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(spectre_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 spectre_golden_registers,
						 (const u32)ARRAY_SIZE(spectre_golden_registers));
		amdgpu_program_register_sequence(adev,
						 spectre_golden_common_registers,
						 (const u32)ARRAY_SIZE(spectre_golden_common_registers));
		amdgpu_program_register_sequence(adev,
						 spectre_golden_spm_registers,
						 (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
		break;
	case CHIP_HAWAII:
		amdgpu_program_register_sequence(adev,
						 hawaii_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(hawaii_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 hawaii_golden_registers,
						 (const u32)ARRAY_SIZE(hawaii_golden_registers));
		amdgpu_program_register_sequence(adev,
						 hawaii_golden_common_registers,
						 (const u32)ARRAY_SIZE(hawaii_golden_common_registers));
		amdgpu_program_register_sequence(adev,
						 hawaii_golden_spm_registers,
						 (const u32)ARRAY_SIZE(hawaii_golden_spm_registers));
		break;
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * cik_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (CIK).
 */
static u32 cik_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;

	if (adev->flags & AMD_IS_APU) {
		if (RREG32_SMC(ixGENERAL_PWRMGT) & GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK)
			return reference_clock / 2;
	} else {
		if (RREG32_SMC(ixCG_CLKPIN_CNTL) & CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK)
			return reference_clock / 4;
	}
	return reference_clock;
}

/**
 * cik_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void cik_srbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl =
		(((pipe << SRBM_GFX_CNTL__PIPEID__SHIFT) & SRBM_GFX_CNTL__PIPEID_MASK)|
		((me << SRBM_GFX_CNTL__MEID__SHIFT) & SRBM_GFX_CNTL__MEID_MASK)|
		((vmid << SRBM_GFX_CNTL__VMID__SHIFT) & SRBM_GFX_CNTL__VMID_MASK)|
		((queue << SRBM_GFX_CNTL__QUEUEID__SHIFT) & SRBM_GFX_CNTL__QUEUEID_MASK));
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static void cik_vga_set_state(struct amdgpu_device *adev, bool state)
{
	uint32_t tmp;

	tmp = RREG32(mmCONFIG_CNTL);
	if (state == false)
		tmp |= CONFIG_CNTL__VGA_DIS_MASK;
	else
		tmp &= ~CONFIG_CNTL__VGA_DIS_MASK;
	WREG32(mmCONFIG_CNTL, tmp);
}

static bool cik_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

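/*
 * Register whitelist for the read_register callback. For each entry, the
 * second field (untouched) leaves *value at 0 instead of reading the
 * register, and the third (grbm_indexed) routes the read through
 * cik_read_indexed_register() so a specific SE/SH instance can be selected
 * via GRBM_GFX_INDEX; see cik_read_register() below.
 */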
static struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
	{mmGRBM_STATUS, false},
	{mmGB_ADDR_CONFIG, false},
	{mmMC_ARB_RAMCFG, false},
	{mmGB_TILE_MODE0, false},
	{mmGB_TILE_MODE1, false},
	{mmGB_TILE_MODE2, false},
	{mmGB_TILE_MODE3, false},
	{mmGB_TILE_MODE4, false},
	{mmGB_TILE_MODE5, false},
	{mmGB_TILE_MODE6, false},
	{mmGB_TILE_MODE7, false},
	{mmGB_TILE_MODE8, false},
	{mmGB_TILE_MODE9, false},
	{mmGB_TILE_MODE10, false},
	{mmGB_TILE_MODE11, false},
	{mmGB_TILE_MODE12, false},
	{mmGB_TILE_MODE13, false},
	{mmGB_TILE_MODE14, false},
	{mmGB_TILE_MODE15, false},
	{mmGB_TILE_MODE16, false},
	{mmGB_TILE_MODE17, false},
	{mmGB_TILE_MODE18, false},
	{mmGB_TILE_MODE19, false},
	{mmGB_TILE_MODE20, false},
	{mmGB_TILE_MODE21, false},
	{mmGB_TILE_MODE22, false},
	{mmGB_TILE_MODE23, false},
	{mmGB_TILE_MODE24, false},
	{mmGB_TILE_MODE25, false},
	{mmGB_TILE_MODE26, false},
	{mmGB_TILE_MODE27, false},
	{mmGB_TILE_MODE28, false},
	{mmGB_TILE_MODE29, false},
	{mmGB_TILE_MODE30, false},
	{mmGB_TILE_MODE31, false},
	{mmGB_MACROTILE_MODE0, false},
	{mmGB_MACROTILE_MODE1, false},
	{mmGB_MACROTILE_MODE2, false},
	{mmGB_MACROTILE_MODE3, false},
	{mmGB_MACROTILE_MODE4, false},
	{mmGB_MACROTILE_MODE5, false},
	{mmGB_MACROTILE_MODE6, false},
	{mmGB_MACROTILE_MODE7, false},
	{mmGB_MACROTILE_MODE8, false},
	{mmGB_MACROTILE_MODE9, false},
	{mmGB_MACROTILE_MODE10, false},
	{mmGB_MACROTILE_MODE11, false},
	{mmGB_MACROTILE_MODE12, false},
	{mmGB_MACROTILE_MODE13, false},
	{mmGB_MACROTILE_MODE14, false},
	{mmGB_MACROTILE_MODE15, false},
	{mmCC_RB_BACKEND_DISABLE, false, true},
	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
	{mmGB_BACKEND_MAP, false, false},
	{mmPA_SC_RASTER_CONFIG, false, true},
	{mmPA_SC_RASTER_CONFIG_1, false, true},
};

static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
					  u32 se_num, u32 sh_num,
					  u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		gfx_v7_0_select_se_sh(adev, se_num, sh_num);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(cik_allowed_read_registers); i++) {
		if (reg_offset != cik_allowed_read_registers[i].reg_offset)
			continue;

		if (!cik_allowed_read_registers[i].untouched)
			*value = cik_allowed_read_registers[i].grbm_indexed ?
				cik_read_indexed_register(adev, se_num,
							  sh_num, reg_offset) :
				RREG32(reg_offset);
		return 0;
	}
	return -EINVAL;
}

static void cik_print_gpu_status_regs(struct amdgpu_device *adev)
{
	dev_info(adev->dev, " GRBM_STATUS=0x%08X\n",
		 RREG32(mmGRBM_STATUS));
	dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n",
		 RREG32(mmGRBM_STATUS2));
	dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n",
		 RREG32(mmGRBM_STATUS_SE0));
	dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n",
		 RREG32(mmGRBM_STATUS_SE1));
	dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n",
		 RREG32(mmGRBM_STATUS_SE2));
	dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n",
		 RREG32(mmGRBM_STATUS_SE3));
	dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
		 RREG32(mmSRBM_STATUS));
	dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
		 RREG32(mmSRBM_STATUS2));
	dev_info(adev->dev, " SDMA0_STATUS_REG = 0x%08X\n",
		 RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
	dev_info(adev->dev, " SDMA1_STATUS_REG = 0x%08X\n",
		 RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
	dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
	dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
		 RREG32(mmCP_STALLED_STAT1));
	dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
		 RREG32(mmCP_STALLED_STAT2));
	dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
		 RREG32(mmCP_STALLED_STAT3));
	dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
		 RREG32(mmCP_CPF_BUSY_STAT));
	dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
		 RREG32(mmCP_CPF_STALLED_STAT1));
	dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
	dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
	dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
		 RREG32(mmCP_CPC_STALLED_STAT1));
	dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
}

/**
 * amdgpu_cik_gpu_check_soft_reset - check which blocks are busy
 *
 * @adev: amdgpu_device pointer
 *
 * Check which blocks are busy and return the relevant reset
 * mask to be used by cik_gpu_soft_reset().
 * Returns a mask of the blocks to be reset.
 */
u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(mmGRBM_STATUS);
	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
		reset_mask |= AMDGPU_RESET_GFX;

	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK))
		reset_mask |= AMDGPU_RESET_CP;

	/* GRBM_STATUS2 */
	tmp = RREG32(mmGRBM_STATUS2);
	if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_RLC;

	/* SDMA0_STATUS_REG */
	tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
	if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
		reset_mask |= AMDGPU_RESET_DMA;

	/* SDMA1_STATUS_REG */
	tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
	if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
		reset_mask |= AMDGPU_RESET_DMA1;

	/* SRBM_STATUS2 */
	tmp = RREG32(mmSRBM_STATUS2);
	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_DMA;

	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_DMA1;

	/* SRBM_STATUS */
	tmp = RREG32(mmSRBM_STATUS);
	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_IH;

	if (tmp & SRBM_STATUS__SEM_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_SEM;

	if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
		reset_mask |= AMDGPU_RESET_GRBM;

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_VMC;

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK))
		reset_mask |= AMDGPU_RESET_MC;

	if (amdgpu_display_is_display_hung(adev))
		reset_mask |= AMDGPU_RESET_DISPLAY;

	/* Skip MC reset as it's most likely not hung, just busy */
	if (reset_mask & AMDGPU_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~AMDGPU_RESET_MC;
	}

	return reset_mask;
}

/**
 * cik_gpu_soft_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 * @reset_mask: mask of which blocks to reset
 *
 * Soft reset the blocks specified in @reset_mask.
 */
static void cik_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
{
	struct amdgpu_mode_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	cik_print_gpu_status_regs(adev);
	dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));

	/* disable CG/PG */

	/* stop the rlc */
	gfx_v7_0_rlc_stop(adev);

	/* Disable GFX parsing/prefetching */
	WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);

	/* Disable MEC parsing/prefetching */
	WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);

	if (reset_mask & AMDGPU_RESET_DMA) {
		/* sdma0 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
	}
	if (reset_mask & AMDGPU_RESET_DMA1) {
		/* sdma1 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
	}

	gmc_v7_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out !\n");
	}

	if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP))
		grbm_soft_reset = GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
			GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;

	if (reset_mask & AMDGPU_RESET_CP) {
		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK;
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
	}

	if (reset_mask & AMDGPU_RESET_DMA)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;

	if (reset_mask & AMDGPU_RESET_DMA1)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;

	if (reset_mask & AMDGPU_RESET_DISPLAY)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;

	if (reset_mask & AMDGPU_RESET_RLC)
		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;

	if (reset_mask & AMDGPU_RESET_SEM)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SEM_MASK;

	if (reset_mask & AMDGPU_RESET_IH)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;

	if (reset_mask & AMDGPU_RESET_GRBM)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;

	if (reset_mask & AMDGPU_RESET_VMC)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_VMC_MASK;

	if (!(adev->flags & AMD_IS_APU)) {
		if (reset_mask & AMDGPU_RESET_MC)
			srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_MC_MASK;
	}

	if (grbm_soft_reset) {
		tmp = RREG32(mmGRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmGRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmGRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	gmc_v7_0_mc_resume(adev, &save);
	udelay(50);

	cik_print_gpu_status_regs(adev);
}

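/*
 * Helpers used by cik_gpu_pci_config_reset() on APUs: the GMCON registers
 * below are saved before the reset and the GMCON power-gating FSM sequence
 * is replayed afterwards before the saved values are written back.
 */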
  1168. struct kv_reset_save_regs {
  1169. u32 gmcon_reng_execute;
  1170. u32 gmcon_misc;
  1171. u32 gmcon_misc3;
  1172. };
  1173. static void kv_save_regs_for_reset(struct amdgpu_device *adev,
  1174. struct kv_reset_save_regs *save)
  1175. {
  1176. save->gmcon_reng_execute = RREG32(mmGMCON_RENG_EXECUTE);
  1177. save->gmcon_misc = RREG32(mmGMCON_MISC);
  1178. save->gmcon_misc3 = RREG32(mmGMCON_MISC3);
  1179. WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute &
  1180. ~GMCON_RENG_EXECUTE__RENG_EXECUTE_ON_PWR_UP_MASK);
  1181. WREG32(mmGMCON_MISC, save->gmcon_misc &
  1182. ~(GMCON_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK |
  1183. GMCON_MISC__STCTRL_STUTTER_EN_MASK));
  1184. }
static void kv_restore_regs_for_reset(struct amdgpu_device *adev,
				      struct kv_reset_save_regs *save)
{
	int i;

	WREG32(mmGMCON_PGFSM_WRITE, 0);
	WREG32(mmGMCON_PGFSM_CONFIG, 0x200010ff);

	for (i = 0; i < 5; i++)
		WREG32(mmGMCON_PGFSM_WRITE, 0);

	WREG32(mmGMCON_PGFSM_WRITE, 0);
	WREG32(mmGMCON_PGFSM_CONFIG, 0x300010ff);

	for (i = 0; i < 5; i++)
		WREG32(mmGMCON_PGFSM_WRITE, 0);

	WREG32(mmGMCON_PGFSM_WRITE, 0x210000);
	WREG32(mmGMCON_PGFSM_CONFIG, 0xa00010ff);

	for (i = 0; i < 5; i++)
		WREG32(mmGMCON_PGFSM_WRITE, 0);

	WREG32(mmGMCON_PGFSM_WRITE, 0x21003);
	WREG32(mmGMCON_PGFSM_CONFIG, 0xb00010ff);

	for (i = 0; i < 5; i++)
		WREG32(mmGMCON_PGFSM_WRITE, 0);

	WREG32(mmGMCON_PGFSM_WRITE, 0x2b00);
	WREG32(mmGMCON_PGFSM_CONFIG, 0xc00010ff);

	for (i = 0; i < 5; i++)
		WREG32(mmGMCON_PGFSM_WRITE, 0);

	WREG32(mmGMCON_PGFSM_WRITE, 0);
	WREG32(mmGMCON_PGFSM_CONFIG, 0xd00010ff);

	for (i = 0; i < 5; i++)
		WREG32(mmGMCON_PGFSM_WRITE, 0);

	WREG32(mmGMCON_PGFSM_WRITE, 0x420000);
	WREG32(mmGMCON_PGFSM_CONFIG, 0x100010ff);

	for (i = 0; i < 5; i++)
		WREG32(mmGMCON_PGFSM_WRITE, 0);

	WREG32(mmGMCON_PGFSM_WRITE, 0x120202);
	WREG32(mmGMCON_PGFSM_CONFIG, 0x500010ff);

	for (i = 0; i < 5; i++)
		WREG32(mmGMCON_PGFSM_WRITE, 0);

	WREG32(mmGMCON_PGFSM_WRITE, 0x3e3e36);
	WREG32(mmGMCON_PGFSM_CONFIG, 0x600010ff);

	for (i = 0; i < 5; i++)
		WREG32(mmGMCON_PGFSM_WRITE, 0);

	WREG32(mmGMCON_PGFSM_WRITE, 0x373f3e);
	WREG32(mmGMCON_PGFSM_CONFIG, 0x700010ff);

	for (i = 0; i < 5; i++)
		WREG32(mmGMCON_PGFSM_WRITE, 0);

	WREG32(mmGMCON_PGFSM_WRITE, 0x3e1332);
	WREG32(mmGMCON_PGFSM_CONFIG, 0xe00010ff);

	WREG32(mmGMCON_MISC3, save->gmcon_misc3);
	WREG32(mmGMCON_MISC, save->gmcon_misc);
	WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute);
}

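/*
 * cik_gpu_pci_config_reset - reset the ASIC via the PCI config space
 *
 * Halts the CP, MEC and SDMA engines, stops memory access, issues the
 * PCI config space reset and then waits for the ASIC to come back
 * (CONFIG_MEMSIZE becomes readable again).  On APUs the GMCON state is
 * saved and restored around the reset.
 */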
static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	struct kv_reset_save_regs kv_save = { 0 };
	u32 tmp, i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable dpm? */

	/* disable cg/pg */

	/* Disable GFX parsing/prefetching */
	WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK |
		CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);

	/* Disable MEC parsing/prefetching */
	WREG32(mmCP_MEC_CNTL,
		CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);

	/* sdma0 */
	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
	tmp |= SDMA0_F32_CNTL__HALT_MASK;
	WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
	/* sdma1 */
	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
	tmp |= SDMA0_F32_CNTL__HALT_MASK;
	WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
	/* XXX other engines? */

	/* halt the rlc, disable cp internal ints */
	gfx_v7_0_rlc_stop(adev);

	udelay(50);

	/* disable mem access */
	gmc_v7_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out !\n");
	}

	if (adev->flags & AMD_IS_APU)
		kv_save_regs_for_reset(adev, &kv_save);

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
			break;
		udelay(1);
	}

	/* does asic init need to be run first??? */
	if (adev->flags & AMD_IS_APU)
		kv_restore_regs_for_reset(adev, &kv_save);
}

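/*
 * cik_set_bios_scratch_engine_hung - flag the GUI engine as hung
 *
 * Sets or clears the ATOM_S3_ASIC_GUI_ENGINE_HUNG bit in BIOS scratch
 * register 3 around a GPU reset.
 */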
static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
{
	u32 tmp = RREG32(mmBIOS_SCRATCH_3);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(mmBIOS_SCRATCH_3, tmp);
}

/**
 * cik_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int cik_asic_reset(struct amdgpu_device *adev)
{
	u32 reset_mask;

	reset_mask = amdgpu_cik_gpu_check_soft_reset(adev);

	if (reset_mask)
		cik_set_bios_scratch_engine_hung(adev, true);

	/* try soft reset */
	cik_gpu_soft_reset(adev, reset_mask);

	reset_mask = amdgpu_cik_gpu_check_soft_reset(adev);

	/* try pci config reset */
	if (reset_mask && amdgpu_hard_reset)
		cik_gpu_pci_config_reset(adev);

	reset_mask = amdgpu_cik_gpu_check_soft_reset(adev);

	if (!reset_mask)
		cik_set_bios_scratch_engine_hung(adev, false);

	return 0;
}

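/*
 * cik_set_uvd_clock - program a single UVD clock (VCLK or DCLK)
 *
 * Looks up the atombios dividers for the requested frequency, programs
 * the post divider into @cntl_reg and polls @status_reg until the clock
 * reports stable, or gives up after roughly a second.
 */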
static int cik_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			     u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static int cik_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r = 0;

	r = cik_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = cik_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	return r;
}

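/*
 * cik_set_vce_clocks - program the VCE ECLK
 *
 * Waits for the current ECLK to settle, programs the atombios divider
 * for @ecclk and waits for the new clock to report stable.  @evclk is
 * not used on this path.
 */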
static int cik_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(ixCG_ECLK_CNTL);
	tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
		CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(ixCG_ECLK_CNTL, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

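/*
 * cik_pcie_gen3_enable - switch the PCIE link to gen2/gen3 speeds
 *
 * Skipped for APUs, root-bus devices and when amdgpu.pcie_gen2=0.  If
 * the platform supports 5.0/8.0 GT/s, re-runs link equalization for
 * gen3 when needed and then initiates a software link speed change.
 */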
static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
{
	struct pci_dev *root = adev->pdev->bus->self;
	int bridge_pos, gpu_pos;
	u32 speed_cntl, mask, current_data_rate;
	int ret, i;
	u16 tmp16;

	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
		return;

	speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
	current_data_rate = (speed_cntl & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >>
		PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
	if (mask & DRM_PCIE_SPEED_80) {
		if (current_data_rate == 2) {
			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
			return;
		}
		DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
	} else if (mask & DRM_PCIE_SPEED_50) {
		if (current_data_rate == 1) {
			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
			return;
		}
		DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
	}

	bridge_pos = pci_pcie_cap(root);
	if (!bridge_pos)
		return;

	gpu_pos = pci_pcie_cap(adev->pdev);
	if (!gpu_pos)
		return;

	if (mask & DRM_PCIE_SPEED_80) {
		/* re-try equalization if gen3 is not already enabled */
		if (current_data_rate != 2) {
			u16 bridge_cfg, gpu_cfg;
			u16 bridge_cfg2, gpu_cfg2;
			u32 max_lw, current_lw, tmp;

			pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
			pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);

			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
			pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);

			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
			pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);

			tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
			max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >>
				PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH__SHIFT;
			current_lw = (tmp & PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH_MASK)
				>> PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH__SHIFT;

			if (current_lw < max_lw) {
				tmp = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
				if (tmp & PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT_MASK) {
					tmp &= ~(PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK |
						PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS_MASK);
					tmp |= (max_lw <<
						PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT);
					tmp |= PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT_MASK |
						PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN_MASK |
						PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK;
					WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, tmp);
				}
			}

			for (i = 0; i < 10; i++) {
				/* check status */
				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
					break;

				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);

				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);

				tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
				tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
				WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);

				tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
				tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK;
				WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);

				mdelay(100);

				/* linkctl */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);

				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);

				/* linkctl2 */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);

				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

				tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
				tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
				WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
			}
		}
	}

	/* set the link speed */
	speed_cntl |= PCIE_LC_SPEED_CNTL__LC_FORCE_EN_SW_SPEED_CHANGE_MASK |
		PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_HW_SPEED_CHANGE_MASK;
	speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK;
	WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl);

	pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
	tmp16 &= ~0xf;
	if (mask & DRM_PCIE_SPEED_80)
		tmp16 |= 3; /* gen3 */
	else if (mask & DRM_PCIE_SPEED_50)
		tmp16 |= 2; /* gen2 */
	else
		tmp16 |= 1; /* gen1 */
	pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

	speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
	speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK;
	WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl);

	for (i = 0; i < adev->usec_timeout; i++) {
		speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
		if ((speed_cntl & PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK) == 0)
			break;
		udelay(1);
	}
}

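/*
 * cik_program_aspm - program PCIE ASPM (L0s/L1) behaviour
 *
 * Skipped for APUs and when amdgpu.aspm=0.  Overrides the N_FTS count,
 * sets the L0s/L1 inactivity timers and, when the root port advertises
 * clock power management, allows PLL power-down in L1/L23.
 */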
static void cik_program_aspm(struct amdgpu_device *adev)
{
	u32 data, orig;
	bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
	bool disable_clkreq = false;

	if (amdgpu_aspm == 0)
		return;

	/* XXX double check APUs */
	if (adev->flags & AMD_IS_APU)
		return;

	orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
	data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
	data |= (0x24 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT) |
		PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_N_FTS_CNTL, data);

	orig = data = RREG32_PCIE(ixPCIE_LC_CNTL3);
	data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_CNTL3, data);

	orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
	data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_P_CNTL, data);

	orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
	data &= ~(PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK |
		PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK);
	data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	if (!disable_l0s)
		data |= (7 << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT);

	if (!disable_l1) {
		data |= (7 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT);
		data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
		if (orig != data)
			WREG32_PCIE(ixPCIE_LC_CNTL, data);

		if (!disable_plloff_in_l1) {
			bool clk_req_support;

			orig = data = RREG32_PCIE(ixPB0_PIF_PWRDOWN_0);
			data &= ~(PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK |
				PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
			data |= (7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) |
				(7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
			if (orig != data)
				WREG32_PCIE(ixPB0_PIF_PWRDOWN_0, data);

			orig = data = RREG32_PCIE(ixPB0_PIF_PWRDOWN_1);
			data &= ~(PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK |
				PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
			data |= (7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) |
				(7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
			if (orig != data)
				WREG32_PCIE(ixPB0_PIF_PWRDOWN_1, data);

			orig = data = RREG32_PCIE(ixPB1_PIF_PWRDOWN_0);
			data &= ~(PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK |
				PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
			data |= (7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) |
				(7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
			if (orig != data)
				WREG32_PCIE(ixPB1_PIF_PWRDOWN_0, data);

			orig = data = RREG32_PCIE(ixPB1_PIF_PWRDOWN_1);
			data &= ~(PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK |
				PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
			data |= (7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) |
				(7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
			if (orig != data)
				WREG32_PCIE(ixPB1_PIF_PWRDOWN_1, data);

			orig = data = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
			data &= ~PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
			data |= ~(3 << PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE__SHIFT);
			if (orig != data)
				WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, data);

			if (!disable_clkreq) {
				struct pci_dev *root = adev->pdev->bus->self;
				u32 lnkcap;

				clk_req_support = false;
				pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
				if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
					clk_req_support = true;
			} else {
				clk_req_support = false;
			}

			if (clk_req_support) {
				orig = data = RREG32_PCIE(ixPCIE_LC_CNTL2);
				data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
					PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
				if (orig != data)
					WREG32_PCIE(ixPCIE_LC_CNTL2, data);

				orig = data = RREG32_SMC(ixTHM_CLK_CNTL);
				data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK |
					THM_CLK_CNTL__TMON_CLK_SEL_MASK);
				data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) |
					(1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
				if (orig != data)
					WREG32_SMC(ixTHM_CLK_CNTL, data);

				orig = data = RREG32_SMC(ixMISC_CLK_CTRL);
				data &= ~(MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK |
					MISC_CLK_CTRL__ZCLK_SEL_MASK);
				data |= (1 << MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT) |
					(1 << MISC_CLK_CTRL__ZCLK_SEL__SHIFT);
				if (orig != data)
					WREG32_SMC(ixMISC_CLK_CTRL, data);

				orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL);
				data &= ~CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK;
				if (orig != data)
					WREG32_SMC(ixCG_CLKPIN_CNTL, data);

				orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
				data &= ~CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK;
				if (orig != data)
					WREG32_SMC(ixCG_CLKPIN_CNTL_2, data);

				orig = data = RREG32_SMC(ixMPLL_BYPASSCLK_SEL);
				data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
				data |= (4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT);
				if (orig != data)
					WREG32_SMC(ixMPLL_BYPASSCLK_SEL, data);
			}
		}
	} else {
		if (orig != data)
			WREG32_PCIE(ixPCIE_LC_CNTL, data);
	}

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);
	data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
		PCIE_CNTL2__MST_MEM_LS_EN_MASK |
		PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);

	if (!disable_l0s) {
		data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
		if ((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) ==
			PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) {
			data = RREG32_PCIE(ixPCIE_LC_STATUS1);
			if ((data & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK) &&
				(data & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK)) {
				orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
				data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
				if (orig != data)
					WREG32_PCIE(ixPCIE_LC_CNTL, data);
			}
		}
	}
}

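/* cik_get_rev_id - extract the internal revision id from the ATI_REV_ID straps */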
static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
{
	return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
		>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}

static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &ci_dpm_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 8,
		.minor = 2,
		.rev = 0,
		.funcs = &dce_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 7,
		.minor = 2,
		.rev = 0,
		.funcs = &gfx_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_sdma_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 4,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v4_2_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v2_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &ci_dpm_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 8,
		.minor = 5,
		.rev = 0,
		.funcs = &dce_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 7,
		.minor = 3,
		.rev = 0,
		.funcs = &gfx_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_sdma_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 4,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v4_2_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v2_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &kv_dpm_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 8,
		.minor = 3,
		.rev = 0,
		.funcs = &dce_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 7,
		.minor = 2,
		.rev = 0,
		.funcs = &gfx_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_sdma_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 4,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v4_2_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v2_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &kv_dpm_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 8,
		.minor = 3,
		.rev = 0,
		.funcs = &dce_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 7,
		.minor = 2,
		.rev = 0,
		.funcs = &gfx_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_sdma_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 4,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v4_2_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v2_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &kv_dpm_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 8,
		.minor = 1,
		.rev = 0,
		.funcs = &dce_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &gfx_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_sdma_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 4,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v4_2_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v2_0_ip_funcs,
	},
};

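/*
 * cik_set_ip_blocks - select the IP block table for this ASIC
 *
 * Points adev->ip_blocks at the table matching adev->asic_type and
 * records its length; unknown CIK parts return -EINVAL.
 */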
int cik_set_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		adev->ip_blocks = bonaire_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
		break;
	case CHIP_HAWAII:
		adev->ip_blocks = hawaii_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
		break;
	case CHIP_KAVERI:
		adev->ip_blocks = kaveri_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
		break;
	case CHIP_KABINI:
		adev->ip_blocks = kabini_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
		break;
	case CHIP_MULLINS:
		adev->ip_blocks = mullins_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

static const struct amdgpu_asic_funcs cik_asic_funcs =
{
	.read_disabled_bios = &cik_read_disabled_bios,
	.read_register = &cik_read_register,
	.reset = &cik_asic_reset,
	.set_vga_state = &cik_vga_set_state,
	.get_xclk = &cik_get_xclk,
	.set_uvd_clocks = &cik_set_uvd_clocks,
	.set_vce_clocks = &cik_set_vce_clocks,
	.get_cu_info = &gfx_v7_0_get_cu_info,
	/* these should be moved to their own ip modules */
	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
	.wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
};

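/*
 * cik_common_early_init - early init for the common IP block
 *
 * Hooks up the indirect register accessors and asic callbacks, reads
 * the revision id and fills in the per-ASIC clockgating/powergating
 * flags and external revision id.
 */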
static int cik_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->smc_rreg = &cik_smc_rreg;
	adev->smc_wreg = &cik_smc_wreg;
	adev->pcie_rreg = &cik_pcie_rreg;
	adev->pcie_wreg = &cik_pcie_wreg;
	adev->uvd_ctx_rreg = &cik_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &cik_uvd_ctx_wreg;
	adev->didt_rreg = &cik_didt_rreg;
	adev->didt_wreg = &cik_didt_wreg;

	adev->asic_funcs = &cik_asic_funcs;

	adev->has_uvd = true;

	adev->rev_id = cik_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		adev->cg_flags =
			AMDGPU_CG_SUPPORT_GFX_MGCG |
			AMDGPU_CG_SUPPORT_GFX_MGLS |
			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
			AMDGPU_CG_SUPPORT_GFX_CGLS |
			AMDGPU_CG_SUPPORT_GFX_CGTS |
			AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
			AMDGPU_CG_SUPPORT_GFX_CP_LS |
			AMDGPU_CG_SUPPORT_MC_LS |
			AMDGPU_CG_SUPPORT_MC_MGCG |
			AMDGPU_CG_SUPPORT_SDMA_MGCG |
			AMDGPU_CG_SUPPORT_SDMA_LS |
			AMDGPU_CG_SUPPORT_BIF_LS |
			AMDGPU_CG_SUPPORT_VCE_MGCG |
			AMDGPU_CG_SUPPORT_UVD_MGCG |
			AMDGPU_CG_SUPPORT_HDP_LS |
			AMDGPU_CG_SUPPORT_HDP_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_HAWAII:
		adev->cg_flags =
			AMDGPU_CG_SUPPORT_GFX_MGCG |
			AMDGPU_CG_SUPPORT_GFX_MGLS |
			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
			AMDGPU_CG_SUPPORT_GFX_CGLS |
			AMDGPU_CG_SUPPORT_GFX_CGTS |
			AMDGPU_CG_SUPPORT_GFX_CP_LS |
			AMDGPU_CG_SUPPORT_MC_LS |
			AMDGPU_CG_SUPPORT_MC_MGCG |
			AMDGPU_CG_SUPPORT_SDMA_MGCG |
			AMDGPU_CG_SUPPORT_SDMA_LS |
			AMDGPU_CG_SUPPORT_BIF_LS |
			AMDGPU_CG_SUPPORT_VCE_MGCG |
			AMDGPU_CG_SUPPORT_UVD_MGCG |
			AMDGPU_CG_SUPPORT_HDP_LS |
			AMDGPU_CG_SUPPORT_HDP_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x28;
		break;
	case CHIP_KAVERI:
		adev->cg_flags =
			AMDGPU_CG_SUPPORT_GFX_MGCG |
			AMDGPU_CG_SUPPORT_GFX_MGLS |
			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
			AMDGPU_CG_SUPPORT_GFX_CGLS |
			AMDGPU_CG_SUPPORT_GFX_CGTS |
			AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
			AMDGPU_CG_SUPPORT_GFX_CP_LS |
			AMDGPU_CG_SUPPORT_SDMA_MGCG |
			AMDGPU_CG_SUPPORT_SDMA_LS |
			AMDGPU_CG_SUPPORT_BIF_LS |
			AMDGPU_CG_SUPPORT_VCE_MGCG |
			AMDGPU_CG_SUPPORT_UVD_MGCG |
			AMDGPU_CG_SUPPORT_HDP_LS |
			AMDGPU_CG_SUPPORT_HDP_MGCG;
		adev->pg_flags =
			/*AMDGPU_PG_SUPPORT_GFX_PG |
			  AMDGPU_PG_SUPPORT_GFX_SMG |
			  AMDGPU_PG_SUPPORT_GFX_DMG |*/
			AMDGPU_PG_SUPPORT_UVD |
			/*AMDGPU_PG_SUPPORT_VCE |
			  AMDGPU_PG_SUPPORT_CP |
			  AMDGPU_PG_SUPPORT_GDS |
			  AMDGPU_PG_SUPPORT_RLC_SMU_HS |
			  AMDGPU_PG_SUPPORT_ACP |
			  AMDGPU_PG_SUPPORT_SAMU |*/
			0;
		if (adev->pdev->device == 0x1312 ||
		    adev->pdev->device == 0x1316 ||
		    adev->pdev->device == 0x1317)
			adev->external_rev_id = 0x41;
		else
			adev->external_rev_id = 0x1;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->cg_flags =
			AMDGPU_CG_SUPPORT_GFX_MGCG |
			AMDGPU_CG_SUPPORT_GFX_MGLS |
			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
			AMDGPU_CG_SUPPORT_GFX_CGLS |
			AMDGPU_CG_SUPPORT_GFX_CGTS |
			AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
			AMDGPU_CG_SUPPORT_GFX_CP_LS |
			AMDGPU_CG_SUPPORT_SDMA_MGCG |
			AMDGPU_CG_SUPPORT_SDMA_LS |
			AMDGPU_CG_SUPPORT_BIF_LS |
			AMDGPU_CG_SUPPORT_VCE_MGCG |
			AMDGPU_CG_SUPPORT_UVD_MGCG |
			AMDGPU_CG_SUPPORT_HDP_LS |
			AMDGPU_CG_SUPPORT_HDP_MGCG;
		adev->pg_flags =
			/*AMDGPU_PG_SUPPORT_GFX_PG |
			  AMDGPU_PG_SUPPORT_GFX_SMG | */
			AMDGPU_PG_SUPPORT_UVD |
			/*AMDGPU_PG_SUPPORT_VCE |
			  AMDGPU_PG_SUPPORT_CP |
			  AMDGPU_PG_SUPPORT_GDS |
			  AMDGPU_PG_SUPPORT_RLC_SMU_HS |
			  AMDGPU_PG_SUPPORT_SAMU |*/
			0;
		if (adev->asic_type == CHIP_KABINI) {
			if (adev->rev_id == 0)
				adev->external_rev_id = 0x81;
			else if (adev->rev_id == 1)
				adev->external_rev_id = 0x82;
			else if (adev->rev_id == 2)
				adev->external_rev_id = 0x85;
		} else
			adev->external_rev_id = adev->rev_id + 0xa1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

static int cik_common_sw_init(void *handle)
{
	return 0;
}

static int cik_common_sw_fini(void *handle)
{
	return 0;
}

static int cik_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	cik_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	cik_pcie_gen3_enable(adev);
	/* enable aspm */
	cik_program_aspm(adev);

	return 0;
}

static int cik_common_hw_fini(void *handle)
{
	return 0;
}

static int cik_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_amdkfd_suspend(adev);

	return cik_common_hw_fini(adev);
}

static int cik_common_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = cik_common_hw_init(adev);
	if (r)
		return r;

	return amdgpu_amdkfd_resume(adev);
}

static bool cik_common_is_idle(void *handle)
{
	return true;
}

static int cik_common_wait_for_idle(void *handle)
{
	return 0;
}

static void cik_common_print_status(void *handle)
{
}

static int cik_common_soft_reset(void *handle)
{
	/* XXX hard reset?? */
	return 0;
}

static int cik_common_set_clockgating_state(void *handle,
					    enum amd_clockgating_state state)
{
	return 0;
}

static int cik_common_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs cik_common_ip_funcs = {
	.early_init = cik_common_early_init,
	.late_init = NULL,
	.sw_init = cik_common_sw_init,
	.sw_fini = cik_common_sw_fini,
	.hw_init = cik_common_hw_init,
	.hw_fini = cik_common_hw_fini,
	.suspend = cik_common_suspend,
	.resume = cik_common_resume,
	.is_idle = cik_common_is_idle,
	.wait_for_idle = cik_common_wait_for_idle,
	.soft_reset = cik_common_soft_reset,
	.print_status = cik_common_print_status,
	.set_clockgating_state = cik_common_set_clockgating_state,
	.set_powergating_state = cik_common_set_powergating_state,
};