gmc_v8_0.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "vid.h"
#include "vi.h"

static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
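/*
 * Note for readers: the register init tables below are consumed by
 * amdgpu_program_register_sequence() as {offset, and_mask, or_value}
 * triplets: the masked bits of the current register value are cleared
 * and or_value is OR'ed in (an and_mask of 0xffffffff simply writes
 * or_value). See the helper in the amdgpu core for the authoritative
 * semantics.
 */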
static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_fiji_a10,
						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_tonga_a11,
						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	default:
		break;
	}
}
/**
 * gmc_v8_0_mc_wait_for_idle - wait for MC idle callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC (memory controller) to be idle.
 * (evergreen+).
 * Returns 0 if the MC is idle, -1 if not.
 */
int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
		      struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	amdgpu_asic_wait_for_mc_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}

void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
			struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}

/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		return 0;
	default: BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		printk(KERN_ERR
		       "mc: Failed to load firmware \"%s\"\n",
		       fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}
/**
 * gmc_v8_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running, blackout = 0;
	int i, ucode_size, regs_size;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
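	/* each io_debug entry is an (MC_SEQ_IO_DEBUG_INDEX,
	 * MC_SEQ_IO_DEBUG_DATA) pair of 32-bit words, hence the division
	 * by (4 * 2) to get the entry count; see the write loop below */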
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		if (running) {
			blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
		}

		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}

		if (running)
			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}
static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}

/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v8_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
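	/* illustrative example: with vram_start = 0 and vram_end =
	 * 0xFFFFFFFF (4 GB - 1), FB_TOP = 0xFF and FB_BASE = 0x00, so
	 * tmp = 0x00FF0000; both fields are in units of 16 MB (1 << 24) */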
	WREG32(mmMC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	gmc_v8_0_mc_resume(adev, &save);

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}
/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (CIK).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	adev->mc.vram_width = numchan * chansize;
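	/* e.g. NOOFCHAN = 3 (8 channels) with CHANSIZE set (64-bit
	 * channels) gives vram_width = 8 * 64 = 512 bits (illustrative
	 * values, not tied to a specific board) */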
	/* Could aper size report 0 ? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB on si */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* unless the user has overridden it, set the gart
	 * size equal to 1024 MB or the vram size, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v8_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}
/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (CIK).
 */
static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

/**
 * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint32_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VI:
	 * 63:40 reserved
	 * 39:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 reserved
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VI:
	 * 63:59 block fragment size
	 * 58:40 reserved
	 * 39:1 physical base address of PTE
	 * bits 5:1 must be 0.
	 * 0 valid
	 */
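	/*
	 * Worked example (illustrative only, assuming the AMDGPU_PTE_*
	 * flag definitions in amdgpu.h): mapping the system page at
	 * physical address 0x123456000 as a valid, snooped, readable
	 * and writeable system page would use
	 *   flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
	 *           AMDGPU_PTE_SNOOPED | AMDGPU_PTE_READABLE |
	 *           AMDGPU_PTE_WRITEABLE = 0x67
	 * giving value = 0x123456000 | 0x67 = 0x0000000123456067.
	 */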
	value = addr & 0x000000FFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}
/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}
/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
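	/* the fragment size appears to be encoded as log2 of the size in
	 * 4K pages, so 4 would select 64 KB fragments (assumption based
	 * on the fragment encoding used on earlier asics) */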
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    amdgpu_vm_block_size - 9);
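	/* the hardware field is apparently expressed relative to the
	 * minimum 512-entry (1 << 9) page table block, hence the "- 9"
	 * (assumption based on the VI register layout) */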
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}
static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	return amdgpu_gart_table_vram_alloc(adev);
}
/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (CIK).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v8_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (CIK).
 */
static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}
/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_vm_init - cik vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits cik specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15) (CIK).
 * Returns 0 for success.
 */
static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
		tmp <<= 22;
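		/* MC_VM_FB_OFFSET holds the framebuffer offset in units
		 * of 4 MB (1 << 22), hence the shift to get bytes */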
		adev->vm_manager.vram_base_offset = tmp;
	} else
		adev->vm_manager.vram_base_offset = 0;

	return 0;
}
/**
 * gmc_v8_0_vm_fini - cik vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup (CIK).
 */
static void gmc_v8_0_vm_fini(struct amdgpu_device *adev)
{
}

/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 *
 * Print human readable fault information (CIK).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
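	/* mc_client is a four-character client tag packed into a u32,
	 * most significant byte first; unpack it into a printable string */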
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}
static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gart_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	return 0;
}

static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
#define mmMC_SEQ_MISC0_FIJI 0xA71

static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_gem_init(adev);
	if (r)
		return r;

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp;

		if (adev->asic_type == CHIP_FIJI)
			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
		else
			tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
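	/* amdgpu_vm_size is in GB; one GB spans 1 << 18 4k pages, so the
	 * default of 4 yields the 1 << 20 pages noted above */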
	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}
static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_gart_fini(adev);
	amdgpu_gem_fini(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (adev->asic_type == CHIP_TONGA) {
		r = gmc_v8_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}
static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}

static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_hw_fini(adev);

	return 0;
}

static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}
static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
static void gmc_v8_0_print_status(void *handle)
{
	int i, j;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "GMC 8.x registers\n");
	dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
		 RREG32(mmSRBM_STATUS));
	dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
		 RREG32(mmSRBM_STATUS2));

	dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
	dev_info(adev->dev, " MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
		 RREG32(mmMC_VM_MX_L1_TLB_CNTL));
	dev_info(adev->dev, " VM_L2_CNTL=0x%08X\n",
		 RREG32(mmVM_L2_CNTL));
	dev_info(adev->dev, " VM_L2_CNTL2=0x%08X\n",
		 RREG32(mmVM_L2_CNTL2));
	dev_info(adev->dev, " VM_L2_CNTL3=0x%08X\n",
		 RREG32(mmVM_L2_CNTL3));
	dev_info(adev->dev, " VM_L2_CNTL4=0x%08X\n",
		 RREG32(mmVM_L2_CNTL4));
	dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, " VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, " VM_CONTEXT0_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL2));
	dev_info(adev->dev, " VM_CONTEXT0_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL));
	dev_info(adev->dev, " VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR));
	dev_info(adev->dev, " VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR));
	dev_info(adev->dev, " mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET));
	dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, " VM_CONTEXT1_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL2));
	dev_info(adev->dev, " VM_CONTEXT1_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL));
	for (i = 0; i < 16; i++) {
		if (i < 8)
			dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
		else
			dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
	}
	dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
	dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
	dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
	dev_info(adev->dev, " MC_VM_FB_LOCATION=0x%08X\n",
		 RREG32(mmMC_VM_FB_LOCATION));
	dev_info(adev->dev, " MC_VM_AGP_BASE=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BASE));
	dev_info(adev->dev, " MC_VM_AGP_TOP=0x%08X\n",
		 RREG32(mmMC_VM_AGP_TOP));
	dev_info(adev->dev, " MC_VM_AGP_BOT=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BOT));

	dev_info(adev->dev, " HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
		 RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
	dev_info(adev->dev, " HDP_NONSURFACE_BASE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_BASE));
	dev_info(adev->dev, " HDP_NONSURFACE_INFO=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_INFO));
	dev_info(adev->dev, " HDP_NONSURFACE_SIZE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_SIZE));
	dev_info(adev->dev, " HDP_MISC_CNTL=0x%08X\n",
		 RREG32(mmHDP_MISC_CNTL));
	dev_info(adev->dev, " HDP_HOST_PATH_CNTL=0x%08X\n",
		 RREG32(mmHDP_HOST_PATH_CNTL));

	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		dev_info(adev->dev, " %d:\n", i);
		dev_info(adev->dev, " 0x%04X=0x%08X\n",
			 0xb05 + j, RREG32(0xb05 + j));
		dev_info(adev->dev, " 0x%04X=0x%08X\n",
			 0xb06 + j, RREG32(0xb06 + j));
		dev_info(adev->dev, " 0x%04X=0x%08X\n",
			 0xb07 + j, RREG32(0xb07 + j));
		dev_info(adev->dev, " 0x%04X=0x%08X\n",
			 0xb08 + j, RREG32(0xb08 + j));
		dev_info(adev->dev, " 0x%04X=0x%08X\n",
			 0xb09 + j, RREG32(0xb09 + j));
	}

	dev_info(adev->dev, " BIF_FB_EN=0x%08X\n",
		 RREG32(mmBIF_FB_EN));
}
static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_mode_mc_save save;
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v8_0_print_status((void *)adev);

		gmc_v8_0_mc_stop(adev, &save);
		if (gmc_v8_0_wait_for_idle(adev)) {
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
		}

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v8_0_mc_resume(adev, &save);
		udelay(50);

		gmc_v8_0_print_status((void *)adev);
	}

	return 0;
}
static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}
static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
		entry->src_id, entry->src_data);
	dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
		addr);
	dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		status);
	gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);

	return 0;
}
static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.soft_reset = gmc_v8_0_soft_reset,
	.print_status = gmc_v8_0_print_status,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
};

static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}