/* drivers/gpu/drm/radeon/rs400.c */
  1. /*
  2. * Copyright 2008 Advanced Micro Devices, Inc.
  3. * Copyright 2008 Red Hat Inc.
  4. * Copyright 2009 Jerome Glisse.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the "Software"),
  8. * to deal in the Software without restriction, including without limitation
  9. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10. * and/or sell copies of the Software, and to permit persons to whom the
  11. * Software is furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in
  14. * all copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22. * OTHER DEALINGS IN THE SOFTWARE.
  23. *
  24. * Authors: Dave Airlie
  25. * Alex Deucher
  26. * Jerome Glisse
  27. */
  28. #include <linux/seq_file.h>
  29. #include <linux/slab.h>
  30. #include <drm/drmP.h>
  31. #include "radeon.h"
  32. #include "radeon_asic.h"
  33. #include "rs400d.h"
  34. /* This files gather functions specifics to : rs400,rs480 */
  35. static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
  36. void rs400_gart_adjust_size(struct radeon_device *rdev)
  37. {
  38. /* Check gart size */
  39. switch (rdev->mc.gtt_size/(1024*1024)) {
  40. case 32:
  41. case 64:
  42. case 128:
  43. case 256:
  44. case 512:
  45. case 1024:
  46. case 2048:
  47. break;
  48. default:
  49. DRM_ERROR("Unable to use IGP GART size %uM\n",
  50. (unsigned)(rdev->mc.gtt_size >> 20));
  51. DRM_ERROR("Valid GART size for IGP are 32M,64M,128M,256M,512M,1G,2G\n");
  52. DRM_ERROR("Forcing to 32M GART size\n");
  53. rdev->mc.gtt_size = 32 * 1024 * 1024;
  54. return;
  55. }
  56. }
  57. void rs400_gart_tlb_flush(struct radeon_device *rdev)
  58. {
  59. uint32_t tmp;
  60. unsigned int timeout = rdev->usec_timeout;
  61. WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
  62. do {
  63. tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
  64. if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
  65. break;
  66. DRM_UDELAY(1);
  67. timeout--;
  68. } while (timeout > 0);
  69. WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
  70. }
  71. int rs400_gart_init(struct radeon_device *rdev)
  72. {
  73. int r;
  74. if (rdev->gart.ptr) {
  75. WARN(1, "RS400 GART already initialized\n");
  76. return 0;
  77. }
  78. /* Check gart size */
  79. switch(rdev->mc.gtt_size / (1024 * 1024)) {
  80. case 32:
  81. case 64:
  82. case 128:
  83. case 256:
  84. case 512:
  85. case 1024:
  86. case 2048:
  87. break;
  88. default:
  89. return -EINVAL;
  90. }
  91. /* Initialize common gart structure */
  92. r = radeon_gart_init(rdev);
  93. if (r)
  94. return r;
  95. if (rs400_debugfs_pcie_gart_info_init(rdev))
  96. DRM_ERROR("Failed to register debugfs file for RS400 GART !\n");
  97. rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
  98. return radeon_gart_table_ram_alloc(rdev);
  99. }
/*
 * Program and enable the on-chip GART for RS400/RS480 (and the RS690/RS740
 * variants that share this path).  The sequence — scratch setup, size
 * selection, AGP aperture programming, table base, feature/mode bits and
 * finally the enable + TLB flush — follows the hardware programming order
 * and should not be reordered.
 *
 * Returns 0 on success or -EINVAL for an unsupported GART size.
 */
int rs400_gart_enable(struct radeon_device *rdev)
{
	uint32_t size_reg;
	uint32_t tmp;

	/* Trap out-of-GART accesses instead of letting them hit PCI. */
	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
	/* Check gart size */
	switch(rdev->mc.gtt_size / (1024 * 1024)) {
	case 32:
		size_reg = RS480_VA_SIZE_32MB;
		break;
	case 64:
		size_reg = RS480_VA_SIZE_64MB;
		break;
	case 128:
		size_reg = RS480_VA_SIZE_128MB;
		break;
	case 256:
		size_reg = RS480_VA_SIZE_256MB;
		break;
	case 512:
		size_reg = RS480_VA_SIZE_512MB;
		break;
	case 1024:
		size_reg = RS480_VA_SIZE_1GB;
		break;
	case 2048:
		size_reg = RS480_VA_SIZE_2GB;
		break;
	default:
		return -EINVAL;
	}
	/* It should be fine to program it to max value */
	if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
		WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
		WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
	} else {
		WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
		WREG32(RS480_AGP_BASE_2, 0);
	}
	/* The GTT aperture is described in 64K units (>> 16). */
	tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
	tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
		WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
		/* Clear the bus-master-disable bit so DMA can run. */
		tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
		WREG32(RADEON_BUS_CNTL, tmp);
	} else {
		WREG32(RADEON_MC_AGP_LOCATION, tmp);
		tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
		WREG32(RADEON_BUS_CNTL, tmp);
	}
	/* Table should be in 32bits address space so ignore bits above. */
	tmp = (u32)rdev->gart.table_addr & 0xfffff000;
	/* Bits 32..39 of the table address are packed into bits 4..11. */
	tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4;
	WREG32_MC(RS480_GART_BASE, tmp);
	/* TODO: more tweaking here */
	WREG32_MC(RS480_GART_FEATURE_ID,
		  (RS480_TLB_ENABLE |
		   RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
	/* Disable snooping */
	WREG32_MC(RS480_AGP_MODE_CNTL,
		  (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
	/* Disable AGP mode */
	/* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
	 * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
		tmp = RREG32_MC(RS480_MC_MISC_CNTL);
		tmp |= RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN;
		WREG32_MC(RS480_MC_MISC_CNTL, tmp);
	} else {
		tmp = RREG32_MC(RS480_MC_MISC_CNTL);
		tmp |= RS480_GART_INDEX_REG_EN;
		WREG32_MC(RS480_MC_MISC_CNTL, tmp);
	}
	/* Enable gart */
	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
	rs400_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
  184. void rs400_gart_disable(struct radeon_device *rdev)
  185. {
  186. uint32_t tmp;
  187. tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
  188. tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
  189. WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
  190. WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
  191. }
/*
 * Tear down the RS400 GART: release the common GART bookkeeping, shut off
 * address translation in hardware, then free the system-RAM page table.
 */
void rs400_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rs400_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
}
  198. #define RS400_PTE_UNSNOOPED (1 << 0)
  199. #define RS400_PTE_WRITEABLE (1 << 2)
  200. #define RS400_PTE_READABLE (1 << 3)
  201. uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)
  202. {
  203. uint32_t entry;
  204. entry = (lower_32_bits(addr) & PAGE_MASK) |
  205. ((upper_32_bits(addr) & 0xff) << 4);
  206. if (flags & RADEON_GART_PAGE_READ)
  207. entry |= RS400_PTE_READABLE;
  208. if (flags & RADEON_GART_PAGE_WRITE)
  209. entry |= RS400_PTE_WRITEABLE;
  210. if (!(flags & RADEON_GART_PAGE_SNOOP))
  211. entry |= RS400_PTE_UNSNOOPED;
  212. return entry;
  213. }
  214. void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
  215. uint64_t entry)
  216. {
  217. u32 *gtt = rdev->gart.ptr;
  218. gtt[i] = cpu_to_le32(lower_32_bits(entry));
  219. }
  220. int rs400_mc_wait_for_idle(struct radeon_device *rdev)
  221. {
  222. unsigned i;
  223. uint32_t tmp;
  224. for (i = 0; i < rdev->usec_timeout; i++) {
  225. /* read MC_STATUS */
  226. tmp = RREG32(RADEON_MC_STATUS);
  227. if (tmp & RADEON_MC_IDLE) {
  228. return 0;
  229. }
  230. DRM_UDELAY(1);
  231. }
  232. return -1;
  233. }
  234. static void rs400_gpu_init(struct radeon_device *rdev)
  235. {
  236. /* FIXME: is this correct ? */
  237. r420_pipes_init(rdev);
  238. if (rs400_mc_wait_for_idle(rdev)) {
  239. printk(KERN_WARNING "rs400: Failed to wait MC idle while "
  240. "programming pipes. Bad things might happen. %08x\n", RREG32(RADEON_MC_STATUS));
  241. }
  242. }
/*
 * Discover and lay out the memory controller apertures: clamp the GART
 * size, size the (stolen) VRAM, then place the VRAM and GTT ranges in the
 * GPU address space.  Call order matters: sizes must be known before the
 * location helpers run.
 */
static void rs400_mc_init(struct radeon_device *rdev)
{
	u64 base;

	rs400_gart_adjust_size(rdev);
	rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
	/* DDR for all card after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	rdev->mc.vram_width = 128;
	r100_vram_init_sizes(rdev);
	/* IGP: stolen VRAM base comes from the northbridge top-of-memory
	 * register, expressed in 64K units. */
	base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	/* size-1 mask: the GTT base must be aligned to the GTT size. */
	rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
	radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}
  258. uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
  259. {
  260. unsigned long flags;
  261. uint32_t r;
  262. spin_lock_irqsave(&rdev->mc_idx_lock, flags);
  263. WREG32(RS480_NB_MC_INDEX, reg & 0xff);
  264. r = RREG32(RS480_NB_MC_DATA);
  265. WREG32(RS480_NB_MC_INDEX, 0xff);
  266. spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
  267. return r;
  268. }
  269. void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
  270. {
  271. unsigned long flags;
  272. spin_lock_irqsave(&rdev->mc_idx_lock, flags);
  273. WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
  274. WREG32(RS480_NB_MC_DATA, (v));
  275. WREG32(RS480_NB_MC_INDEX, 0xff);
  276. spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
  277. }
#if defined(CONFIG_DEBUG_FS)
/*
 * debugfs dump of the GART-related register state.  Pure reads — safe to
 * run at any time; the RS690/RS740 branch uses the MCCFG_* register map,
 * older parts use the classic AGP registers.
 */
static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(RADEON_HOST_PATH_CNTL);
	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
	tmp = RREG32(RADEON_BUS_CNTL);
	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
	seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
	if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
		seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
		seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
		tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
		seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
		tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION);
		seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
		tmp = RREG32(RS690_HDP_FB_LOCATION);
		seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
	} else {
		tmp = RREG32(RADEON_AGP_BASE);
		seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
		tmp = RREG32(RS480_AGP_BASE_2);
		seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
		tmp = RREG32(RADEON_MC_AGP_LOCATION);
		seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
	}
	tmp = RREG32_MC(RS480_GART_BASE);
	seq_printf(m, "GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_GART_FEATURE_ID);
	seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
	seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_MC_MISC_CNTL);
	seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
	/* NOTE(review): raw MC indices below (0x5F, 0x3B-0x3C, 0x30-0x37)
	 * have no named #defines in this file; the names in the printed
	 * labels are the only documentation visible here — confirm against
	 * the RS480 register spec before relying on them. */
	tmp = RREG32_MC(0x5F);
	seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
	seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
	seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
	tmp = RREG32_MC(0x3B);
	seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
	tmp = RREG32_MC(0x3C);
	seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
	tmp = RREG32_MC(0x30);
	seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
	tmp = RREG32_MC(0x31);
	seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
	tmp = RREG32_MC(0x32);
	seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
	tmp = RREG32_MC(0x33);
	seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
	tmp = RREG32_MC(0x34);
	seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
	tmp = RREG32_MC(0x35);
	seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
	tmp = RREG32_MC(0x36);
	seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
	tmp = RREG32_MC(0x37);
	seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
	return 0;
}

/* One-entry table registered with debugfs by
 * rs400_debugfs_pcie_gart_info_init(). */
static struct drm_info_list rs400_gart_info_list[] = {
	{"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
};
#endif
  350. static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
  351. {
  352. #if defined(CONFIG_DEBUG_FS)
  353. return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
  354. #else
  355. return 0;
  356. #endif
  357. }
/*
 * Reprogram the MC framebuffer aperture.  MC clients must be stopped and
 * the controller idle before the aperture is moved, then clients are
 * resumed with their saved state.
 */
static void rs400_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;

	/* Stops all mc clients */
	r100_mc_stop(rdev, &save);
	/* Wait for mc idle */
	if (rs400_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
	/* Aperture start/top are programmed in 64K units. */
	WREG32(R_000148_MC_FB_LOCATION,
		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
	r100_mc_resume(rdev, &save);
}
/*
 * Bring the ASIC to an operational state: MC programming, clocks, pipes,
 * bus mastering, GART, writeback, fences, IRQs, CP ring and the IB pool.
 * The ordering is a hardware bring-up sequence — do not reorder.  Returns
 * 0 on success or the first error encountered.
 */
static int rs400_startup(struct radeon_device *rdev)
{
	int r;

	r100_set_common_regs(rdev);
	rs400_mc_program(rdev);
	/* Resume clock */
	r300_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rs400_gpu_init(rdev);
	r100_enable_bm(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = rs400_gart_enable(rdev);
	if (r)
		return r;
	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;
	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}
	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}
	r100_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}
	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}
	return 0;
}
/*
 * Resume from suspend: disable the GART, restore clocks and the MC, reset
 * and re-post the GPU via the combios tables, then run the normal startup
 * sequence.  Returns 0 on success or the startup error.
 */
int rs400_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure GART is not working */
	rs400_gart_disable(rdev);
	/* Resume clock before doing reset */
	r300_clock_startup(rdev);
	/* setup MC before calling post tables */
	rs400_mc_program(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r300_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = rs400_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}
/*
 * Quiesce the ASIC for suspend: stop power management, the CP, writeback
 * and IRQs, then disable the GART.  Ordering mirrors the reverse of
 * rs400_startup().  Always returns 0.
 */
int rs400_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	r100_irq_disable(rdev);
	rs400_gart_disable(rdev);
	return 0;
}
/*
 * Full driver teardown, releasing every subsystem initialized by
 * rs400_init()/rs400_startup() in reverse dependency order, and finally
 * freeing the cached BIOS image.
 */
void rs400_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	rs400_gart_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
/*
 * One-time device initialization: BIOS discovery (combios only — RS400/
 * RS480 never ship ATOM), reset, posting check, clocks, MC layout, fence
 * driver, memory manager, GART and finally startup.  On startup failure
 * acceleration is torn down and disabled but init still returns 0 so the
 * device can run unaccelerated.
 */
int rs400_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* restore some register to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS*/
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		/* An ATOM BIOS on this family indicates a mis-detected chip. */
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize memory controller */
	rs400_mc_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rs400_gart_init(rdev);
	if (r)
		return r;
	r300_set_reg_safe(rdev);

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->accel_working = true;
	r = rs400_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		rs400_gart_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}