radeon_ttm.c

/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include "radeon_reg.h"
#include "radeon.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);

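/* Recover the radeon_device from a ttm_bo_device pointer by walking
 * back through the embedding structs (radeon_mman, then radeon_device)
 * with container_of(). */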
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
    struct radeon_mman *mman;
    struct radeon_device *rdev;

    mman = container_of(bdev, struct radeon_mman, bdev);
    rdev = container_of(mman, struct radeon_device, mman);
    return rdev;
}

/*
 * Global memory.
 */
static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
{
    return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
{
    ttm_mem_global_release(ref->object);
}

static int radeon_ttm_global_init(struct radeon_device *rdev)
{
    struct drm_global_reference *global_ref;
    int r;

    rdev->mman.mem_global_referenced = false;
    global_ref = &rdev->mman.mem_global_ref;
    global_ref->global_type = DRM_GLOBAL_TTM_MEM;
    global_ref->size = sizeof(struct ttm_mem_global);
    global_ref->init = &radeon_ttm_mem_global_init;
    global_ref->release = &radeon_ttm_mem_global_release;
    r = drm_global_item_ref(global_ref);
    if (r != 0) {
        DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
        return r;
    }

    rdev->mman.bo_global_ref.mem_glob = rdev->mman.mem_global_ref.object;
    global_ref = &rdev->mman.bo_global_ref.ref;
    global_ref->global_type = DRM_GLOBAL_TTM_BO;
    global_ref->size = sizeof(struct ttm_bo_global);
    global_ref->init = &ttm_bo_global_init;
    global_ref->release = &ttm_bo_global_release;
    r = drm_global_item_ref(global_ref);
    if (r != 0) {
        DRM_ERROR("Failed setting up TTM BO subsystem.\n");
        drm_global_item_unref(&rdev->mman.mem_global_ref);
        return r;
    }

    rdev->mman.mem_global_referenced = true;
    return 0;
}

static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
    if (rdev->mman.mem_global_referenced) {
        drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
        drm_global_item_unref(&rdev->mman.mem_global_ref);
        rdev->mman.mem_global_referenced = false;
    }
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
    return 0;
}

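/* Describe each TTM memory type (system, GTT, VRAM) to the core:
 * GPU offset, available caching modes and mapping flags. */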
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                struct ttm_mem_type_manager *man)
{
    struct radeon_device *rdev;

    rdev = radeon_get_rdev(bdev);
    switch (type) {
    case TTM_PL_SYSTEM:
        /* System memory */
        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
        man->available_caching = TTM_PL_MASK_CACHING;
        man->default_caching = TTM_PL_FLAG_CACHED;
        break;
    case TTM_PL_TT:
        man->func = &ttm_bo_manager_func;
        man->gpu_offset = rdev->mc.gtt_start;
        man->available_caching = TTM_PL_MASK_CACHING;
        man->default_caching = TTM_PL_FLAG_CACHED;
        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if IS_ENABLED(CONFIG_AGP)
        if (rdev->flags & RADEON_IS_AGP) {
            if (!rdev->ddev->agp) {
                DRM_ERROR("AGP is not enabled for memory type %u\n",
                          (unsigned)type);
                return -EINVAL;
            }
            if (!rdev->ddev->agp->cant_use_aperture)
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
            man->available_caching = TTM_PL_FLAG_UNCACHED |
                                     TTM_PL_FLAG_WC;
            man->default_caching = TTM_PL_FLAG_WC;
        }
#endif
        break;
    case TTM_PL_VRAM:
        /* "On-card" video ram */
        man->func = &ttm_bo_manager_func;
        man->gpu_offset = rdev->mc.vram_start;
        man->flags = TTM_MEMTYPE_FLAG_FIXED |
                     TTM_MEMTYPE_FLAG_MAPPABLE;
        man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
        man->default_caching = TTM_PL_FLAG_WC;
        break;
    default:
        DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
        return -EINVAL;
    }
    return 0;
}

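/* Pick the placement used when a BO must be evicted.  VRAM BOs in the
 * CPU-visible range are preferably moved to the CPU-inaccessible part
 * of VRAM, with GTT as the busy fallback; everything else goes to
 * system memory. */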
static void radeon_evict_flags(struct ttm_buffer_object *bo,
                struct ttm_placement *placement)
{
    static struct ttm_place placements = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
    };
    struct radeon_bo *rbo;

    if (!radeon_ttm_bo_is_radeon_bo(bo)) {
        placement->placement = &placements;
        placement->busy_placement = &placements;
        placement->num_placement = 1;
        placement->num_busy_placement = 1;
        return;
    }
    rbo = container_of(bo, struct radeon_bo, tbo);
    switch (bo->mem.mem_type) {
    case TTM_PL_VRAM:
        if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
            radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
        else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
                 bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
            unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
            int i;

            /* Try evicting to the CPU inaccessible part of VRAM
             * first, but only set GTT as busy placement, so this
             * BO will be evicted to GTT rather than causing other
             * BOs to be evicted from VRAM
             */
            radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
                                             RADEON_GEM_DOMAIN_GTT);
            rbo->placement.num_busy_placement = 0;
            for (i = 0; i < rbo->placement.num_placement; i++) {
                if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
                    if (rbo->placements[i].fpfn < fpfn)
                        rbo->placements[i].fpfn = fpfn;
                } else {
                    rbo->placement.busy_placement =
                        &rbo->placements[i];
                    rbo->placement.num_busy_placement = 1;
                }
            }
        } else
            radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
        break;
    case TTM_PL_TT:
    default:
        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
    }
    *placement = rbo->placement;
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
    struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);

    if (radeon_ttm_tt_has_userptr(bo->ttm))
        return -EPERM;
    return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
}

static void radeon_move_null(struct ttm_buffer_object *bo,
                 struct ttm_mem_reg *new_mem)
{
    struct ttm_mem_reg *old_mem = &bo->mem;

    BUG_ON(old_mem->mm_node != NULL);
    *old_mem = *new_mem;
    new_mem->mm_node = NULL;
}

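/* Copy a BO between placements with the GPU copy ring: translate the
 * TTM page offsets into GPU addresses, kick off radeon_copy() and let
 * TTM finish the move once the returned fence signals. */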
static int radeon_move_blit(struct ttm_buffer_object *bo,
            bool evict, bool no_wait_gpu,
            struct ttm_mem_reg *new_mem,
            struct ttm_mem_reg *old_mem)
{
    struct radeon_device *rdev;
    uint64_t old_start, new_start;
    struct radeon_fence *fence;
    unsigned num_pages;
    int r, ridx;

    rdev = radeon_get_rdev(bo->bdev);
    ridx = radeon_copy_ring_index(rdev);
    old_start = (u64)old_mem->start << PAGE_SHIFT;
    new_start = (u64)new_mem->start << PAGE_SHIFT;

    switch (old_mem->mem_type) {
    case TTM_PL_VRAM:
        old_start += rdev->mc.vram_start;
        break;
    case TTM_PL_TT:
        old_start += rdev->mc.gtt_start;
        break;
    default:
        DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
        return -EINVAL;
    }
    switch (new_mem->mem_type) {
    case TTM_PL_VRAM:
        new_start += rdev->mc.vram_start;
        break;
    case TTM_PL_TT:
        new_start += rdev->mc.gtt_start;
        break;
    default:
        DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
        return -EINVAL;
    }
    if (!rdev->ring[ridx].ready) {
        DRM_ERROR("Trying to move memory with ring turned off.\n");
        return -EINVAL;
    }

    BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

    num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
    fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv);
    if (IS_ERR(fence))
        return PTR_ERR(fence);

    r = ttm_bo_move_accel_cleanup(bo, &fence->base,
                                  evict, no_wait_gpu, new_mem);
    radeon_fence_unref(&fence);
    return r;
}

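/* VRAM -> system move: first blit into a temporary GTT placement so
 * the GPU does the heavy copy, then let TTM move from GTT to system. */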
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
                bool evict, bool interruptible,
                bool no_wait_gpu,
                struct ttm_mem_reg *new_mem)
{
    struct radeon_device *rdev;
    struct ttm_mem_reg *old_mem = &bo->mem;
    struct ttm_mem_reg tmp_mem;
    struct ttm_place placements;
    struct ttm_placement placement;
    int r;

    rdev = radeon_get_rdev(bo->bdev);
    tmp_mem = *new_mem;
    tmp_mem.mm_node = NULL;
    placement.num_placement = 1;
    placement.placement = &placements;
    placement.num_busy_placement = 1;
    placement.busy_placement = &placements;
    placements.fpfn = 0;
    placements.lpfn = 0;
    placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
    r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
                         interruptible, no_wait_gpu);
    if (unlikely(r)) {
        return r;
    }

    r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
    if (unlikely(r)) {
        goto out_cleanup;
    }

    r = ttm_tt_bind(bo->ttm, &tmp_mem);
    if (unlikely(r)) {
        goto out_cleanup;
    }
    r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
    if (unlikely(r)) {
        goto out_cleanup;
    }
    r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out_cleanup:
    ttm_bo_mem_put(bo, &tmp_mem);
    return r;
}

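/* System -> VRAM move: the mirror image, staging through GTT before
 * blitting into VRAM with the copy ring. */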
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
                bool evict, bool interruptible,
                bool no_wait_gpu,
                struct ttm_mem_reg *new_mem)
{
    struct radeon_device *rdev;
    struct ttm_mem_reg *old_mem = &bo->mem;
    struct ttm_mem_reg tmp_mem;
    struct ttm_placement placement;
    struct ttm_place placements;
    int r;

    rdev = radeon_get_rdev(bo->bdev);
    tmp_mem = *new_mem;
    tmp_mem.mm_node = NULL;
    placement.num_placement = 1;
    placement.placement = &placements;
    placement.num_busy_placement = 1;
    placement.busy_placement = &placements;
    placements.fpfn = 0;
    placements.lpfn = 0;
    placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
    r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
                         interruptible, no_wait_gpu);
    if (unlikely(r)) {
        return r;
    }
    r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
    if (unlikely(r)) {
        goto out_cleanup;
    }
    r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
    if (unlikely(r)) {
        goto out_cleanup;
    }
out_cleanup:
    ttm_bo_mem_put(bo, &tmp_mem);
    return r;
}

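/* Top-level move callback: picks between a no-op move, a GPU blit
 * (direct or staged through GTT) and the CPU memcpy fallback. */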
static int radeon_bo_move(struct ttm_buffer_object *bo,
            bool evict, bool interruptible,
            bool no_wait_gpu,
            struct ttm_mem_reg *new_mem)
{
    struct radeon_device *rdev;
    struct ttm_mem_reg *old_mem = &bo->mem;
    int r;

    rdev = radeon_get_rdev(bo->bdev);
    if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
        radeon_move_null(bo, new_mem);
        return 0;
    }
    if ((old_mem->mem_type == TTM_PL_TT &&
         new_mem->mem_type == TTM_PL_SYSTEM) ||
        (old_mem->mem_type == TTM_PL_SYSTEM &&
         new_mem->mem_type == TTM_PL_TT)) {
        /* bind is enough */
        radeon_move_null(bo, new_mem);
        return 0;
    }
    if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
        rdev->asic->copy.copy == NULL) {
        /* use memcpy */
        goto memcpy;
    }

    if (old_mem->mem_type == TTM_PL_VRAM &&
        new_mem->mem_type == TTM_PL_SYSTEM) {
        r = radeon_move_vram_ram(bo, evict, interruptible,
                                 no_wait_gpu, new_mem);
    } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
               new_mem->mem_type == TTM_PL_VRAM) {
        r = radeon_move_ram_vram(bo, evict, interruptible,
                                 no_wait_gpu, new_mem);
    } else {
        r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
    }

    if (r) {
memcpy:
        r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
        if (r) {
            return r;
        }
    }

    /* update statistics */
    atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
    return 0;
}

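/* Fill in the bus address information TTM needs to CPU-map a region:
 * nothing for system RAM, the AGP aperture for GTT on AGP cards, and
 * the PCI aperture (CPU-visible part only) for VRAM. */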
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
    struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
    struct radeon_device *rdev = radeon_get_rdev(bdev);

    mem->bus.addr = NULL;
    mem->bus.offset = 0;
    mem->bus.size = mem->num_pages << PAGE_SHIFT;
    mem->bus.base = 0;
    mem->bus.is_iomem = false;
    if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
        return -EINVAL;
    switch (mem->mem_type) {
    case TTM_PL_SYSTEM:
        /* system memory */
        return 0;
    case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
        if (rdev->flags & RADEON_IS_AGP) {
            /* RADEON_IS_AGP is set only if AGP is active */
            mem->bus.offset = mem->start << PAGE_SHIFT;
            mem->bus.base = rdev->mc.agp_base;
            mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
        }
#endif
        break;
    case TTM_PL_VRAM:
        mem->bus.offset = mem->start << PAGE_SHIFT;
        /* check if it's visible */
        if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
            return -EINVAL;
        mem->bus.base = rdev->mc.aper_base;
        mem->bus.is_iomem = true;
#ifdef __alpha__
        /*
         * Alpha: use bus.addr to hold the ioremap() return,
         * so we can modify bus.base below.
         */
        if (mem->placement & TTM_PL_FLAG_WC)
            mem->bus.addr =
                ioremap_wc(mem->bus.base + mem->bus.offset,
                           mem->bus.size);
        else
            mem->bus.addr =
                ioremap_nocache(mem->bus.base + mem->bus.offset,
                                mem->bus.size);

        /*
         * Alpha: Use just the bus offset plus
         * the hose/domain memory base for bus.base.
         * It then can be used to build PTEs for VRAM
         * access, as done in ttm_bo_vm_fault().
         */
        mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
            rdev->ddev->hose->dense_mem_base;
#endif
        break;
    default:
        return -EINVAL;
    }
    return 0;
}

static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
    struct ttm_dma_tt ttm;
    struct radeon_device *rdev;
    u64 offset;

    uint64_t userptr;
    struct mm_struct *usermm;
    uint32_t userflags;
};

/* prepare the sg table with the user pages */
static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
    struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
    struct radeon_ttm_tt *gtt = (void *)ttm;
    unsigned pinned = 0, nents;
    int r;

    int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
    enum dma_data_direction direction = write ?
        DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

    if (current->mm != gtt->usermm)
        return -EPERM;

    if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
        /* check that we only pin down anonymous memory
           to prevent problems with writeback */
        unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
        struct vm_area_struct *vma;

        vma = find_vma(gtt->usermm, gtt->userptr);
        if (!vma || vma->vm_file || vma->vm_end < end)
            return -EPERM;
    }

    do {
        unsigned num_pages = ttm->num_pages - pinned;
        uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
        struct page **pages = ttm->pages + pinned;

        r = get_user_pages(current, current->mm, userptr, num_pages,
                           write ? FOLL_WRITE : 0, pages, NULL);
        if (r < 0)
            goto release_pages;

        pinned += r;

    } while (pinned < ttm->num_pages);

    r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
                                  ttm->num_pages << PAGE_SHIFT,
                                  GFP_KERNEL);
    if (r)
        goto release_sg;

    r = -ENOMEM;
    nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
    if (nents != ttm->sg->nents)
        goto release_sg;

    drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
                                     gtt->ttm.dma_address, ttm->num_pages);

    return 0;

release_sg:
    kfree(ttm->sg);

release_pages:
    release_pages(ttm->pages, pinned, 0);
    return r;
}

static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
    struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
    struct radeon_ttm_tt *gtt = (void *)ttm;
    struct sg_page_iter sg_iter;

    int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
    enum dma_data_direction direction = write ?
        DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

    /* double check that we don't free the table twice */
    if (!ttm->sg->sgl)
        return;

    /* free the sg table and pages again */
    dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

    for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
        struct page *page = sg_page_iter_page(&sg_iter);
        if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
            set_page_dirty(page);

        mark_page_accessed(page);
        page_cache_release(page);
    }

    sg_free_table(ttm->sg);
}

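/* Map the ttm's pages into the GART at the offset chosen by the memory
 * manager.  Userptr pages are pinned first and their GART mapping is
 * made GPU-read-only; cached ttms get a snooped mapping. */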
static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
                   struct ttm_mem_reg *bo_mem)
{
    struct radeon_ttm_tt *gtt = (void *)ttm;
    uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
        RADEON_GART_PAGE_WRITE;
    int r;

    if (gtt->userptr) {
        /* fail the bind if the user pages cannot be pinned */
        r = radeon_ttm_tt_pin_userptr(ttm);
        if (r)
            return r;
        flags &= ~RADEON_GART_PAGE_WRITE;
    }

    gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
    if (!ttm->num_pages) {
        WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
             ttm->num_pages, bo_mem, ttm);
    }
    if (ttm->caching_state == tt_cached)
        flags |= RADEON_GART_PAGE_SNOOP;
    r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
                         ttm->pages, gtt->ttm.dma_address, flags);
    if (r) {
        DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
                  ttm->num_pages, (unsigned)gtt->offset);
        return r;
    }
    return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
{
    struct radeon_ttm_tt *gtt = (void *)ttm;

    radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);

    if (gtt->userptr)
        radeon_ttm_tt_unpin_userptr(ttm);

    return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
{
    struct radeon_ttm_tt *gtt = (void *)ttm;

    ttm_dma_tt_fini(&gtt->ttm);
    kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
    .bind = &radeon_ttm_backend_bind,
    .unbind = &radeon_ttm_backend_unbind,
    .destroy = &radeon_ttm_backend_destroy,
};

static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
                    unsigned long size, uint32_t page_flags,
                    struct page *dummy_read_page)
{
    struct radeon_device *rdev;
    struct radeon_ttm_tt *gtt;

    rdev = radeon_get_rdev(bdev);
#if IS_ENABLED(CONFIG_AGP)
    if (rdev->flags & RADEON_IS_AGP) {
        return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
                                 size, page_flags, dummy_read_page);
    }
#endif

    gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
    if (gtt == NULL) {
        return NULL;
    }
    gtt->ttm.ttm.func = &radeon_backend_func;
    gtt->rdev = rdev;
    if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
        kfree(gtt);
        return NULL;
    }
    return &gtt->ttm.ttm;
}

static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
{
    if (!ttm || ttm->func != &radeon_backend_func)
        return NULL;
    return (struct radeon_ttm_tt *)ttm;
}

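/* Allocate backing pages and DMA mappings for a ttm.  Userptr and
 * dma-buf (SG) ttms get their pages elsewhere; AGP and swiotlb
 * configurations use their own pools; otherwise pages come from the
 * common TTM pool and are mapped one by one with pci_map_page(). */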
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
    struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
    struct radeon_device *rdev;
    unsigned i;
    int r;
    bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

    if (ttm->state != tt_unpopulated)
        return 0;

    if (gtt && gtt->userptr) {
        ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!ttm->sg)
            return -ENOMEM;

        ttm->page_flags |= TTM_PAGE_FLAG_SG;
        ttm->state = tt_unbound;
        return 0;
    }

    if (slave && ttm->sg) {
        drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
                                         gtt->ttm.dma_address, ttm->num_pages);
        ttm->state = tt_unbound;
        return 0;
    }

    rdev = radeon_get_rdev(ttm->bdev);
#if IS_ENABLED(CONFIG_AGP)
    if (rdev->flags & RADEON_IS_AGP) {
        return ttm_agp_tt_populate(ttm);
    }
#endif

#ifdef CONFIG_SWIOTLB
    if (swiotlb_nr_tbl()) {
        return ttm_dma_populate(&gtt->ttm, rdev->dev);
    }
#endif

    r = ttm_pool_populate(ttm);
    if (r) {
        return r;
    }

    for (i = 0; i < ttm->num_pages; i++) {
        gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
                                               0, PAGE_SIZE,
                                               PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
            while (i--) {
                pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                gtt->ttm.dma_address[i] = 0;
            }
            ttm_pool_unpopulate(ttm);
            return -EFAULT;
        }
    }
    return 0;
}

static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
    struct radeon_device *rdev;
    struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
    unsigned i;
    bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

    if (gtt && gtt->userptr) {
        kfree(ttm->sg);
        ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
        return;
    }

    if (slave)
        return;

    rdev = radeon_get_rdev(ttm->bdev);
#if IS_ENABLED(CONFIG_AGP)
    if (rdev->flags & RADEON_IS_AGP) {
        ttm_agp_tt_unpopulate(ttm);
        return;
    }
#endif

#ifdef CONFIG_SWIOTLB
    if (swiotlb_nr_tbl()) {
        ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
        return;
    }
#endif

    for (i = 0; i < ttm->num_pages; i++) {
        if (gtt->ttm.dma_address[i]) {
            pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
                           PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        }
    }

    ttm_pool_unpopulate(ttm);
}

int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
                  uint32_t flags)
{
    struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

    if (gtt == NULL)
        return -EINVAL;

    gtt->userptr = addr;
    gtt->usermm = current->mm;
    gtt->userflags = flags;
    return 0;
}

bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
{
    struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

    if (gtt == NULL)
        return false;

    return !!gtt->userptr;
}

bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
    struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

    if (gtt == NULL)
        return false;

    return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
}

static struct ttm_bo_driver radeon_bo_driver = {
    .ttm_tt_create = &radeon_ttm_tt_create,
    .ttm_tt_populate = &radeon_ttm_tt_populate,
    .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
    .invalidate_caches = &radeon_invalidate_caches,
    .init_mem_type = &radeon_init_mem_type,
    .evict_flags = &radeon_evict_flags,
    .move = &radeon_bo_move,
    .verify_access = &radeon_verify_access,
    .move_notify = &radeon_bo_move_notify,
    .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
    .io_mem_reserve = &radeon_ttm_io_mem_reserve,
    .io_mem_free = &radeon_ttm_io_mem_free,
};

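/* Bring up TTM for this device: global state, the BO device, the VRAM
 * and GTT heaps, a small pinned VRAM buffer for stolen VGA memory, and
 * the debugfs files. */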
int radeon_ttm_init(struct radeon_device *rdev)
{
    int r;

    r = radeon_ttm_global_init(rdev);
    if (r) {
        return r;
    }
    /* No other user of this address space, so set the offset to 0 */
    r = ttm_bo_device_init(&rdev->mman.bdev,
                           rdev->mman.bo_global_ref.ref.object,
                           &radeon_bo_driver,
                           rdev->ddev->anon_inode->i_mapping,
                           DRM_FILE_PAGE_OFFSET,
                           rdev->need_dma32);
    if (r) {
        DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
        return r;
    }
    rdev->mman.initialized = true;
    r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
                       rdev->mc.real_vram_size >> PAGE_SHIFT);
    if (r) {
        DRM_ERROR("Failed initializing VRAM heap.\n");
        return r;
    }
    /* Change the size here instead of the init above so only lpfn is affected */
    radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

    r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
                         RADEON_GEM_DOMAIN_VRAM, 0, NULL,
                         NULL, &rdev->stollen_vga_memory);
    if (r) {
        return r;
    }
    r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
    if (r)
        return r;
    r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
    radeon_bo_unreserve(rdev->stollen_vga_memory);
    if (r) {
        radeon_bo_unref(&rdev->stollen_vga_memory);
        return r;
    }
    DRM_INFO("radeon: %uM of VRAM memory ready\n",
             (unsigned)(rdev->mc.real_vram_size / (1024 * 1024)));
    r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
                       rdev->mc.gtt_size >> PAGE_SHIFT);
    if (r) {
        DRM_ERROR("Failed initializing GTT heap.\n");
        return r;
    }
    DRM_INFO("radeon: %uM of GTT memory ready.\n",
             (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));

    r = radeon_ttm_debugfs_init(rdev);
    if (r) {
        DRM_ERROR("Failed to init debugfs\n");
        return r;
    }
    return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
    int r;

    if (!rdev->mman.initialized)
        return;
    radeon_ttm_debugfs_fini(rdev);
    if (rdev->stollen_vga_memory) {
        r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
        if (r == 0) {
            radeon_bo_unpin(rdev->stollen_vga_memory);
            radeon_bo_unreserve(rdev->stollen_vga_memory);
        }
        radeon_bo_unref(&rdev->stollen_vga_memory);
    }
    ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
    ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
    ttm_bo_device_release(&rdev->mman.bdev);
    radeon_gart_fini(rdev);
    radeon_ttm_global_fini(rdev);
    rdev->mman.initialized = false;
    DRM_INFO("radeon: ttm finalized\n");
}

/* This should only be called at bootup or when userspace
 * isn't running. */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
    struct ttm_mem_type_manager *man;

    if (!rdev->mman.initialized)
        return;

    man = &rdev->mman.bdev.man[TTM_PL_VRAM];
    /* this just adjusts TTM's idea of the VRAM size, which sets lpfn
     * to the correct value */
    man->size = size >> PAGE_SHIFT;
}

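/* Wrap TTM's fault handler so the memory clock can't change while a BO
 * is faulted in: pm.mclk_lock is held for read across the fault. */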
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    struct ttm_buffer_object *bo;
    struct radeon_device *rdev;
    int r;

    bo = (struct ttm_buffer_object *)vma->vm_private_data;
    if (bo == NULL) {
        return VM_FAULT_NOPAGE;
    }
    rdev = radeon_get_rdev(bo->bdev);
    down_read(&rdev->pm.mclk_lock);
    r = ttm_vm_ops->fault(vma, vmf);
    up_read(&rdev->pm.mclk_lock);
    return r;
}

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
    struct drm_file *file_priv;
    struct radeon_device *rdev;
    int r;

    if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
        return -EINVAL;
    }

    file_priv = filp->private_data;
    rdev = file_priv->minor->dev->dev_private;
    if (rdev == NULL) {
        return -EINVAL;
    }
    r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
    if (unlikely(r != 0)) {
        return r;
    }
    if (unlikely(ttm_vm_ops == NULL)) {
        ttm_vm_ops = vma->vm_ops;
        radeon_ttm_vm_ops = *ttm_vm_ops;
        radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
    }
    vma->vm_ops = &radeon_ttm_vm_ops;
    return 0;
}

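/*
 * Debugfs support: dump the VRAM/GTT memory managers and expose raw
 * read access to VRAM and GART-backed pages.
 */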
#if defined(CONFIG_DEBUG_FS)

static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *)m->private;
    unsigned ttm_pl = *(int *)node->info_ent->data;
    struct drm_device *dev = node->minor->dev;
    struct radeon_device *rdev = dev->dev_private;
    struct drm_mm *mm = (struct drm_mm *)rdev->mman.bdev.man[ttm_pl].priv;
    int ret;
    struct ttm_bo_global *glob = rdev->mman.bdev.glob;

    spin_lock(&glob->lru_lock);
    ret = drm_mm_dump_table(m, mm);
    spin_unlock(&glob->lru_lock);
    return ret;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static struct drm_info_list radeon_ttm_debugfs_list[] = {
    {"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
    {"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
    {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
    {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

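/* Raw VRAM access, a dword at a time, through the MM_INDEX/MM_DATA
 * register pair; reads must be 32-bit sized and aligned. */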
static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
{
    struct radeon_device *rdev = inode->i_private;
    i_size_write(inode, rdev->mc.mc_vram_size);
    filep->private_data = inode->i_private;
    return 0;
}

static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
                    size_t size, loff_t *pos)
{
    struct radeon_device *rdev = f->private_data;
    ssize_t result = 0;
    int r;

    if (size & 0x3 || *pos & 0x3)
        return -EINVAL;

    while (size) {
        unsigned long flags;
        uint32_t value;

        if (*pos >= rdev->mc.mc_vram_size)
            return result;

        spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
        WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
        if (rdev->family >= CHIP_CEDAR)
            WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
        value = RREG32(RADEON_MM_DATA);
        spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);

        r = put_user(value, (uint32_t *)buf);
        if (r)
            return r;

        result += 4;
        buf += 4;
        *pos += 4;
        size -= 4;
    }

    return result;
}

static const struct file_operations radeon_ttm_vram_fops = {
    .owner = THIS_MODULE,
    .open = radeon_ttm_vram_open,
    .read = radeon_ttm_vram_read,
    .llseek = default_llseek
};

static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
{
    struct radeon_device *rdev = inode->i_private;
    i_size_write(inode, rdev->mc.gtt_size);
    filep->private_data = inode->i_private;
    return 0;
}

static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
                   size_t size, loff_t *pos)
{
    struct radeon_device *rdev = f->private_data;
    ssize_t result = 0;
    int r;

    while (size) {
        loff_t p = *pos / PAGE_SIZE;
        unsigned off = *pos & ~PAGE_MASK;
        size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
        struct page *page;
        void *ptr;

        if (p >= rdev->gart.num_cpu_pages)
            return result;

        page = rdev->gart.pages[p];
        if (page) {
            ptr = kmap(page);
            ptr += off;

            r = copy_to_user(buf, ptr, cur_size);
            kunmap(rdev->gart.pages[p]);
        } else
            r = clear_user(buf, cur_size);

        if (r)
            return -EFAULT;

        result += cur_size;
        buf += cur_size;
        *pos += cur_size;
        size -= cur_size;
    }

    return result;
}

static const struct file_operations radeon_ttm_gtt_fops = {
    .owner = THIS_MODULE,
    .open = radeon_ttm_gtt_open,
    .read = radeon_ttm_gtt_read,
    .llseek = default_llseek
};

#endif

static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
    unsigned count;

    struct drm_minor *minor = rdev->ddev->primary;
    struct dentry *ent, *root = minor->debugfs_root;

    ent = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO, root,
                              rdev, &radeon_ttm_vram_fops);
    if (IS_ERR(ent))
        return PTR_ERR(ent);
    rdev->mman.vram = ent;

    ent = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO, root,
                              rdev, &radeon_ttm_gtt_fops);
    if (IS_ERR(ent))
        return PTR_ERR(ent);
    rdev->mman.gtt = ent;

    count = ARRAY_SIZE(radeon_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
    if (!swiotlb_nr_tbl())
        --count;
#endif

    return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
#else

    return 0;
#endif
}

static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
    debugfs_remove(rdev->mman.vram);
    rdev->mman.vram = NULL;

    debugfs_remove(rdev->mman.gtt);
    rdev->mman.gtt = NULL;
#endif
}