/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "sid.h"

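/* GPU soft-reset status helper, defined in si.c */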
u32 si_gpu_check_soft_reset(struct radeon_device *rdev);

/**
 * si_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = si_gpu_check_soft_reset(rdev);
	u32 mask;

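	/*
	 * Each async DMA engine has its own soft-reset status bit; only
	 * consider this ring's engine when deciding whether we are hung.
	 */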
	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * si_dma_vm_copy_pages - update PTEs by copying them from the GART
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr where to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using the DMA (SI).
 */
void si_dma_vm_copy_pages(struct radeon_device *rdev,
			  struct radeon_ib *ib,
			  uint64_t pe, uint64_t src,
			  unsigned count)
{
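	/*
	 * The COPY packet's byte-count field is 20 bits (max 0xFFFFF), so
	 * copy in chunks of at most 0xFFFF8 bytes to keep each chunk a
	 * whole number of 8-byte PTEs.
	 */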
	while (count) {
		unsigned bytes = count * 8;

		if (bytes > 0xFFFF8)
			bytes = 0xFFFF8;

		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
						      1, 0, 0, bytes);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}

/**
 * si_dma_vm_write_pages - update PTEs by writing them manually
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using the DMA (SI).
 */
void si_dma_vm_write_pages(struct radeon_device *rdev,
			   struct radeon_ib *ib,
			   uint64_t pe,
			   uint64_t addr, unsigned count,
			   uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

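	/*
	 * A WRITE packet carries at most 0xFFFFF data dwords; clamp to the
	 * even value 0xFFFFE so each packet holds complete 64-bit PTEs.
	 */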
	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
		ib->ptr[ib->length_dw++] = pe;
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & R600_PTE_SYSTEM) {
				/* system pages: look up the real address in the GART */
				value = radeon_vm_map_gart(rdev, addr);
			} else if (flags & R600_PTE_VALID) {
				/* vram pages: the address can be used directly */
				value = addr;
			} else {
				value = 0;
			}
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}

/**
 * si_dma_vm_set_pages - update the page tables using the DMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the DMA (SI).
 */
void si_dma_vm_set_pages(struct radeon_device *rdev,
			 struct radeon_ib *ib,
			 uint64_t pe,
			 uint64_t addr, unsigned count,
			 uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

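	/*
	 * The PTE_PDE packet lets the DMA engine generate the entries
	 * itself: it writes ndw / 2 64-bit entries starting at pe, adding
	 * incr to the address and applying flags through the mask/value
	 * fields for each one, so it only suits physically contiguous
	 * (vram) ranges.
	 */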
	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		if (flags & R600_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;

		pe += ndw * 4;
		addr += (ndw / 2) * incr;
		count -= ndw / 2;
	}
}
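
/**
 * si_dma_vm_flush - flush the TLB of a specific VM context via the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @vm_id: VM context to flush
 * @pd_addr: address of the page directory
 *
 * Point the VM context at its page directory and invalidate its TLB (SI).
 * The async DMA engine writes the registers involved through SRBM_WRITE
 * packets.
 */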
void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
		     unsigned vm_id, uint64_t pd_addr)
{
	/* update the page table base; contexts 0-7 and 8-15 use two
	 * separate register ranges
	 */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	if (vm_id < 8) {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
	} else {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2));
	}
	radeon_ring_write(ring, pd_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);

	/* bits 0-7 are the VM contexts 0-7 */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
	radeon_ring_write(ring, 1 << vm_id);

	/* wait for invalidate to complete */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST);
	radeon_ring_write(ring, 0xff << 16); /* retry */
	radeon_ring_write(ring, 1 << vm_id); /* mask */
	radeon_ring_write(ring, 0); /* value */
	radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}

/**
 * si_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the DMA engine (SI).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
				 uint64_t src_offset, uint64_t dst_offset,
				 unsigned num_gpu_pages,
				 struct reservation_object *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
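	/*
	 * Each COPY packet moves at most 0xFFFFF bytes and occupies 5 ring
	 * dwords; the extra 11 dwords leave headroom for the semaphore
	 * syncs and the fence emitted below.
	 */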
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0xFFFFF)
			cur_size_in_bytes = 0xFFFFF;
		size_in_bytes -= cur_size_in_bytes;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
		radeon_ring_write(ring, lower_32_bits(dst_offset));
		radeon_ring_write(ring, lower_32_bits(src_offset));
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}