amdgpu_sa.c

/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole", and we always try to allocate
 * after the last allocated bo. The principle is that in a linear GPU ring
 * progression, what is after the last bo is the oldest bo we allocated and
 * thus the first one that should no longer be in use by the GPU.
 *
 * If that is not the case we skip over the bo after last to the closest
 * done bo, if one exists. If none exists and we are not asked to block,
 * we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring.
 * We just wait for any one of those fences to complete.
 */
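
/*
 * Typical use by a caller (a sketch, not something this file defines):
 * allocate a suballocation, submit work that uses it, then free it
 * together with the fence protecting that work. The manager instance
 * "adev->ring_tmp_bo" is assumed here purely for illustration.
 *
 *      struct amdgpu_sa_bo *sa_bo;
 *      int r;
 *
 *      r = amdgpu_sa_bo_new(&adev->ring_tmp_bo, &sa_bo, size, 256);
 *      if (r)
 *              return r;
 *      ... submit work referencing the suballocation ...
 *      amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */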

#include <drm/drmP.h>
#include "amdgpu.h"

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);
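
/*
 * amdgpu_sa_bo_manager_init - set up a suballocation manager: one
 * allocation list (olist) ordered by offset, one free list per ring
 * (flist), the "hole" pointer marking where the next allocation is
 * tried, and a single backing bo of the requested size and domain.
 */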
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
                              struct amdgpu_sa_manager *sa_manager,
                              unsigned size, u32 align, u32 domain)
{
        int i, r;

        init_waitqueue_head(&sa_manager->wq);
        sa_manager->bo = NULL;
        sa_manager->size = size;
        sa_manager->domain = domain;
        sa_manager->align = align;
        sa_manager->hole = &sa_manager->olist;
        INIT_LIST_HEAD(&sa_manager->olist);
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                INIT_LIST_HEAD(&sa_manager->flist[i]);
        }

        r = amdgpu_bo_create(adev, size, align, true, domain,
                             0, NULL, NULL, &sa_manager->bo);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
                return r;
        }

        return r;
}

void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
                               struct amdgpu_sa_manager *sa_manager)
{
        struct amdgpu_sa_bo *sa_bo, *tmp;

        if (!list_empty(&sa_manager->olist)) {
                sa_manager->hole = &sa_manager->olist;
                amdgpu_sa_bo_try_free(sa_manager);
                if (!list_empty(&sa_manager->olist)) {
                        dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
                }
        }
        list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
                amdgpu_sa_bo_remove_locked(sa_bo);
        }
        amdgpu_bo_unref(&sa_manager->bo);
        sa_manager->size = 0;
}

int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
                               struct amdgpu_sa_manager *sa_manager)
{
        int r;

        if (sa_manager->bo == NULL) {
                dev_err(adev->dev, "no bo for sa manager\n");
                return -EINVAL;
        }

        /* map the buffer */
        r = amdgpu_bo_reserve(sa_manager->bo, false);
        if (r) {
                dev_err(adev->dev, "(%d) failed to reserve manager bo\n", r);
                return r;
        }
        r = amdgpu_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
        if (r) {
                amdgpu_bo_unreserve(sa_manager->bo);
                dev_err(adev->dev, "(%d) failed to pin manager bo\n", r);
                return r;
        }
        r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
        amdgpu_bo_unreserve(sa_manager->bo);
        return r;
}

int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
                                 struct amdgpu_sa_manager *sa_manager)
{
        int r;

        if (sa_manager->bo == NULL) {
                dev_err(adev->dev, "no bo for sa manager\n");
                return -EINVAL;
        }

        r = amdgpu_bo_reserve(sa_manager->bo, false);
        if (!r) {
                amdgpu_bo_kunmap(sa_manager->bo);
                amdgpu_bo_unpin(sa_manager->bo);
                amdgpu_bo_unreserve(sa_manager->bo);
        }
        return r;
}

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
        struct amdgpu_sa_manager *sa_manager = sa_bo->manager;

        if (sa_manager->hole == &sa_bo->olist) {
                sa_manager->hole = sa_bo->olist.prev;
        }
        list_del_init(&sa_bo->olist);
        list_del_init(&sa_bo->flist);
        fence_put(sa_bo->fence);
        kfree(sa_bo);
}

static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
{
        struct amdgpu_sa_bo *sa_bo, *tmp;

        if (sa_manager->hole->next == &sa_manager->olist)
                return;

        sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
        list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
                if (sa_bo->fence == NULL ||
                    !fence_is_signaled(sa_bo->fence)) {
                        return;
                }
                amdgpu_sa_bo_remove_locked(sa_bo);
        }
}
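
/*
 * The "hole" always points at the list entry after which free space
 * begins: the hole starts at that entry's eoffset (or at 0 when the
 * hole is the list head itself) and ends at the next entry's soffset
 * (or at the manager size when the hole is the last entry).
 */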
static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
{
        struct list_head *hole = sa_manager->hole;

        if (hole != &sa_manager->olist) {
                return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
        }
        return 0;
}

static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
{
        struct list_head *hole = sa_manager->hole;

        if (hole->next != &sa_manager->olist) {
                return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
        }
        return sa_manager->size;
}
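
/*
 * A worked example of the alignment math below: with soffset = 100 and
 * align = 64, wasted = (64 - (100 % 64)) % 64 = 28, so the allocation
 * starts at the aligned offset 128. When soffset is already aligned,
 * the subtraction yields align and the outer modulo brings wasted back
 * to 0.
 */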
static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
                                   struct amdgpu_sa_bo *sa_bo,
                                   unsigned size, unsigned align)
{
        unsigned soffset, eoffset, wasted;

        soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
        eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
        wasted = (align - (soffset % align)) % align;

        if ((eoffset - soffset) >= (size + wasted)) {
                soffset += wasted;

                sa_bo->manager = sa_manager;
                sa_bo->soffset = soffset;
                sa_bo->eoffset = soffset + size;
                list_add(&sa_bo->olist, sa_manager->hole);
                INIT_LIST_HEAD(&sa_bo->flist);
                sa_manager->hole = &sa_bo->olist;
                return true;
        }
        return false;
}

/**
 * amdgpu_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly.
 */
static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
                            unsigned size, unsigned align)
{
        unsigned soffset, eoffset, wasted;
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                if (!list_empty(&sa_manager->flist[i])) {
                        return true;
                }
        }

        soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
        eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
        wasted = (align - (soffset % align)) % align;

        if ((eoffset - soffset) >= (size + wasted)) {
                return true;
        }

        return false;
}
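
/*
 * amdgpu_sa_bo_next_hole() measures distances on the circular buffer
 * by adding sa_manager->size to any offset that lies before the hole.
 * For example, with size = 1024 and the hole starting at 900, a
 * signaled bo at offset 100 is (100 + 1024) - 900 = 224 bytes ahead,
 * while one at offset 950 is only 50 bytes ahead and wins. Starting
 * "best" at size * 2 guarantees that any real candidate beats it.
 */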
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
                                   struct fence **fences,
                                   unsigned *tries)
{
        struct amdgpu_sa_bo *best_bo = NULL;
        unsigned i, soffset, best, tmp;

        /* if hole points to the end of the buffer */
        if (sa_manager->hole->next == &sa_manager->olist) {
                /* try again with its beginning */
                sa_manager->hole = &sa_manager->olist;
                return true;
        }

        soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
        /* to handle wrap around we add sa_manager->size */
        best = sa_manager->size * 2;
        /* go over all fence lists and try to find the closest sa_bo
         * after the current hole
         */
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_sa_bo *sa_bo;

                if (list_empty(&sa_manager->flist[i])) {
                        continue;
                }

                sa_bo = list_first_entry(&sa_manager->flist[i],
                                         struct amdgpu_sa_bo, flist);

                if (!fence_is_signaled(sa_bo->fence)) {
                        fences[i] = sa_bo->fence;
                        continue;
                }

                /* limit the number of tries each ring gets */
                if (tries[i] > 2) {
                        continue;
                }

                tmp = sa_bo->soffset;
                if (tmp < soffset) {
                        /* wrap around, pretend it's after */
                        tmp += sa_manager->size;
                }
                tmp -= soffset;
                if (tmp < best) {
                        /* this sa bo is the closest one */
                        best = tmp;
                        best_bo = sa_bo;
                }
        }

        if (best_bo) {
                uint32_t idx = amdgpu_ring_from_fence(best_bo->fence)->idx;
                ++tries[idx];
                sa_manager->hole = best_bo->olist.prev;

                /* we know that this one is signaled,
                 * so it's safe to remove it
                 */
                amdgpu_sa_bo_remove_locked(best_bo);
                return true;
        }

        return false;
}
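
/*
 * amdgpu_sa_bo_new - allocate size bytes from the manager. The outer
 * loop alternates between reclaiming signaled entries and trying the
 * hole; when amdgpu_sa_bo_next_hole() can no longer make progress we
 * either wait for one of the fences it collected or, with nothing to
 * wait for, sleep until amdgpu_sa_event() reports room or a fence.
 */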
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
                     struct amdgpu_sa_bo **sa_bo,
                     unsigned size, unsigned align)
{
        struct fence *fences[AMDGPU_MAX_RINGS];
        unsigned tries[AMDGPU_MAX_RINGS];
        unsigned count;
        int i, r;
        signed long t;

        BUG_ON(align > sa_manager->align);
        BUG_ON(size > sa_manager->size);

        *sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
        if ((*sa_bo) == NULL) {
                return -ENOMEM;
        }
        (*sa_bo)->manager = sa_manager;
        (*sa_bo)->fence = NULL;
        INIT_LIST_HEAD(&(*sa_bo)->olist);
        INIT_LIST_HEAD(&(*sa_bo)->flist);

        spin_lock(&sa_manager->wq.lock);
        do {
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        fences[i] = NULL;
                        tries[i] = 0;
                }

                do {
                        amdgpu_sa_bo_try_free(sa_manager);

                        if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
                                                   size, align)) {
                                spin_unlock(&sa_manager->wq.lock);
                                return 0;
                        }

                        /* see if we can skip over some allocations */
                } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));

                for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
                        if (fences[i])
                                fences[count++] = fence_get(fences[i]);

                if (count) {
                        spin_unlock(&sa_manager->wq.lock);
                        t = fence_wait_any_timeout(fences, count, false,
                                                   MAX_SCHEDULE_TIMEOUT);
                        for (i = 0; i < count; ++i)
                                fence_put(fences[i]);

                        r = (t > 0) ? 0 : t;
                        spin_lock(&sa_manager->wq.lock);
                } else {
                        /* if we have nothing to wait for, block */
                        r = wait_event_interruptible_locked(
                                sa_manager->wq,
                                amdgpu_sa_event(sa_manager, size, align)
                        );
                }
        } while (!r);

        spin_unlock(&sa_manager->wq.lock);
        kfree(*sa_bo);
        *sa_bo = NULL;
        return r;
}
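
/*
 * amdgpu_sa_bo_free - release a suballocation. If a fence is given and
 * not yet signaled, the bo is parked on the free list of the fence's
 * ring and reclaimed once it signals; otherwise it is removed right
 * away. Waiters in amdgpu_sa_bo_new() are woken in either case.
 */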
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
                       struct fence *fence)
{
        struct amdgpu_sa_manager *sa_manager;

        if (sa_bo == NULL || *sa_bo == NULL) {
                return;
        }

        sa_manager = (*sa_bo)->manager;
        spin_lock(&sa_manager->wq.lock);
        if (fence && !fence_is_signaled(fence)) {
                uint32_t idx;

                (*sa_bo)->fence = fence_get(fence);
                idx = amdgpu_ring_from_fence(fence)->idx;
                list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
        } else {
                amdgpu_sa_bo_remove_locked(*sa_bo);
        }
        wake_up_all_locked(&sa_manager->wq);
        spin_unlock(&sa_manager->wq.lock);
        *sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)

static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
{
        struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
        struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);

        if (a_fence)
                seq_printf(m, " protected by 0x%016llx on ring %d",
                           a_fence->seq, a_fence->ring->idx);

        if (s_fence) {
                struct amdgpu_ring *ring;

                ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
                seq_printf(m, " protected by 0x%016x on ring %d",
                           s_fence->base.seqno, ring->idx);
        }
}

void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
                                  struct seq_file *m)
{
        struct amdgpu_sa_bo *i;

        spin_lock(&sa_manager->wq.lock);
        list_for_each_entry(i, &sa_manager->olist, olist) {
                uint64_t soffset = i->soffset + sa_manager->gpu_addr;
                uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;

                if (&i->olist == sa_manager->hole) {
                        seq_printf(m, ">");
                } else {
                        seq_printf(m, " ");
                }
                seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
                           soffset, eoffset, eoffset - soffset);
                if (i->fence)
                        amdgpu_sa_bo_dump_fence(i->fence, m);
                seq_printf(m, "\n");
        }
        spin_unlock(&sa_manager->wq.lock);
}
#endif