i915_gem_batch_pool.c

/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "i915_gem_batch_pool.h"

/**
 * DOC: batch pool
 *
 * In order to submit batch buffers as 'secure', the software command parser
 * must ensure that a batch buffer cannot be modified after parsing. It does
 * this by copying the user provided batch buffer contents to a kernel owned
 * buffer from which the hardware will actually execute, and by carefully
 * managing the address space bindings for such buffers.
 *
 * The batch pool framework provides a mechanism for the driver to manage a
 * set of scratch buffers to use for this purpose. The framework can be
 * extended to support other use cases should they arise.
 */
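
/*
 * For reference, the pool itself is declared in i915_gem_batch_pool.h and is
 * little more than a set of size-bucketed object lists. Roughly (a sketch,
 * not a verbatim copy of the header; the four buckets correspond to the size
 * classes described in i915_gem_batch_pool_get() below):
 *
 *	struct i915_gem_batch_pool {
 *		struct drm_device *dev;
 *		struct list_head cache_list[4];
 *	};
 */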

/**
 * i915_gem_batch_pool_init() - initialize a batch buffer pool
 * @dev: the drm device
 * @pool: the batch buffer pool
 */
void i915_gem_batch_pool_init(struct drm_device *dev,
			      struct i915_gem_batch_pool *pool)
{
	int n;

	pool->dev = dev;

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		INIT_LIST_HEAD(&pool->cache_list[n]);
}

/**
 * i915_gem_batch_pool_fini() - clean up a batch buffer pool
 * @pool: the pool to clean up
 *
 * Note: Callers must hold the struct_mutex.
 */
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
{
	int n;

	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		while (!list_empty(&pool->cache_list[n])) {
			struct drm_i915_gem_object *obj =
				list_first_entry(&pool->cache_list[n],
						 struct drm_i915_gem_object,
						 batch_pool_link);

			list_del(&obj->batch_pool_link);
			drm_gem_object_unreference(&obj->base);
		}
	}
}

/**
 * i915_gem_batch_pool_get() - allocate a buffer from the pool
 * @pool: the batch buffer pool
 * @size: the minimum desired size of the returned buffer
 *
 * Returns an inactive buffer from @pool with at least @size bytes,
 * with the pages pinned. The caller must i915_gem_object_unpin_pages()
 * on the returned object.
 *
 * Note: Callers must hold the struct_mutex.
 *
 * Return: the buffer object or an error pointer
 */
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
			size_t size)
{
	struct drm_i915_gem_object *obj = NULL;
	struct drm_i915_gem_object *tmp, *next;
	struct list_head *list;
	int n;

	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));

	/* Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
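	/* For whole-page sizes, fls(size >> PAGE_SHIFT) - 1 maps 1 page to
	 * bucket 0, 2-3 pages to bucket 1, 4-7 pages to bucket 2, and the
	 * clamp below folds 8 or more pages into the last bucket.
	 */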
	n = fls(size >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;
	list = &pool->cache_list[n];

	list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
		/* The batches are strictly LRU ordered */
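		/* Objects are moved to the tail of the list when handed out
		 * (see list_move_tail() below), so once we reach an active
		 * object the rest of the list is treated as busy too and we
		 * stop scanning.
		 */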
		if (tmp->active)
			break;

		/* While we're looping, do some clean up */
		if (tmp->madv == __I915_MADV_PURGED) {
			list_del(&tmp->batch_pool_link);
			drm_gem_object_unreference(&tmp->base);
			continue;
		}

		if (tmp->base.size >= size) {
			obj = tmp;
			break;
		}
	}

	if (obj == NULL) {
		int ret;

		obj = i915_gem_alloc_object(pool->dev, size);
		if (obj == NULL)
			return ERR_PTR(-ENOMEM);

		ret = i915_gem_object_get_pages(obj);
		if (ret)
			return ERR_PTR(ret);

		obj->madv = I915_MADV_DONTNEED;
	}

	list_move_tail(&obj->batch_pool_link, list);
	i915_gem_object_pin_pages(obj);
	return obj;
}
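
/*
 * Illustrative sketch only, not part of the driver: it shows the caller side
 * of the contract documented for i915_gem_batch_pool_get() above. The
 * function name and the copy/parse step are hypothetical placeholders;
 * struct_mutex is assumed to already be held by the caller, as the pool API
 * requires.
 */
static inline struct drm_i915_gem_object *
example_get_shadow_batch(struct i915_gem_batch_pool *pool, size_t batch_len)
{
	struct drm_i915_gem_object *shadow;

	/* Grab an inactive, suitably sized buffer; pages come back pinned. */
	shadow = i915_gem_batch_pool_get(pool, batch_len);
	if (IS_ERR(shadow))
		return shadow;

	/* ... copy and parse the user supplied batch into 'shadow' ... */

	/* Drop the page pin once we are done touching the backing pages. */
	i915_gem_object_unpin_pages(shadow);

	return shadow;
}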