drm_gem.h

#ifndef __DRM_GEM_H__
#define __DRM_GEM_H__

/*
 * GEM Graphics Execution Manager Driver Interfaces
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009-2010, Code Aurora Forum.
 * All rights reserved.
 * Copyright © 2014 Intel Corporation
 *   Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/kref.h>

#include <drm/drm_vma_manager.h>

/**
 * This structure defines the generic GEM buffer object, which the DRM core
 * and drivers use as the base memory object for their buffer objects.
 */
struct drm_gem_object {
	/** Reference count of this object */
	struct kref refcount;

	/**
	 * handle_count - gem file_priv handle count of this object
	 *
	 * Each handle also holds a reference. Note that when the handle_count
	 * drops to 0 any global names (e.g. the id in the flink namespace) will
	 * be cleared.
	 *
	 * Protected by dev->object_name_lock.
	 */
	unsigned handle_count;

	/** Related drm device */
	struct drm_device *dev;

	/** File representing the shmem storage */
	struct file *filp;

	/* Mapping info for this object */
	struct drm_vma_offset_node vma_node;

	/**
	 * Size of the object, in bytes. Immutable over the object's
	 * lifetime.
	 */
	size_t size;

	/**
	 * Global name for this object, starts at 1. 0 means unnamed.
	 * Access is covered by the object_name_lock in the related drm_device.
	 */
	int name;

	/**
	 * Memory domains. These monitor which caches contain read/write data
	 * related to the object. When transitioning from one set of domains
	 * to another, the driver is called to ensure that caches are suitably
	 * flushed and invalidated.
	 */
	uint32_t read_domains;
	uint32_t write_domain;

	/**
	 * While validating an exec operation, the
	 * new read/write domain values are computed here.
	 * They will be transferred to the above values
	 * at the point that any cache flushing occurs.
	 */
	uint32_t pending_read_domains;
	uint32_t pending_write_domain;

	/**
	 * dma_buf - dma buf associated with this GEM object
	 *
	 * Pointer to the dma-buf associated with this gem object (either
	 * through importing or exporting). We break the resulting reference
	 * loop when the last gem handle for this object is released.
	 *
	 * Protected by dev->object_name_lock.
	 */
	struct dma_buf *dma_buf;

	/**
	 * import_attach - dma buf attachment backing this object
	 *
	 * Any foreign dma_buf imported as a gem object has this set to the
	 * attachment point for the device. This is invariant over the lifetime
	 * of a gem object.
	 *
	 * The driver's ->gem_free_object callback is responsible for cleaning
	 * up the dma_buf attachment and references acquired at import time.
	 *
	 * Note that the drm gem/prime core does not depend upon drivers setting
	 * this field any more, so drivers where this doesn't make sense
	 * (e.g. virtual devices or a DisplayLink device behind a USB bus) can
	 * simply leave it set to NULL.
	 */
	struct dma_buf_attachment *import_attach;
};
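
/*
 * Illustrative sketch (not part of this header): drivers normally embed
 * struct drm_gem_object as the first member of their own buffer object
 * type and recover their type with container_of(). The names foo_bo and
 * to_foo_bo below are hypothetical.
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *		void *vaddr;
 *	};
 *
 *	static inline struct foo_bo *to_foo_bo(struct drm_gem_object *obj)
 *	{
 *		return container_of(obj, struct foo_bo, base);
 *	}
 */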

void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size);
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma);
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
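
/*
 * Illustrative sketch (assumed driver code, not part of this header): a
 * driver using the shmem backing store typically points its file_operations
 * at drm_gem_mmap(), which looks up the object from the fake offset stored
 * in obj->vma_node and installs the driver's gem_vm_ops on the vma. foo_fops
 * is a hypothetical name.
 *
 *	static const struct file_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.mmap		= drm_gem_mmap,
 *	};
 */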

static inline void
drm_gem_object_reference(struct drm_gem_object *obj)
{
	kref_get(&obj->refcount);
}

static inline void
drm_gem_object_unreference(struct drm_gem_object *obj)
{
	if (obj != NULL) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}

static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev;

	if (!obj)
		return;

	dev = obj->dev;
	if (kref_put_mutex(&obj->refcount, drm_gem_object_free, &dev->struct_mutex))
		mutex_unlock(&dev->struct_mutex);
	else
		might_lock(&dev->struct_mutex);
}
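
/*
 * Illustrative note: drm_gem_object_unreference() may only be used with
 * dev->struct_mutex held (the WARN_ON above checks this), whereas
 * drm_gem_object_unreference_unlocked() takes the mutex itself when the
 * last reference is dropped and therefore must be called without it.
 */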

int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep);
int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
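
/*
 * Illustrative sketch (assumed driver code): a typical creation ioctl
 * allocates an object, publishes it to userspace as a handle and then drops
 * the creation reference, leaving the handle as the only long-term
 * reference. foo_bo_create() is hypothetical.
 *
 *	obj = foo_bo_create(dev, args->size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	ret = drm_gem_handle_create(file_priv, obj, &args->handle);
 *	drm_gem_object_unreference_unlocked(obj);
 *	return ret;
 */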

void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
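
/*
 * Illustrative sketch (assumed driver code): a dumb_map_offset style
 * implementation creates the fake mmap offset on demand and reports it to
 * userspace with drm_vma_node_offset_addr() from <drm/drm_vma_manager.h>.
 *
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret)
 *		return ret;
 *	*offset = drm_vma_node_offset_addr(&obj->vma_node);
 */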

struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed);
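
/*
 * Illustrative sketch (assumed driver code): pin the shmem backing pages
 * while the object is in use and release them again, passing dirty/accessed
 * hints back to the page cache.
 *
 *	pages = drm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... build a scatterlist or vmap the pages ...
 *	drm_gem_put_pages(obj, pages, true, true);
 */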

struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
					     struct drm_file *filp,
					     u32 handle);
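
/*
 * Illustrative sketch (assumed driver code): resolving a userspace handle
 * inside an ioctl. The lookup returns NULL for unknown handles and takes a
 * reference which the caller must drop when done.
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *	... operate on obj ...
 *	drm_gem_object_unreference_unlocked(obj);
 */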

int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle);

#endif /* __DRM_GEM_H__ */