drm_gem_cma_helper.c

/*
 * drm gem CMA (contiguous memory allocator) helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 *
 * Based on Samsung Exynos code
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: cma helpers
 *
 * The Contiguous Memory Allocator reserves a pool of memory at early boot
 * that is used to service requests for large blocks of contiguous memory.
 *
 * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
 * objects that are physically contiguous in memory. This is useful for
 * display drivers that are unable to map scattered buffers via an IOMMU.
 */
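
/*
 * Example (illustrative sketch only, not used by the helpers below): how a
 * driver built on these helpers typically wires them into its struct
 * drm_driver. "foo_driver" is a hypothetical name; a real driver also fills
 * in .fops (with .mmap = drm_gem_cma_mmap, see the sketch after
 * drm_gem_cma_mmap() below), .name and the rest of its driver-specific
 * fields.
 */
static struct drm_driver foo_driver __maybe_unused = {
	.driver_features	   = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
	.gem_free_object	   = drm_gem_cma_free_object,
	.gem_vm_ops		   = &drm_gem_cma_vm_ops,
	.dumb_create		   = drm_gem_cma_dumb_create,
	.dumb_map_offset	   = drm_gem_cma_dumb_map_offset,
	.dumb_destroy		   = drm_gem_dumb_destroy,
	.prime_handle_to_fd	   = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	   = drm_gem_prime_fd_to_handle,
	.gem_prime_import	   = drm_gem_prime_import,
	.gem_prime_export	   = drm_gem_prime_export,
	.gem_prime_get_sg_table	   = drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
	.gem_prime_vmap		   = drm_gem_cma_prime_vmap,
	.gem_prime_vunmap	   = drm_gem_cma_prime_vunmap,
	.gem_prime_mmap		   = drm_gem_cma_prime_mmap,
};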

/**
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates and initializes a GEM CMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, size_t size)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
	if (!cma_obj)
		return ERR_PTR(-ENOMEM);

	gem_obj = &cma_obj->base;

	ret = drm_gem_object_init(drm, gem_obj, size);
	if (ret)
		goto error;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret) {
		drm_gem_object_release(gem_obj);
		goto error;
	}

	return cma_obj;

error:
	kfree(cma_obj);
	return ERR_PTR(ret);
}

/**
 * drm_gem_cma_create - allocate an object with the given size
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates a CMA GEM object and allocates a contiguous chunk of
 * memory as backing store. The backing memory has the writecombine attribute
 * set.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
					      size_t size)
{
	struct drm_gem_cma_object *cma_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	cma_obj = __drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
			&cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
	if (!cma_obj->vaddr) {
		dev_err(drm->dev, "failed to allocate buffer with size %zu\n",
			size);
		ret = -ENOMEM;
		goto error;
	}

	return cma_obj;

error:
	drm_gem_cma_free_object(&cma_obj->base);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);
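
/*
 * Example (illustrative sketch only): a hypothetical driver helper that
 * allocates a contiguous scanout buffer with drm_gem_cma_create().
 * "foo_alloc_scanout" is not a real function; it just shows how the
 * resulting object is used.
 */
static inline struct drm_gem_cma_object *
foo_alloc_scanout(struct drm_device *drm, size_t size)
{
	struct drm_gem_cma_object *cma_obj;

	cma_obj = drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	/*
	 * cma_obj->paddr is the DMA address the driver programs into its
	 * scanout engine; cma_obj->vaddr is the writecombined CPU mapping.
	 * The object is released again with
	 * drm_gem_object_unreference_unlocked() when no longer needed.
	 */
	memset(cma_obj->vaddr, 0, size);

	return cma_obj;
}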

/**
 * drm_gem_cma_create_with_handle - allocate an object with the given size and
 *     return a GEM handle to it
 * @file_priv: DRM file-private structure to register the handle for
 * @drm: DRM device
 * @size: size of the object to allocate
 * @handle: return location for the GEM handle
 *
 * This function creates a CMA GEM object, allocating a physically contiguous
 * chunk of memory as backing store. The GEM object is then added to the list
 * of objects associated with the given file and a handle to it is returned.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
drm_gem_cma_create_with_handle(struct drm_file *file_priv,
			       struct drm_device *drm, size_t size,
			       uint32_t *handle)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	cma_obj = drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	gem_obj = &cma_obj->base;

	/*
	 * Allocate an ID in the IDR table where the object is registered;
	 * the handle returned to userspace holds that ID.
	 */
	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(gem_obj);

	return cma_obj;

err_handle_create:
	drm_gem_cma_free_object(gem_obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_cma_free_object - free resources associated with a CMA GEM object
 * @gem_obj: GEM object to free
 *
 * This function frees the backing memory of the CMA GEM object, cleans up the
 * GEM object state and frees the memory used to store the object itself.
 * Drivers using the CMA helpers should set this as their DRM driver's
 * ->gem_free_object() callback.
 */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *cma_obj;

	cma_obj = to_drm_gem_cma_obj(gem_obj);

	if (cma_obj->vaddr) {
		dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
				      cma_obj->vaddr, cma_obj->paddr);
	} else if (gem_obj->import_attach) {
		drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
	}

	drm_gem_object_release(gem_obj);

	kfree(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);

/**
 * drm_gem_cma_dumb_create_internal - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This aligns the pitch and size arguments to the minimum required. This is
 * an internal helper that can be wrapped by a driver to account for hardware
 * with more specific alignment requirements. It should not be used directly
 * as the ->dumb_create() callback in a DRM driver.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
				     struct drm_device *drm,
				     struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_cma_object *cma_obj;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);

/**
 * drm_gem_cma_dumb_create - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their ->dumb_create() callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace and pass the IOCTL data along to the
 * drm_gem_cma_dumb_create_internal() function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create(struct drm_file *file_priv,
			    struct drm_device *drm,
			    struct drm_mode_create_dumb *args)
{
	struct drm_gem_cma_object *cma_obj;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
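
/*
 * Example (illustrative sketch only): a hypothetical ->dumb_create()
 * implementation for hardware that requires the pitch to be a multiple of
 * 64 bytes. It adjusts the pitch set up by userspace and then defers to
 * drm_gem_cma_dumb_create_internal(), as described above. "foo_dumb_create"
 * and the 64-byte constraint are made up for the example.
 */
static inline int foo_dumb_create(struct drm_file *file_priv,
				  struct drm_device *drm,
				  struct drm_mode_create_dumb *args)
{
	/* Round the minimum pitch up to the hardware's 64-byte alignment. */
	args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);

	return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
}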

/**
 * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM
 *     object
 * @file_priv: DRM file-private structure containing the GEM object
 * @drm: DRM device
 * @handle: GEM object handle
 * @offset: return location for the fake mmap offset
 *
 * This function looks up an object by its handle and returns the fake mmap
 * offset associated with it. Drivers using the CMA helpers should set this
 * as their DRM driver's ->dumb_map_offset() callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
				struct drm_device *drm, u32 handle,
				u64 *offset)
{
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
	if (!gem_obj) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		return -EINVAL;
	}

	*offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_unreference_unlocked(gem_obj);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);

const struct vm_operations_struct drm_gem_cma_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);

static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
				struct vm_area_struct *vma)
{
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	ret = dma_mmap_writecombine(cma_obj->base.dev->dev, vma,
				    cma_obj->vaddr, cma_obj->paddr,
				    vma->vm_end - vma->vm_start);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

/**
 * drm_gem_cma_mmap - memory-map a CMA GEM object
 * @filp: file object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for CMA objects: In addition to the usual GEM VMA setup it
 * immediately faults in the entire object instead of using on-demand
 * faulting. Drivers which employ the CMA helpers should use this function
 * as their ->mmap() handler in the DRM device file's file_operations
 * structure.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	gem_obj = vma->vm_private_data;
	cma_obj = to_drm_gem_cma_obj(gem_obj);

	return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
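
/*
 * Example (illustrative sketch only): the file_operations a driver using
 * these helpers typically exposes, with drm_gem_cma_mmap() as the ->mmap()
 * handler. "foo_fops" is a hypothetical name.
 */
static const struct file_operations foo_fops __maybe_unused = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= drm_compat_ioctl,
#endif
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= no_llseek,
	.mmap		= drm_gem_cma_mmap,
};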

#ifdef CONFIG_DEBUG_FS
/**
 * drm_gem_cma_describe - describe a CMA GEM object for debugfs
 * @cma_obj: CMA GEM object
 * @m: debugfs file handle
 *
 * This function can be used to dump a human-readable representation of the
 * CMA GEM object into a synthetic file.
 */
void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
			  struct seq_file *m)
{
	struct drm_gem_object *obj = &cma_obj->base;
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
		   obj->name, obj->refcount.refcount.counter,
		   off, &cma_obj->paddr, cma_obj->vaddr, obj->size);
	seq_printf(m, "\n");
}
EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
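
/*
 * Example (illustrative sketch only): a driver-specific debugfs show
 * callback that uses drm_gem_cma_describe() to dump a buffer the driver
 * tracks itself. "struct foo_drm_private" and its "fbdev_obj" member are
 * hypothetical driver-private names.
 */
struct foo_drm_private {
	struct drm_gem_cma_object *fbdev_obj;
};

static inline int foo_gem_info_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *drm = node->minor->dev;
	struct foo_drm_private *priv = drm->dev_private;

	if (priv->fbdev_obj)
		drm_gem_cma_describe(priv->fbdev_obj, m);

	return 0;
}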
#endif

/**
 * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
 *     pages for a CMA GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API. Drivers using the CMA helpers should
 * set this as their DRM driver's ->gem_prime_get_sg_table() callback.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
			      cma_obj->paddr, obj->size);
	if (ret < 0)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return NULL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);

/**
 * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
 *     driver's scatter/gather table of pinned pages
 * @dev: device to import into
 * @attach: DMA-BUF attachment
 * @sgt: scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Imported buffers must be physically contiguous in memory
 * (i.e. the scatter/gather table must contain a single entry). Drivers that
 * use the CMA helpers should set this as their DRM driver's
 * ->gem_prime_import_sg_table() callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct drm_gem_cma_object *cma_obj;

	if (sgt->nents != 1)
		return ERR_PTR(-EINVAL);

	/* Create a CMA GEM buffer. */
	cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
	if (IS_ERR(cma_obj))
		return ERR_CAST(cma_obj);

	cma_obj->paddr = sg_dma_address(sgt->sgl);
	cma_obj->sgt = sgt;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n",
			&cma_obj->paddr, attach->dmabuf->size);

	return &cma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);

/**
 * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function maps a buffer imported via DRM PRIME into a userspace
 * process's address space. Drivers that use the CMA helpers should set this
 * as their DRM driver's ->gem_prime_mmap() callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	struct drm_gem_cma_object *cma_obj;
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	cma_obj = to_drm_gem_cma_obj(obj);
	return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);

/**
 * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
 *     address space
 * @obj: GEM object
 *
 * This function maps a buffer exported via DRM PRIME into the kernel's
 * virtual address space. Since the CMA buffers are already mapped into the
 * kernel virtual address space this simply returns the cached virtual
 * address. Drivers using the CMA helpers should set this as their DRM
 * driver's ->gem_prime_vmap() callback.
 *
 * Returns:
 * The kernel virtual address of the CMA GEM object's backing store.
 */
void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
{
	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);

	return cma_obj->vaddr;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);

/**
 * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual
 *     address space
 * @obj: GEM object
 * @vaddr: kernel virtual address where the CMA GEM object was mapped
 *
 * This function removes a buffer exported via DRM PRIME from the kernel's
 * virtual address space. This is a no-op because CMA buffers cannot be
 * unmapped from kernel space. Drivers using the CMA helpers should set this
 * as their DRM driver's ->gem_prime_vunmap() callback.
 */
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);