/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
	return container_of(bo, struct tegra_bo, base);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_unreference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);
}

static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr;
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
}

static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr + page * PAGE_SIZE;
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_reference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};
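
/*
 * Map a buffer into the Tegra DRM IOMMU domain: reserve a range of I/O
 * virtual addresses from the drm_mm allocator, then map the buffer's
 * scatter-gather list at that address. On success, bo->paddr holds the
 * IOVA through which the hardware accesses the buffer.
 */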
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
					 PAGE_SIZE, 0, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
			err);
		goto free;
	}

	bo->paddr = bo->mm->start;

	err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
			   bo->sgt->nents, prot);
	if (err < 0) {
		dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
		goto remove;
	}

	bo->size = err;

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
free:
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	iommu_unmap(tegra->domain, bo->paddr, bo->size);
	drm_mm_remove_node(bo->mm);
	kfree(bo->mm);

	return 0;
}
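
/*
 * Allocate the tegra_bo wrapper and initialize the embedded GEM object.
 * The size is rounded up to a page multiple and an mmap offset is
 * reserved up front so userspace can map the buffer later.
 */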
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr,
				      bo->paddr);
	}
}
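
/*
 * Back a buffer with shmem pages and build a scatter-gather table for
 * them. This is the allocation path used when an IOMMU is available and
 * the buffer therefore need not be physically contiguous.
 */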
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	struct scatterlist *s;
	unsigned int i;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt))
		goto put_pages;

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(bo->sgt->sgl, s, bo->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return PTR_ERR(bo->sgt);
}

static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
						   GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}
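
/*
 * Allocate a buffer object, using IOMMU-mapped shmem pages when an IOMMU
 * domain is available and a contiguous write-combined DMA allocation
 * otherwise, then apply the creation flags (tiled layout, bottom-up
 * scanout).
 */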
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
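
/*
 * Create a buffer object and expose it to userspace through a GEM handle.
 * The local reference taken at creation is dropped once the handle
 * exists, so the handle owns the only reference to the object.
 */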
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_unreference_unlocked(&bo->gem);

	return bo;
}
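
/*
 * Import a buffer shared by another driver via dma-buf. Without an IOMMU
 * the buffer must be contiguous, i.e. its scatter-gather table must
 * consist of a single entry.
 */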
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
			     u32 handle, u64 *offset)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	mutex_lock(&drm->struct_mutex);

	gem = drm_gem_object_lookup(drm, file, handle);
	if (!gem) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		mutex_unlock(&drm->struct_mutex);
		return -EINVAL;
	}

	bo = to_tegra_bo(gem);

	*offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_unreference(gem);

	mutex_unlock(&drm->struct_mutex);

	return 0;
}
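
/*
 * Fault handler for userspace mappings of shmem-backed buffers: insert
 * the page backing the faulting address into the VMA. Contiguous buffers
 * are mapped up front in tegra_drm_mmap() and should never fault here.
 */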
static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;
	int err;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		 PAGE_SHIFT;
	page = bo->pages[offset];

	err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (err) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		return VM_FAULT_NOPAGE;

	case -ENOMEM:
		return VM_FAULT_OOM;
	}

	return VM_FAULT_SIGBUS;
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
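
/*
 * Contiguous buffers are mapped in one go through the DMA API. The fake
 * offset that drm_gem_mmap() used to look up the object is temporarily
 * cleared because dma_mmap_writecombine() interprets vm_pgoff as an
 * offset into the buffer. Shmem-backed buffers are instead faulted in
 * page by page via tegra_bo_fault().
 */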
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;

		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
					    bo->paddr, gem->size);
		if (ret) {
			drm_gem_vm_close(vma);
			return ret;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}
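
/*
 * Exporter side of dma-buf sharing: describe the buffer to the attached
 * device as a scatter-gather table, one entry per page for shmem-backed
 * buffers or a single entry covering contiguous buffers.
 */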
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	} else {
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	}

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
					 unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
					  unsigned long page,
					  void *addr)
{
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.kmap_atomic = tegra_gem_prime_kmap_atomic,
	.kunmap_atomic = tegra_gem_prime_kunmap_atomic,
	.kmap = tegra_gem_prime_kmap,
	.kunmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};
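
/*
 * Export a GEM object as a dma-buf that other devices can attach to.
 */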
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return dma_buf_export(&exp_info);
}
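
/*
 * Import a dma-buf as a GEM object. If the buffer was exported by this
 * driver for the same device, reuse the existing GEM object instead of
 * creating a new one.
 */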
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_reference(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}