armada_gem.c

/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <drm/drmP.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
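
/*
 * Fault handler for objects with a contiguous physical backing (small
 * page-allocated objects and linear-pool objects): insert the PFN that
 * corresponds to the faulting offset within the object.
 */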
static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
	unsigned long addr = (unsigned long)vmf->virtual_address;
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
	int ret;

	pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
	ret = vm_insert_pfn(vma, addr, pfn);

	switch (ret) {
	case 0:
	case -EBUSY:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};
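
/* All GEM allocations are rounded up to a whole number of pages. */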
static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}

/* dev->struct_mutex is held here */
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		drm_mm_remove_node(dobj->linear);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}
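
/*
 * Attach backing store to an object that has none yet.  Small objects
 * (<= 8K, typically cursors) come from the page allocator; anything larger
 * is carved out of the driver's linear memory pool and cleared through a
 * temporary write-combining mapping.
 */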
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = dev->dev_private;
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address is invalid depending on the architecture
	 * implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the use of the device address also
	 * unsafe to re-use as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&dev->struct_mutex);
		ret = drm_mm_insert_node(&priv->linear, node, size, align,
					 DRM_MM_SEARCH_DEFAULT);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&dev->struct_mutex);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&dev->struct_mutex);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}
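
/*
 * Return a kernel-virtual mapping of the object for CPU access, creating a
 * write-combining ioremap for linear-pool objects on first use.
 */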
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}
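
/*
 * "Private" objects have no shmem backing; they are used for dumb buffers
 * (which get linear-pool backing) and for dma-buf imports.
 */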
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	drm_gem_private_object_init(dev, &obj->obj, size);
	obj->dev_addr = DMA_ERROR_CODE;

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}

struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	obj->dev_addr = DMA_ERROR_CODE;

	mapping = file_inode(obj->obj.filp)->i_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
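/*
 * Dumb buffers follow the generic KMS allocation path: userspace supplies
 * width/height/bpp, the driver computes the pitch and size and returns a
 * handle, and the map_offset call yields a fake offset to pass to mmap().
 * An illustrative (untested) userspace sequence might look like:
 *
 *	struct drm_mode_create_dumb creq = { .width = w, .height = h, .bpp = 32 };
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *
 *	struct drm_mode_map_dumb mreq = { .handle = creq.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
 *
 *	void *fb = mmap(NULL, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			fd, mreq.offset);
 */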
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
	uint32_t handle, uint64_t *offset)
{
	struct armada_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	obj = armada_gem_object_lookup(dev, file, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object\n");
		ret = -EINVAL;
		goto err_unlock;
	}

	/* Don't allow imported objects to be mapped */
	if (obj->obj.import_attach) {
		ret = -EINVAL;
		goto err_unlock;
	}

	ret = drm_gem_create_mmap_offset(&obj->obj);
	if (ret == 0) {
		*offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
		DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
	}

	drm_gem_object_unreference(&obj->obj);
 err_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
	uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(dev, file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_unreference(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_unreference(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}
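
/*
 * Copy user data into a kernel-mapped (page or linear backed) object and,
 * if an update callback has been registered (e.g. for cursor images), let
 * it push the new contents to the hardware.
 */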
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(VERIFY_READ, ptr, args->size))
		return -EFAULT;

	ret = fault_in_multipages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(dev, file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr)
		return -EINVAL;

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %u\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

/* Prime support */
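/*
 * Build a scatterlist for an exported object.  Three backings are handled:
 * shmem-backed objects (one sg entry per page), a single contiguous page
 * allocation, and linear-pool memory, which has no struct pages and is
 * described purely by its DMA address and length.
 */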
struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = file_inode(dobj->obj.filp)->i_mapping;

		for_each_sg(sgt->sgl, sg, count, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				num = i;
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
		page_cache_release(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}
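
/*
 * Undo armada_gem_prime_map_dma_buf(): linear-pool regions were never
 * DMA-mapped; shmem pages additionally have their page references dropped.
 */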
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	if (dobj->obj.filp) {
		struct scatterlist *sg;
		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			page_cache_release(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
	return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.kmap_atomic	= armada_gem_dmabuf_no_kmap,
	.kunmap_atomic	= armada_gem_dmabuf_no_kunmap,
	.kmap		= armada_gem_dmabuf_no_kmap,
	.kunmap		= armada_gem_dmabuf_no_kunmap,
	.mmap		= armada_gem_dmabuf_mmap,
};
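
/*
 * Export one of our objects as a dma-buf.  The DRM PRIME core reaches this
 * when userspace converts a GEM handle to a file descriptor, for example
 * (illustrative only):
 *
 *	struct drm_prime_handle prime = { .handle = handle, .flags = DRM_CLOEXEC };
 *	ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime);
 *	// prime.fd now refers to the exported dma-buf
 */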
struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
	int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return dma_buf_export(&exp_info);
}

struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}
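
/*
 * Map an imported dma-buf for DMA.  The display hardware cannot
 * scatter-gather, so the attachment must resolve to a single contiguous
 * region covering the whole object.
 */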
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (!dobj->sgt) {
		DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
		return -EINVAL;
	}
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}

	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	return 0;
}