videobuf2-vmalloc.c

/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

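/*
 * Per-buffer bookkeeping shared by all three buffer types (MMAP, USERPTR
 * and DMABUF): the kernel mapping, an optional pinned user-page vector,
 * the DMA direction, the buffer size, a refcount and the mmap handler.
 */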
struct vb2_vmalloc_buf {
        void *vaddr;
        struct frame_vector *vec;
        enum dma_data_direction dma_dir;
        unsigned long size;
        atomic_t refcount;
        struct vb2_vmarea_handler handler;
        struct dma_buf *dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

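/*
 * MMAP: allocate the buffer with vmalloc_user() and take the first
 * reference; the handler lets mmap'ed VMAs hold further references.
 */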
static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size,
                               enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
        struct vb2_vmalloc_buf *buf;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
        if (!buf)
                return NULL;

        buf->size = size;
        buf->vaddr = vmalloc_user(buf->size);
        buf->dma_dir = dma_dir;
        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_vmalloc_put;
        buf->handler.arg = buf;

        if (!buf->vaddr) {
                pr_debug("vmalloc of size %lu failed\n", buf->size);
                kfree(buf);
                return NULL;
        }

        atomic_inc(&buf->refcount);
        return buf;
}

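/*
 * Drop one reference; free the vmalloc area and the bookkeeping structure
 * once the last user (queue, mmap mapping or exported dmabuf) is gone.
 */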
static void vb2_vmalloc_put(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        if (atomic_dec_and_test(&buf->refcount)) {
                vfree(buf->vaddr);
                kfree(buf);
        }
}

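/*
 * USERPTR: pin the user pages behind [vaddr, vaddr + size) and build a
 * kernel mapping for them. Page-backed memory is mapped with vm_map_ram();
 * PFN-only ranges (e.g. reserved memory without struct pages) must be
 * physically contiguous and are mapped with ioremap_nocache() instead.
 */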
static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
                                     unsigned long size,
                                     enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_buf *buf;
        struct frame_vector *vec;
        int n_pages, offset, i;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->dma_dir = dma_dir;
        offset = vaddr & ~PAGE_MASK;
        buf->size = size;
        vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
        if (IS_ERR(vec))
                goto fail_pfnvec_create;
        buf->vec = vec;
        n_pages = frame_vector_count(vec);
        if (frame_vector_to_pages(vec) < 0) {
                unsigned long *nums = frame_vector_pfns(vec);

                /*
                 * We cannot get page pointers for these pfns. Check memory is
                 * physically contiguous and use direct mapping.
                 */
                for (i = 1; i < n_pages; i++)
                        if (nums[i - 1] + 1 != nums[i])
                                goto fail_map;
                buf->vaddr = (__force void *)
                        ioremap_nocache(nums[0] << PAGE_SHIFT, size);
        } else {
                buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
                                        PAGE_KERNEL);
        }

        if (!buf->vaddr)
                goto fail_map;
        buf->vaddr += offset;
        return buf;

fail_map:
        vb2_destroy_framevec(vec);
fail_pfnvec_create:
        kfree(buf);
        return NULL;
}

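/*
 * Undo vb2_vmalloc_get_userptr(): tear down the kernel mapping, mark
 * captured pages dirty so their contents survive reclaim, and unpin them.
 */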
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
        unsigned int i;
        struct page **pages;
        unsigned int n_pages;

        if (!buf->vec->is_pfns) {
                n_pages = frame_vector_count(buf->vec);
                pages = frame_vector_pages(buf->vec);
                if (vaddr)
                        vm_unmap_ram((void *)vaddr, n_pages);
                if (buf->dma_dir == DMA_FROM_DEVICE)
                        for (i = 0; i < n_pages; i++)
                                set_page_dirty_lock(pages[i]);
        } else {
                iounmap((__force void __iomem *)buf->vaddr);
        }
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

static void *vb2_vmalloc_vaddr(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        if (!buf->vaddr) {
                pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
                return NULL;
        }

        return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

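/*
 * MMAP: hand the vmalloc area to userspace with remap_vmalloc_range() and
 * hook up the common vm_ops so the mapping holds a reference on the buffer.
 */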
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                pr_err("No memory to map\n");
                return -EINVAL;
        }

        ret = remap_vmalloc_range(vma, buf->vaddr, 0);
        if (ret) {
                pr_err("Remapping vmalloc memory, error: %d\n", ret);
                return ret;
        }

        /*
         * Make sure that vm_areas for 2 buffers won't be merged together
         */
        vma->vm_flags |= VM_DONTEXPAND;

        /*
         * Use common vm_area operations to track buffer refcount.
         */
        vma->vm_private_data = &buf->handler;
        vma->vm_ops = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

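/*
 * Build a per-attachment scatterlist covering the vmalloc pages; the
 * actual DMA mapping is deferred to the map callback below, so the
 * direction starts out as DMA_NONE.
 */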
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
                                         struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_vmalloc_attachment *attach;
        struct vb2_vmalloc_buf *buf = dbuf->priv;
        int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
        struct sg_table *sgt;
        struct scatterlist *sg;
        void *vaddr = buf->vaddr;
        int ret;
        int i;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return ret;
        }
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct page *page = vmalloc_to_page(vaddr);

                if (!page) {
                        sg_free_table(sgt);
                        kfree(attach);
                        return -ENOMEM;
                }
                sg_set_page(sg, page, PAGE_SIZE, 0);
                vaddr += PAGE_SIZE;
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;
        return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
                                          struct dma_buf_attachment *db_attach)
{
        struct vb2_vmalloc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                             attach->dma_dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

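/*
 * Map the cached scatterlist for the importing device. The mapping is
 * cached per attachment and only redone when the DMA direction changes.
 */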
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                             attach->dma_dir);
                attach->dma_dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                                dma_dir);
        if (!sgt->nents) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
                                         struct sg_table *sgt,
                                         enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_vmalloc_get_dmabuf */
        vb2_vmalloc_put(dbuf->priv);
}

static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_vmalloc_buf *buf = dbuf->priv;

        return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_vmalloc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
                                       struct vm_area_struct *vma)
{
        return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
        .attach = vb2_vmalloc_dmabuf_ops_attach,
        .detach = vb2_vmalloc_dmabuf_ops_detach,
        .map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
        .kmap = vb2_vmalloc_dmabuf_ops_kmap,
        .kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
        .vmap = vb2_vmalloc_dmabuf_ops_vmap,
        .mmap = vb2_vmalloc_dmabuf_ops_mmap,
        .release = vb2_vmalloc_dmabuf_ops_release,
};

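/*
 * Export an MMAP buffer as a dma_buf. The exported dma_buf takes its own
 * reference on the vb2 buffer, dropped again in the release callback above.
 */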
static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_vmalloc_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (WARN_ON(!buf->vaddr))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        atomic_inc(&buf->refcount);

        return dbuf;
}
#endif /* CONFIG_HAS_DMA */

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

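/*
 * Importer side: attach to a foreign dma_buf and make its contents visible
 * through a kernel virtual address via dma_buf_vmap()/dma_buf_vunmap().
 */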
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        buf->vaddr = dma_buf_vmap(buf->dbuf);

        return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        dma_buf_vunmap(buf->dbuf, buf->vaddr);
        buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        if (buf->vaddr)
                dma_buf_vunmap(buf->dbuf, buf->vaddr);

        kfree(buf);
}

static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
                                       unsigned long size,
                                       enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_buf *buf;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dbuf = dbuf;
        buf->dma_dir = dma_dir;
        buf->size = size;

        return buf;
}

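/*
 * The allocator is selected per queue. A typical driver wires it up during
 * queue setup (a sketch, not part of this file):
 *
 *        q->mem_ops = &vb2_vmalloc_memops;
 *
 * before calling vb2_queue_init(q).
 */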
const struct vb2_mem_ops vb2_vmalloc_memops = {
        .alloc = vb2_vmalloc_alloc,
        .put = vb2_vmalloc_put,
        .get_userptr = vb2_vmalloc_get_userptr,
        .put_userptr = vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
        .get_dmabuf = vb2_vmalloc_get_dmabuf,
#endif
        .map_dmabuf = vb2_vmalloc_map_dmabuf,
        .unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
        .attach_dmabuf = vb2_vmalloc_attach_dmabuf,
        .detach_dmabuf = vb2_vmalloc_detach_dmabuf,
        .vaddr = vb2_vmalloc_vaddr,
        .mmap = vb2_vmalloc_mmap,
        .num_users = vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");