/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
	struct device *dev;
};

struct vb2_dc_buf {
	struct device *dev;
	void *vaddr;
	unsigned long size;
	dma_addr_t dma_addr;
	enum dma_data_direction dma_dir;
	struct sg_table *dma_sgt;
	struct frame_vector *vec;

	/* MMAP related */
	struct vb2_vmarea_handler handler;
	atomic_t refcount;
	struct sg_table *sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment *db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}
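
/*
 * Worked example for the helper above: given an sg table whose first two
 * entries map to DMA addresses 0x10000000 (len 0x1000) and 0x10001000
 * (len 0x1000), the second entry begins exactly where the first ends, so
 * the loop accumulates 0x2000 contiguous bytes. If the second entry
 * instead started at 0x10002000, the loop would break after the first
 * entry and report only 0x1000 bytes.
 */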

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}
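
/*
 * A minimal sketch of how a driver consumes this cookie: the
 * vb2_dma_contig_plane_dma_addr() helper in videobuf2-dma-contig.h
 * dereferences the pointer returned above to obtain the bus address to
 * program into the hardware. The my_write_reg and MY_DMA_ADDR_REG names
 * below are hypothetical:
 *
 *	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
 *
 *	my_write_reg(MY_DMA_ADDR_REG, addr);
 */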

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}
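
/*
 * Note on the two memops above: the vb2 core calls prepare() before a
 * buffer is handed to the device (cache ownership moves to the device via
 * dma_sync_sg_for_device()) and finish() when the device is done with it
 * (ownership returns to the CPU via dma_sync_sg_for_cpu()). Both are
 * deliberately no-ops for MMAP buffers, whose dma_alloc_coherent() memory
 * needs no syncing (dma_sgt is NULL), and for attached DMABUFs, where the
 * exporter is responsible.
 */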

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size,
			  enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
					GFP_KERNEL | gfp_flags);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
	 * map whole buffer
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
				buf->dma_addr, buf->size);
	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
		 buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			     attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			     attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.kmap = vb2_dc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
			      buf->size);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}
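
/*
 * The reference taken above is dropped in vb2_dc_dmabuf_ops_release() once
 * the last user of the exported dma-buf closes it, so the underlying memory
 * can outlive the vb2 queue while userspace still holds the file descriptor
 * (typically obtained through the VIDIOC_EXPBUF ioctl).
 */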

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, &attrs);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		for (i = 0; i < frame_vector_count(buf->vec); i++)
			set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

/*
 * For some kinds of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert the
 * pfn to a dma address, or as a last resort assume that
 * dma address == physical address (as was assumed in earlier versions of
 * videobuf2-dma-contig).
 */
#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	/* really, we cannot do anything better at this point */
	return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif
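
/*
 * Worked example for the last-resort fallback above: with 4 KiB pages
 * (PAGE_SHIFT == 12), pfn 0x40000 becomes dma address
 * 0x40000 << 12 == 0x40000000, i.e. the dma address is simply assumed to
 * equal the physical address of the page.
 */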

static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned long offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = dma_dir;

	offset = vaddr & ~PAGE_MASK;
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
		       contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, &attrs);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
		       contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc = vb2_dc_alloc,
	.put = vb2_dc_put,
	.get_dmabuf = vb2_dc_get_dmabuf,
	.cookie = vb2_dc_cookie,
	.vaddr = vb2_dc_vaddr,
	.mmap = vb2_dc_mmap,
	.get_userptr = vb2_dc_get_userptr,
	.put_userptr = vb2_dc_put_userptr,
	.prepare = vb2_dc_prepare,
	.finish = vb2_dc_finish,
	.map_dmabuf = vb2_dc_map_dmabuf,
	.unmap_dmabuf = vb2_dc_unmap_dmabuf,
	.attach_dmabuf = vb2_dc_attach_dmabuf,
	.detach_dmabuf = vb2_dc_detach_dmabuf,
	.num_users = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	if (!IS_ERR_OR_NULL(alloc_ctx))
		kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
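
/*
 * A minimal usage sketch (hypothetical driver code, error handling elided):
 * allocate one context per DMA-capable device at probe time, point the vb2
 * queue at these memops, and free the context on remove. The priv/pdev/q
 * names are placeholders:
 *
 *	priv->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *	q->mem_ops = &vb2_dma_contig_memops;
 *
 * The context is then handed back to vb2 from the driver's queue_setup()
 * callback via the alloc_ctxs[] array, and released with
 * vb2_dma_contig_cleanup_ctx() when the device goes away.
 */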

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");