  1. /*
  2. * drivers/base/dma-mapping.c - arch-independent dma-mapping routines
  3. *
  4. * Copyright (c) 2006 SUSE Linux Products GmbH
  5. * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
  6. *
  7. * This file is released under the GPLv2.
  8. */
  9. #include <linux/dma-mapping.h>
  10. #include <linux/export.h>
  11. #include <linux/gfp.h>
  12. #include <linux/slab.h>
  13. #include <linux/vmalloc.h>
  14. #include <asm-generic/dma-coherent.h>
  15. /*
  16. * Managed DMA API
  17. */
/*
 * Per-allocation bookkeeping attached to a device via devres: records the
 * parameters of one managed DMA allocation so the release callback can
 * hand them back to dma_free_*() on driver detach.
 */
struct dma_devres {
	size_t		size;		/* size passed to dma_alloc_*() */
	void		*vaddr;		/* kernel virtual address of the buffer */
	dma_addr_t	dma_handle;	/* device/bus address of the buffer */
};
  23. static void dmam_coherent_release(struct device *dev, void *res)
  24. {
  25. struct dma_devres *this = res;
  26. dma_free_coherent(dev, this->size, this->vaddr, this->dma_handle);
  27. }
  28. static void dmam_noncoherent_release(struct device *dev, void *res)
  29. {
  30. struct dma_devres *this = res;
  31. dma_free_noncoherent(dev, this->size, this->vaddr, this->dma_handle);
  32. }
  33. static int dmam_match(struct device *dev, void *res, void *match_data)
  34. {
  35. struct dma_devres *this = res, *match = match_data;
  36. if (this->vaddr == match->vaddr) {
  37. WARN_ON(this->size != match->size ||
  38. this->dma_handle != match->dma_handle);
  39. return 1;
  40. }
  41. return 0;
  42. }
  43. /**
  44. * dmam_alloc_coherent - Managed dma_alloc_coherent()
  45. * @dev: Device to allocate coherent memory for
  46. * @size: Size of allocation
  47. * @dma_handle: Out argument for allocated DMA handle
  48. * @gfp: Allocation flags
  49. *
  50. * Managed dma_alloc_coherent(). Memory allocated using this function
  51. * will be automatically released on driver detach.
  52. *
  53. * RETURNS:
  54. * Pointer to allocated memory on success, NULL on failure.
  55. */
  56. void *dmam_alloc_coherent(struct device *dev, size_t size,
  57. dma_addr_t *dma_handle, gfp_t gfp)
  58. {
  59. struct dma_devres *dr;
  60. void *vaddr;
  61. dr = devres_alloc(dmam_coherent_release, sizeof(*dr), gfp);
  62. if (!dr)
  63. return NULL;
  64. vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
  65. if (!vaddr) {
  66. devres_free(dr);
  67. return NULL;
  68. }
  69. dr->vaddr = vaddr;
  70. dr->dma_handle = *dma_handle;
  71. dr->size = size;
  72. devres_add(dev, dr);
  73. return vaddr;
  74. }
  75. EXPORT_SYMBOL(dmam_alloc_coherent);
  76. /**
  77. * dmam_free_coherent - Managed dma_free_coherent()
  78. * @dev: Device to free coherent memory for
  79. * @size: Size of allocation
  80. * @vaddr: Virtual address of the memory to free
  81. * @dma_handle: DMA handle of the memory to free
  82. *
  83. * Managed dma_free_coherent().
  84. */
  85. void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
  86. dma_addr_t dma_handle)
  87. {
  88. struct dma_devres match_data = { size, vaddr, dma_handle };
  89. dma_free_coherent(dev, size, vaddr, dma_handle);
  90. WARN_ON(devres_destroy(dev, dmam_coherent_release, dmam_match,
  91. &match_data));
  92. }
  93. EXPORT_SYMBOL(dmam_free_coherent);
  94. /**
  95. * dmam_alloc_non_coherent - Managed dma_alloc_non_coherent()
  96. * @dev: Device to allocate non_coherent memory for
  97. * @size: Size of allocation
  98. * @dma_handle: Out argument for allocated DMA handle
  99. * @gfp: Allocation flags
  100. *
  101. * Managed dma_alloc_non_coherent(). Memory allocated using this
  102. * function will be automatically released on driver detach.
  103. *
  104. * RETURNS:
  105. * Pointer to allocated memory on success, NULL on failure.
  106. */
  107. void *dmam_alloc_noncoherent(struct device *dev, size_t size,
  108. dma_addr_t *dma_handle, gfp_t gfp)
  109. {
  110. struct dma_devres *dr;
  111. void *vaddr;
  112. dr = devres_alloc(dmam_noncoherent_release, sizeof(*dr), gfp);
  113. if (!dr)
  114. return NULL;
  115. vaddr = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
  116. if (!vaddr) {
  117. devres_free(dr);
  118. return NULL;
  119. }
  120. dr->vaddr = vaddr;
  121. dr->dma_handle = *dma_handle;
  122. dr->size = size;
  123. devres_add(dev, dr);
  124. return vaddr;
  125. }
  126. EXPORT_SYMBOL(dmam_alloc_noncoherent);
  127. /**
  128. * dmam_free_coherent - Managed dma_free_noncoherent()
  129. * @dev: Device to free noncoherent memory for
  130. * @size: Size of allocation
  131. * @vaddr: Virtual address of the memory to free
  132. * @dma_handle: DMA handle of the memory to free
  133. *
  134. * Managed dma_free_noncoherent().
  135. */
  136. void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
  137. dma_addr_t dma_handle)
  138. {
  139. struct dma_devres match_data = { size, vaddr, dma_handle };
  140. dma_free_noncoherent(dev, size, vaddr, dma_handle);
  141. WARN_ON(!devres_destroy(dev, dmam_noncoherent_release, dmam_match,
  142. &match_data));
  143. }
  144. EXPORT_SYMBOL(dmam_free_noncoherent);
  145. #ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
/* devres release callback: undo a managed dma_declare_coherent_memory(). */
static void dmam_coherent_decl_release(struct device *dev, void *res)
{
	dma_release_declared_memory(dev);
}
  150. /**
  151. * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
  152. * @dev: Device to declare coherent memory for
  153. * @phys_addr: Physical address of coherent memory to be declared
  154. * @device_addr: Device address of coherent memory to be declared
  155. * @size: Size of coherent memory to be declared
  156. * @flags: Flags
  157. *
  158. * Managed dma_declare_coherent_memory().
  159. *
  160. * RETURNS:
  161. * 0 on success, -errno on failure.
  162. */
  163. int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
  164. dma_addr_t device_addr, size_t size, int flags)
  165. {
  166. void *res;
  167. int rc;
  168. res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
  169. if (!res)
  170. return -ENOMEM;
  171. rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
  172. flags);
  173. if (rc == 0)
  174. devres_add(dev, res);
  175. else
  176. devres_free(res);
  177. return rc;
  178. }
  179. EXPORT_SYMBOL(dmam_declare_coherent_memory);
  180. /**
  181. * dmam_release_declared_memory - Managed dma_release_declared_memory().
  182. * @dev: Device to release declared coherent memory for
  183. *
  184. * Managed dmam_release_declared_memory().
  185. */
  186. void dmam_release_declared_memory(struct device *dev)
  187. {
  188. WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
  189. }
  190. EXPORT_SYMBOL(dmam_release_declared_memory);
  191. #endif
  192. /*
  193. * Create scatter-list for the already allocated DMA buffer.
  194. */
  195. int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
  196. void *cpu_addr, dma_addr_t handle, size_t size)
  197. {
  198. struct page *page = virt_to_page(cpu_addr);
  199. int ret;
  200. ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
  201. if (unlikely(ret))
  202. return ret;
  203. sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
  204. return 0;
  205. }
  206. EXPORT_SYMBOL(dma_common_get_sgtable);
  207. /*
  208. * Create userspace mapping for the DMA-coherent memory.
  209. */
  210. int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
  211. void *cpu_addr, dma_addr_t dma_addr, size_t size)
  212. {
  213. int ret = -ENXIO;
  214. #ifdef CONFIG_MMU
  215. unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
  216. unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
  217. unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
  218. unsigned long off = vma->vm_pgoff;
  219. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  220. if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
  221. return ret;
  222. if (off < count && user_count <= (count - off)) {
  223. ret = remap_pfn_range(vma, vma->vm_start,
  224. pfn + off,
  225. user_count << PAGE_SHIFT,
  226. vma->vm_page_prot);
  227. }
  228. #endif /* CONFIG_MMU */
  229. return ret;
  230. }
  231. EXPORT_SYMBOL(dma_common_mmap);
  232. #ifdef CONFIG_MMU
  233. /*
  234. * remaps an array of PAGE_SIZE pages into another vm_area
  235. * Cannot be used in non-sleeping contexts
  236. */
  237. void *dma_common_pages_remap(struct page **pages, size_t size,
  238. unsigned long vm_flags, pgprot_t prot,
  239. const void *caller)
  240. {
  241. struct vm_struct *area;
  242. area = get_vm_area_caller(size, vm_flags, caller);
  243. if (!area)
  244. return NULL;
  245. area->pages = pages;
  246. if (map_vm_area(area, prot, pages)) {
  247. vunmap(area->addr);
  248. return NULL;
  249. }
  250. return area->addr;
  251. }
  252. /*
  253. * remaps an allocated contiguous region into another vm_area.
  254. * Cannot be used in non-sleeping contexts
  255. */
  256. void *dma_common_contiguous_remap(struct page *page, size_t size,
  257. unsigned long vm_flags,
  258. pgprot_t prot, const void *caller)
  259. {
  260. int i;
  261. struct page **pages;
  262. void *ptr;
  263. unsigned long pfn;
  264. pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
  265. if (!pages)
  266. return NULL;
  267. for (i = 0, pfn = page_to_pfn(page); i < (size >> PAGE_SHIFT); i++)
  268. pages[i] = pfn_to_page(pfn + i);
  269. ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller);
  270. kfree(pages);
  271. return ptr;
  272. }
  273. /*
  274. * unmaps a range previously mapped by dma_common_*_remap
  275. */
  276. void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
  277. {
  278. struct vm_struct *area = find_vm_area(cpu_addr);
  279. if (!area || (area->flags & vm_flags) != vm_flags) {
  280. WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
  281. return;
  282. }
  283. unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
  284. vunmap(cpu_addr);
  285. }
  286. #endif