dma.c

/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/bug.h>
#include <asm/machdep.h>
#include <asm/swiotlb.h>
#include <asm/iommu.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
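
/*
 * Return the device's coherent PFN limit: one past the highest page frame
 * number reachable through coherent_dma_mask. When SWIOTLB handles this
 * device, the limit is further clamped by max_direct_dma_addr.
 */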
static u64 __maybe_unused get_pfn_limit(struct device *dev)
{
        u64 pfn = (dev->coherent_dma_mask >> PAGE_SHIFT) + 1;
        struct dev_archdata __maybe_unused *sd = &dev->archdata;

#ifdef CONFIG_SWIOTLB
        if (sd->max_direct_dma_addr && sd->dma_ops == &swiotlb_dma_ops)
                pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
#endif

        return pfn;
}
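
/*
 * Direct DMA is usable only if the device's mask covers the highest bus
 * address of system memory (end of DRAM plus the per-device offset); the
 * Freescale exception via ZONE_DMA/ZONE_DMA32 is handled below.
 */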
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
        u64 limit = get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);

        /* Limit fits in the mask, we are good */
        if (mask >= limit)
                return 1;

#ifdef CONFIG_FSL_SOC
        /* Freescale gets another chance via ZONE_DMA/ZONE_DMA32, however
         * that will have to be refined if/when they support iommus
         */
        return 1;
#endif
        /* Sorry ... */
        return 0;
#else
        return 1;
#endif
}
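
/*
 * Allocate a coherent buffer for the direct ops. On non-cache-coherent
 * platforms this goes through __dma_alloc_coherent(); otherwise pages are
 * allocated on the device's node, zeroed, and the bus address is the
 * physical address plus the per-device offset.
 */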
void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flag,
                                  struct dma_attrs *attrs)
{
        void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
        ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
        if (ret == NULL)
                return NULL;
        *dma_handle += get_dma_offset(dev);
        return ret;
#else
        struct page *page;
        int node = dev_to_node(dev);
#ifdef CONFIG_FSL_SOC
        u64 pfn = get_pfn_limit(dev);
        int zone;

        /*
         * This code should be OK on other platforms, but we have drivers that
         * don't set coherent_dma_mask. As a workaround we just ifdef it. This
         * whole routine needs some serious cleanup.
         */
        zone = dma_pfn_limit_to_zone(pfn);
        if (zone < 0) {
                dev_err(dev, "%s: No suitable zone for pfn %#llx\n",
                        __func__, pfn);
                return NULL;
        }

        switch (zone) {
        case ZONE_DMA:
                flag |= GFP_DMA;
                break;
#ifdef CONFIG_ZONE_DMA32
        case ZONE_DMA32:
                flag |= GFP_DMA32;
                break;
#endif
        }
#endif /* CONFIG_FSL_SOC */

        /* ignore region specifiers */
        flag &= ~(__GFP_HIGHMEM);

        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = __pa(ret) + get_dma_offset(dev);

        return ret;
#endif
}

void __dma_direct_free_coherent(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle,
                                struct dma_attrs *attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
        __dma_free_coherent(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag,
                                       struct dma_attrs *attrs)
{
        struct iommu_table *iommu;

        /* The coherent mask may be smaller than the real mask, check if
         * we can really use the direct ops
         */
        if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
                return __dma_direct_alloc_coherent(dev, size, dma_handle,
                                                   flag, attrs);

        /* Ok we can't ... do we have an iommu ? If not, fail */
        iommu = get_iommu_table_base(dev);
        if (!iommu)
                return NULL;

        /* Try to use the iommu */
        return iommu_alloc_coherent(dev, iommu, size, dma_handle,
                                    dev->coherent_dma_mask, flag,
                                    dev_to_node(dev));
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle,
                                     struct dma_attrs *attrs)
{
        struct iommu_table *iommu;

        /* See comments in dma_direct_alloc_coherent() */
        if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
                return __dma_direct_free_coherent(dev, size, vaddr, dma_handle,
                                                  attrs);

        /* Maybe we used an iommu ... */
        iommu = get_iommu_table_base(dev);

        /* If we hit that we should have never allocated in the first
         * place so how come we are freeing ?
         */
        if (WARN_ON(!iommu))
                return;
        iommu_free_coherent(iommu, size, vaddr, dma_handle);
}
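
/*
 * Map a coherent buffer into user space. Non-cache-coherent platforms need
 * an uncached mapping and look up the PFN via __dma_get_coherent_pfn();
 * coherent ones can remap the kernel linear mapping directly.
 */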
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                             void *cpu_addr, dma_addr_t handle, size_t size,
                             struct dma_attrs *attrs)
{
        unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
        pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
        return remap_pfn_range(vma, vma->vm_start,
                               pfn + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}
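
/*
 * Direct scatterlist mapping: the bus address of each segment is its
 * physical address plus the per-device offset; __dma_sync_page() performs
 * whatever cache maintenance a non-coherent platform requires.
 */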
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
                             struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
                sg->dma_length = sg->length;
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
        }

        return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction,
                                struct dma_attrs *attrs)
{
}
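
/*
 * The required DMA mask is the smallest all-ones value (2^n - 1) that
 * covers every bus address up to the end of DRAM plus the per-device
 * offset.
 */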
static u64 dma_direct_get_required_mask(struct device *dev)
{
        u64 end, mask;

        end = memblock_end_of_DRAM() + get_dma_offset(dev);

        mask = 1ULL << (fls64(end) - 1);
        mask += mask - 1;

        return mask;
}
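
/*
 * Single-page mapping for the direct ops: perform any cache maintenance the
 * platform needs via __dma_sync_page(), then return the page's physical
 * address plus the per-device offset as the bus address.
 */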
static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction dir,
                                             struct dma_attrs *attrs)
{
        BUG_ON(dir == DMA_NONE);
        __dma_sync_page(page, offset, size, dir);
        return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
}
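
/*
 * Explicit sync callbacks are only provided on platforms without hardware
 * cache coherency, where the CPU caches must be flushed or invalidated
 * around device accesses.
 */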
#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
                                      struct scatterlist *sgl, int nents,
                                      enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
                                          dma_addr_t dma_handle, size_t size,
                                          enum dma_data_direction direction)
{
        __dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif
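
/* The default dma_map_ops for directly mapped busses. */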
struct dma_map_ops dma_direct_ops = {
        .alloc = dma_direct_alloc_coherent,
        .free = dma_direct_free_coherent,
        .mmap = dma_direct_mmap_coherent,
        .map_sg = dma_direct_map_sg,
        .unmap_sg = dma_direct_unmap_sg,
        .dma_supported = dma_direct_dma_supported,
        .map_page = dma_direct_map_page,
        .unmap_page = dma_direct_unmap_page,
        .get_required_mask = dma_direct_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
        .sync_single_for_cpu = dma_direct_sync_single,
        .sync_single_for_device = dma_direct_sync_single,
        .sync_sg_for_cpu = dma_direct_sync_sg,
        .sync_sg_for_device = dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        if (!dma_supported(dev, mask)) {
                /*
                 * We need to special case the direct DMA ops which can
                 * support a fallback for coherent allocations. There
                 * is no dma_op->set_coherent_mask() so we have to do
                 * things the hard way:
                 */
                if (get_dma_ops(dev) != &dma_direct_ops ||
                    get_iommu_table_base(dev) == NULL ||
                    !dma_iommu_dma_supported(dev, mask))
                        return -EIO;
        }
        dev->coherent_dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

int __dma_set_mask(struct device *dev, u64 dma_mask)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
                return dma_ops->set_dma_mask(dev, dma_mask);
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;
        *dev->dma_mask = dma_mask;
        return 0;
}
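
/*
 * Setting the DMA mask is dispatched in order of specificity: a machdep
 * hook if the platform provides one, then the PCI host bridge's
 * controller_ops for PCI devices, and finally the generic __dma_set_mask()
 * path above.
 */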
int dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (ppc_md.dma_set_mask)
                return ppc_md.dma_set_mask(dev, dma_mask);

        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);
                struct pci_controller *phb = pci_bus_to_host(pdev->bus);
                if (phb->controller_ops.dma_set_mask)
                        return phb->controller_ops.dma_set_mask(pdev, dma_mask);
        }

        return __dma_set_mask(dev, dma_mask);
}
EXPORT_SYMBOL(dma_set_mask);
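
/*
 * Ask the device's dma_map_ops for its required mask; if the ops don't
 * provide one, fall back to a mask as wide as dma_addr_t.
 */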
u64 __dma_get_required_mask(struct device *dev)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        if (unlikely(dma_ops == NULL))
                return 0;

        if (dma_ops->get_required_mask)
                return dma_ops->get_required_mask(dev);

        return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}
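
/*
 * As with dma_set_mask(): prefer the machdep hook, then the PCI host
 * bridge's controller_ops, and finally the generic per-ops query.
 */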
u64 dma_get_required_mask(struct device *dev)
{
        if (ppc_md.dma_get_required_mask)
                return ppc_md.dma_get_required_mask(dev);

        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);
                struct pci_controller *phb = pci_bus_to_host(pdev->bus);
                if (phb->controller_ops.dma_get_required_mask)
                        return phb->controller_ops.dma_get_required_mask(pdev);
        }

        return __dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
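
/*
 * Initialise dma-debug with a pool of preallocated entries and register the
 * PCI and VIO bus types with it when they are configured in.
 */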
static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
#ifdef CONFIG_PCI
        dma_debug_add_bus(&pci_bus_type);
#endif
#ifdef CONFIG_IBMVIO
        dma_debug_add_bus(&vio_bus_type);
#endif

        return 0;
}
fs_initcall(dma_init);