#ifndef _PARISC_DMA_MAPPING_H
#define _PARISC_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <asm/cacheflush.h>

/* See Documentation/DMA-API-HOWTO.txt */

struct hppa_dma_ops {
        int (*dma_supported)(struct device *dev, u64 mask);
        void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
        void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
        void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t iova);
        dma_addr_t (*map_single)(struct device *dev, void *addr, size_t size, enum dma_data_direction direction);
        void (*unmap_single)(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction);
        int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction);
        void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nhwents, enum dma_data_direction direction);
        void (*dma_sync_single_for_cpu)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
        void (*dma_sync_single_for_device)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
        void (*dma_sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
        void (*dma_sync_sg_for_device)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
};
/*
** We could live without the hppa_dma_ops indirection if we didn't want
** to support 4 different coherent dma models with one binary (they will
** someday be loadable modules):
**
**     I/O MMU        consistent method        dma_sync behavior
**     =============  =======================  =======================
**     a) PA-7x00LC   uncachable host memory   flush/purge
**     b) U2/Uturn    cachable host memory     NOP
**     c) Ike/Astro   cachable host memory     NOP
**     d) EPIC/SAGA   memory on EPIC/SAGA      flush/reset DMA channel
**
** PA-7[13]00LC processors have a GSC bus interface and no I/O MMU.
**
** Systems (e.g. PCX-T workstations) that don't fall into the above
** categories will need to modify the needed drivers to perform
** flush/purge and allocate "regular" cacheable pages for everything.
*/
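/*
 * Illustrative sketch only, not part of this header: roughly how one of
 * the per-machine implementations fills in the ops table.  The pa11_*
 * function names below are hypothetical stand-ins; the real tables live
 * in the arch and IOMMU code (e.g. pcxl_dma_ops for case (a) above).
 */
#if 0
struct hppa_dma_ops pcxl_dma_ops = {
        .dma_supported               = pa11_dma_supported,       /* hypothetical */
        .alloc_consistent            = pa11_dma_alloc_consistent,
        .alloc_noncoherent           = pa11_dma_alloc_noncoherent,
        .free_consistent             = pa11_dma_free_consistent,
        .map_single                  = pa11_dma_map_single,
        .unmap_single                = pa11_dma_unmap_single,
        .map_sg                      = pa11_dma_map_sg,
        .unmap_sg                    = pa11_dma_unmap_sg,
        .dma_sync_single_for_cpu     = pa11_dma_sync_single_for_cpu,
        .dma_sync_single_for_device  = pa11_dma_sync_single_for_device,
        .dma_sync_sg_for_cpu         = pa11_dma_sync_sg_for_cpu,
        .dma_sync_sg_for_device      = pa11_dma_sync_sg_for_device,
};
#endif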
#define DMA_ERROR_CODE  (~(dma_addr_t)0)

#ifdef CONFIG_PA11
extern struct hppa_dma_ops pcxl_dma_ops;
extern struct hppa_dma_ops pcx_dma_ops;
#endif

extern struct hppa_dma_ops *hppa_dma_ops;

/* The attrs argument is ignored on this port. */
#define dma_alloc_attrs(d, s, h, f, a) dma_alloc_coherent(d, s, h, f)
#define dma_free_attrs(d, s, h, f, a) dma_free_coherent(d, s, h, f)
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t flag)
{
        return hppa_dma_ops->alloc_consistent(dev, size, dma_handle, flag);
}

static inline void *
dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                      gfp_t flag)
{
        return hppa_dma_ops->alloc_noncoherent(dev, size, dma_handle, flag);
}

static inline void
dma_free_coherent(struct device *dev, size_t size,
                  void *vaddr, dma_addr_t dma_handle)
{
        hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
}

/* Both free paths funnel through free_consistent(). */
static inline void
dma_free_noncoherent(struct device *dev, size_t size,
                     void *vaddr, dma_addr_t dma_handle)
{
        hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
}
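/*
 * Usage sketch, not part of this header: a driver allocates a coherent
 * descriptor ring at probe time and frees it on remove.  RING_BYTES and
 * the my_* names are hypothetical.
 */
#if 0
static void *ring;
static dma_addr_t ring_dma;

static int my_probe(struct device *dev)
{
        ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;
        /* ring is the CPU-visible address, ring_dma is what the device uses */
        return 0;
}

static void my_remove(struct device *dev)
{
        dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
}
#endif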
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        return hppa_dma_ops->map_single(dev, ptr, size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        hppa_dma_ops->unmap_single(dev, dma_addr, size, direction);
}
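/*
 * Usage sketch, not part of this header: a streaming mapping around a
 * single transfer.  Since dma_mapping_error() is hard-wired to 0 on this
 * port (see below), a defensive driver can compare against DMA_ERROR_CODE
 * directly; start_device_dma() is a hypothetical helper.
 */
#if 0
static int my_send(struct device *dev, void *buf, size_t len)
{
        dma_addr_t busaddr;

        busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (busaddr == DMA_ERROR_CODE)
                return -ENOMEM;
        start_device_dma(dev, busaddr, len);
        /* ... wait for the device to finish ... */
        dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
        return 0;
}
#endif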
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        return hppa_dma_ops->map_sg(dev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{
        hppa_dma_ops->unmap_sg(dev, sg, nhwentries, direction);
}
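/*
 * Usage sketch, not part of this header: map a scatterlist and program
 * each resulting segment.  dma_map_sg() may coalesce entries, so the loop
 * bound is the *returned* count, while the unmap uses the original nents.
 * program_segment() is a hypothetical helper; for_each_sg() comes from
 * <linux/scatterlist.h>, already included above.
 */
#if 0
static int my_map_rx(struct device *dev, struct scatterlist *sglist, int nents)
{
        struct scatterlist *s;
        int i, count;

        count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
        if (!count)
                return -ENOMEM;
        for_each_sg(sglist, s, count, i)
                program_segment(dev, sg_dma_address(s), sg_dma_len(s));
        /* ... after the DMA completes ... */
        dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
        return 0;
}
#endif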
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        return dma_map_single(dev, (page_address(page) + (offset)), size, direction);
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        dma_unmap_single(dev, dma_address, size, direction);
}
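/*
 * Usage sketch, not part of this header: mapping part of a page.  On this
 * port dma_map_page() is just dma_map_single() on page_address(), which
 * works because PA-RISC has no highmem, so page_address() is always valid.
 * my_map_frag() is a hypothetical wrapper.
 */
#if 0
static dma_addr_t my_map_frag(struct device *dev, struct page *page,
                              unsigned long offset, size_t len)
{
        dma_addr_t busaddr;

        busaddr = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
        /* ... run the transfer, then ... */
        dma_unmap_page(dev, busaddr, len, DMA_TO_DEVICE);
        return busaddr;
}
#endif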
/*
 * The coherent machines (cases b/c in the table above) leave these hooks
 * NULL, so the sync calls degenerate to NOPs.
 */
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
        if (hppa_dma_ops->dma_sync_single_for_cpu)
                hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, 0,
                                                      size, direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                           enum dma_data_direction direction)
{
        if (hppa_dma_ops->dma_sync_single_for_device)
                hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, 0,
                                                         size, direction);
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
        if (hppa_dma_ops->dma_sync_single_for_cpu)
                hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, offset,
                                                      size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
        if (hppa_dma_ops->dma_sync_single_for_device)
                hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, offset,
                                                         size, direction);
}
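/*
 * Usage sketch, not part of this header: touching a streaming buffer
 * between transfers.  Hand the buffer back to the CPU, inspect it, then
 * give ownership back to the device before restarting DMA.
 * examine_rx_buffer() is a hypothetical helper.
 */
#if 0
static void my_peek_rx(struct device *dev, dma_addr_t busaddr,
                       void *buf, size_t len)
{
        dma_sync_single_for_cpu(dev, busaddr, len, DMA_FROM_DEVICE);
        examine_rx_buffer(buf, len);
        dma_sync_single_for_device(dev, busaddr, len, DMA_FROM_DEVICE);
}
#endif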
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction direction)
{
        if (hppa_dma_ops->dma_sync_sg_for_cpu)
                hppa_dma_ops->dma_sync_sg_for_cpu(dev, sg, nelems, direction);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction direction)
{
        if (hppa_dma_ops->dma_sync_sg_for_device)
                hppa_dma_ops->dma_sync_sg_for_device(dev, sg, nelems, direction);
}
static inline int
dma_supported(struct device *dev, u64 mask)
{
        return hppa_dma_ops->dma_supported(dev, mask);
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
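/*
 * Usage sketch, not part of this header: the usual probe-time mask
 * negotiation.  DMA_BIT_MASK() comes from <linux/dma-mapping.h>, which is
 * what drivers actually include.
 */
#if 0
static int my_probe_dma(struct device *dev)
{
        if (dma_set_mask(dev, DMA_BIT_MASK(32))) {
                dev_warn(dev, "no suitable DMA available\n");
                return -EIO;
        }
        return 0;
}
#endif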
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        /* Only the non-coherent machines install a sync hook, so its
           presence doubles as the "do we need to flush?" test. */
        if (hppa_dma_ops->dma_sync_single_for_cpu)
                flush_kernel_dcache_range((unsigned long)vaddr, size);
}
static inline void *
parisc_walk_tree(struct device *dev)
{
        struct device *otherdev;

        if (likely(dev->platform_data != NULL))
                return dev->platform_data;

        /* OK, just traverse the bus to find it */
        for (otherdev = dev->parent; otherdev;
             otherdev = otherdev->parent) {
                if (otherdev->platform_data) {
                        /* Cache the answer so the walk happens only once. */
                        dev->platform_data = otherdev->platform_data;
                        break;
                }
        }

        return dev->platform_data;
}

#define GET_IOC(dev) ({                                 \
        void *__pdata = parisc_walk_tree(dev);          \
        __pdata ? HBA_DATA(__pdata)->iommu : NULL;      \
})
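/*
 * Usage sketch, not part of this header: how an IOMMU driver might look
 * up the ioc controlling a device.  HBA_DATA() comes from the PA-RISC
 * bus-walk support headers; the struct ioc layout belongs to whichever
 * IOMMU driver owns it, and my_find_ioc() is hypothetical.
 */
#if 0
static struct ioc *my_find_ioc(struct device *dev)
{
        struct ioc *ioc = GET_IOC(dev);

        if (!ioc)
                panic("%s: no IOC found\n", __func__); /* mirrors the port's panic-on-error policy */
        return ioc;
}
#endif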
#ifdef CONFIG_IOMMU_CCIO
struct parisc_device;
struct ioc;
void *ccio_get_iommu(const struct parisc_device *dev);
int ccio_request_resource(const struct parisc_device *dev,
                          struct resource *res);
int ccio_allocate_resource(const struct parisc_device *dev,
                           struct resource *res, unsigned long size,
                           unsigned long min, unsigned long max,
                           unsigned long align);
#else /* !CONFIG_IOMMU_CCIO */
#define ccio_get_iommu(dev) NULL
#define ccio_request_resource(dev, res) insert_resource(&iomem_resource, res)
#define ccio_allocate_resource(dev, res, size, min, max, align) \
                allocate_resource(&iomem_resource, res, size, min, max, \
                                  align, NULL, NULL)
#endif /* !CONFIG_IOMMU_CCIO */

#ifdef CONFIG_IOMMU_SBA
struct parisc_device;
void *sba_get_iommu(struct parisc_device *dev);
#endif
/* At the moment, we panic on error for IOMMU resource exhaustion */
#define dma_mapping_error(dev, x)       0

/* This API cannot be supported on PA-RISC */
static inline int dma_mmap_coherent(struct device *dev,
                                    struct vm_area_struct *vma, void *cpu_addr,
                                    dma_addr_t dma_addr, size_t size)
{
        return -EINVAL;
}

static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                                  void *cpu_addr, dma_addr_t dma_addr,
                                  size_t size)
{
        return -EINVAL;
}
#endif /* _PARISC_DMA_MAPPING_H */