dma-mapping.h

/*
 * DMA Mapping glue for ARC
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef ASM_ARC_DMA_MAPPING_H
#define ASM_ARC_DMA_MAPPING_H

#include <asm-generic/dma-coherent.h>
#include <asm/cacheflush.h>

void *dma_alloc_noncoherent(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t gfp);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
			  dma_addr_t dma_handle);

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp);

void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
		       dma_addr_t dma_handle);
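/*
 * A minimal usage sketch of the coherent API above, as seen from a
 * hypothetical driver (the device pointer, size and error handling are
 * illustrative assumptions, not part of this header):
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	(program @handle into the device; the CPU may use @buf directly,
 *	 with no explicit cache maintenance)
 *	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */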
/* drivers/base/dma-mapping.c */
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
				  void *cpu_addr, dma_addr_t dma_addr,
				  size_t size);

#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
/*
 * Streaming DMA mapping API...
 * The CPU accesses the page via its normal paddr, so it must be explicitly
 * made consistent before each use.
 */
static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size,
					   enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;
	default:
		pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
	}
}
void __arc_dma_cache_sync(unsigned long paddr, size_t size,
			  enum dma_data_direction dir);

#define _dma_cache_sync(addr, sz, dir)			\
do {							\
	if (__builtin_constant_p(dir))			\
		__inline_dma_cache_sync(addr, sz, dir);	\
	else						\
		__arc_dma_cache_sync(addr, sz, dir);	\
} while (0)
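/*
 * The dispatch above hinges on __builtin_constant_p(): when @dir is a
 * compile-time constant, the switch in __inline_dma_cache_sync() folds
 * down to the one matching cache op; a runtime-variable @dir falls back
 * to the out-of-line __arc_dma_cache_sync(). A sketch (variable names
 * assumed for illustration):
 *
 *	_dma_cache_sync(paddr, len, DMA_TO_DEVICE);
 *	effectively compiles to: dma_cache_wback(paddr, len);
 */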
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction dir)
{
	_dma_cache_sync((unsigned long)cpu_addr, size, dir);
	return (dma_addr_t)cpu_addr;
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
		 size_t size, enum dma_data_direction dir)
{
}
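/*
 * A hedged usage sketch for the single-buffer streaming API (buffer,
 * length and direction are assumptions for illustration):
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	(hand @dma to the device, wait for the transfer to complete)
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 *
 * dma_unmap_single() is an empty stub here: with no IOMMU the dma_addr_t
 * is just the physical address, so there is no mapping to tear down and
 * the cache work was already done at map time.
 */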
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + offset;
	return dma_map_single(dev, (void *)paddr, size, dir);
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
	       size_t size, enum dma_data_direction dir)
{
}
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg,
	   int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
					      s->length, dir);

	return nents;
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	     int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
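/*
 * Scatter-gather sketch (the sg table setup, e.g. via sg_init_table(),
 * and the entry count are assumed done elsewhere):
 *
 *	int count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	(program the device from sg_dma_address()/sg_dma_len() of each entry)
 *	dma_unmap_sg(dev, sgl, count, DMA_FROM_DEVICE);
 *
 * Note dma_map_sg() above cannot fail on this port; it always returns
 * @nents.
 */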
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	_dma_cache_sync(dma_handle + offset, size, DMA_FROM_DEVICE);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	_dma_cache_sync(dma_handle + offset, size, DMA_TO_DEVICE);
}
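/*
 * The sync calls let a driver hand one streaming buffer back and forth
 * without remapping it. Sketch (names assumed for illustration):
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	(CPU inspects the data the device just wrote)
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	(buffer is handed back to the device for the next transfer)
 *
 * Note this implementation keys the cache op off which sync is called
 * (invalidate for the _for_cpu variants, writeback for _for_device) and
 * ignores the @dir argument.
 */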
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
		    enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync((unsigned long)sg_virt(sg), sg->length, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
		       int nelems, enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync((unsigned long)sg_virt(sg), sg->length, dir);
}
static inline int dma_supported(struct device *dev, u64 dma_mask)
{
	/* Support 32 bit DMA mask exclusively */
	return dma_mask == DMA_BIT_MASK(32);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
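/*
 * Probe-time sketch (hypothetical driver and device): dma_supported()
 * accepts only a full 32-bit mask, so any other request fails:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *	(DMA_BIT_MASK(24) or DMA_BIT_MASK(64) would yield -EIO here)
 */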
#endif /* ASM_ARC_DMA_MAPPING_H */