dma-mapping.h

/* DMA mapping routines for the MN10300 arch
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>

#include <asm/cache.h>
#include <asm/io.h>

/*
 * See Documentation/DMA-API.txt for the description of how the
 * following DMA API should work.
 */

extern void *dma_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, int flag);

extern void dma_free_coherent(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle);

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
#define dma_free_noncoherent(d, s, v, h)  dma_free_coherent((d), (s), (v), (h))
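
/*
 * Illustrative sketch, not part of the original header: a driver might
 * obtain and release a coherent buffer as below ("dev" is a hypothetical
 * struct device; the flag argument takes a GFP allocation flag):
 *
 *	dma_addr_t dma_handle;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
 *
 *	if (buf) {
 *		... use buf from the CPU, hand dma_handle to the device ...
 *		dma_free_coherent(dev, PAGE_SIZE, buf, dma_handle);
 *	}
 */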

static inline
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
                          enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        mn10300_dcache_flush_inv();
        return virt_to_bus(ptr);
}

static inline
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                      enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}
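
/*
 * Streaming-mapping sketch (illustrative; "dev", "buf" and "len" are
 * hypothetical). The handle returned on this arch is simply the bus
 * address of the buffer, and the whole data cache is flushed at map time:
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... program the device with addr and run the transfer ...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 */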

static inline
int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
               enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(nents == 0 || sglist[0].length == 0);

        for_each_sg(sglist, sg, nents, i) {
                BUG_ON(!sg_page(sg));

                sg->dma_address = sg_phys(sg);
        }

        mn10300_dcache_flush_inv();
        return nents;
}

static inline
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
                  enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
}
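
/*
 * Scatter/gather sketch (illustrative; "dev", "sglist" and "nents" are
 * hypothetical). dma_map_sg() above fills in sg->dma_address for every
 * entry and flushes the data cache once, so a driver can then walk the
 * list to program its hardware:
 *
 *	int i, count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	struct scatterlist *sg;
 *
 *	for_each_sg(sglist, sg, count, i)
 *		... feed sg_dma_address(sg) and sg_dma_len(sg) to the device ...
 *
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */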

static inline
dma_addr_t dma_map_page(struct device *dev, struct page *page,
                        unsigned long offset, size_t size,
                        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        return page_to_bus(page) + offset;
}

static inline
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
                    enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}
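
/*
 * Note that, unlike dma_map_single(), dma_map_page() above does no cache
 * flush: the mapping is pure address arithmetic (the bus address of the
 * page plus the offset), presumably leaving any cache maintenance to the
 * dma_sync_*() routines below.
 */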

static inline
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                             size_t size, enum dma_data_direction direction)
{
}

static inline
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                                size_t size, enum dma_data_direction direction)
{
        mn10300_dcache_flush_inv();
}

static inline
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
        mn10300_dcache_flush_inv();
}

static inline
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                         int nelems, enum dma_data_direction direction)
{
}

static inline
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                            int nelems, enum dma_data_direction direction)
{
        mn10300_dcache_flush_inv();
}
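
/*
 * The asymmetry in the dma_sync_*() routines above is intentional: making
 * a buffer visible *to the device* requires flushing CPU writes out of the
 * data cache, whereas syncing *for the CPU* needs no work here, presumably
 * because mn10300_dcache_flush_inv() both flushes and invalidates, leaving
 * no stale cache lines behind.
 */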

static inline
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}
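
/*
 * dma_mapping_error() above always reports success: on this arch a mapping
 * is a plain bus-address computation with no allocation involved, so there
 * is no failure case to detect.
 */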

static inline
int dma_supported(struct device *dev, u64 mask)
{
        /*
         * we fall back to GFP_DMA when the mask isn't all 1s, so we can't
         * guarantee allocations that must be within a tighter range than
         * GFP_DMA
         */
        if (mask < 0x00ffffff)
                return 0;
        return 1;
}

static inline
int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;
        return 0;
}
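
/*
 * Illustrative use of dma_set_mask() (hypothetical "dev"): a driver states
 * its addressing capability during probe and bails out if unsupported:
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;	... or fall back to a narrower mask ...
 */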

static inline
void dma_cache_sync(void *vaddr, size_t size,
                    enum dma_data_direction direction)
{
        mn10300_dcache_flush_inv();
}

/* Not supported for now */
static inline int dma_mmap_coherent(struct device *dev,
                                    struct vm_area_struct *vma, void *cpu_addr,
                                    dma_addr_t dma_addr, size_t size)
{
        return -EINVAL;
}

static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                                  void *cpu_addr, dma_addr_t dma_addr,
                                  size_t size)
{
        return -EINVAL;
}

#endif