cache.S (4.4 KB)
  1. /*
  2. * Cache maintenance
  3. *
  4. * Copyright (C) 2001 Deep Blue Solutions Ltd.
  5. * Copyright (C) 2012 ARM Ltd.
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  18. */
  19. #include <linux/errno.h>
  20. #include <linux/linkage.h>
  21. #include <linux/init.h>
  22. #include <asm/assembler.h>
  23. #include <asm/cpufeature.h>
  24. #include <asm/alternative.h>
  25. #include "proc-macros.S"
/*
 *	flush_icache_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified region.
 *	This is typically used when code has been written to a memory region,
 *	and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(flush_icache_range)
	/* FALLTHROUGH */

/*
 *	__flush_cache_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified region.
 *	This is typically used when code has been written to a memory region,
 *	and will be executed.
 *
 *	May fault on user addresses: the USER() macro registers an exception
 *	table entry so a fault branches to label 9f below.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	Returns 0 on success, -EFAULT if a user access faulted.
 */
ENTRY(__flush_cache_user_range)
	dcache_line_size x2, x3			// x2 = D-cache line size (bytes)
	sub	x3, x2, #1			// x3 = line-alignment mask
	bic	x4, x0, x3			// x4 = start rounded down to line
1:
USER(9f, dc	cvau, x4 )			// clean D line to PoU
	add	x4, x4, x2			// next cache line
	cmp	x4, x1
	b.lo	1b
	dsb	ish				// cleans complete before I-side work

	icache_line_size x2, x3			// x2 = I-cache line size (bytes)
	sub	x3, x2, #1
	bic	x4, x0, x3			// restart from (aligned) start
1:
USER(9f, ic	ivau, x4 )			// invalidate I line to PoU
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
	dsb	ish				// complete the invalidation
	isb					// flush fetched-ahead instructions
	mov	x0, #0				// success
	ret
9:						// exception-table landing pad
	mov	x0, #-EFAULT
	ret
ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)
/*
 *	__flush_dcache_area(kaddr, size)
 *
 *	Ensure that the data held in the page kaddr is written back to the
 *	page in question.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
ENTRY(__flush_dcache_area)
	dcache_line_size x2, x3			// x2 = D-cache line size (bytes)
	add	x1, x0, x1			// x1 = end address (kaddr + size)
	sub	x3, x2, #1			// x3 = line-alignment mask
	bic	x0, x0, x3			// round kaddr down to a line
1:	dc	civac, x0			// clean & invalidate D line / unified line
	add	x0, x0, x2			// next cache line
	cmp	x0, x1
	b.lo	1b
	dsb	sy				// make the writeback visible system-wide
	ret
ENDPIPROC(__flush_dcache_area)
/*
 *	__inval_cache_range(start, end)
 *	- start   - start address of region
 *	- end     - end address of region
 */
ENTRY(__inval_cache_range)
	/* FALLTHROUGH */

/*
 *	__dma_inv_range(start, end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	Lines only partially covered by [start, end) are cleaned AND
 *	invalidated (dc civac) so that unrelated data sharing the line
 *	is not discarded; fully covered lines are invalidated only.
 */
__dma_inv_range:
	dcache_line_size x2, x3			// x2 = D-cache line size (bytes)
	sub	x3, x2, #1			// x3 = line-alignment mask
	tst	x1, x3				// end cache line aligned?
	bic	x1, x1, x3			// round end down to a line
	b.eq	1f
	dc	civac, x1			// partial trailing line: clean & invalidate D / U line
1:	tst	x0, x3				// start cache line aligned?
	bic	x0, x0, x3			// round start down to a line
	b.eq	2f
	dc	civac, x0			// partial leading line: clean & invalidate D / U line
	b	3f
2:	dc	ivac, x0			// fully covered line: invalidate D / U line
3:	add	x0, x0, x2			// next cache line
	cmp	x0, x1
	b.lo	2b				// interior lines take the invalidate-only path
	dsb	sy				// complete maintenance before DMA proceeds
	ret
ENDPIPROC(__inval_cache_range)
ENDPROC(__dma_inv_range)
/*
 *	__dma_clean_range(start, end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	On CPUs with ARM64_WORKAROUND_CLEAN_CACHE, "dc cvac" is patched at
 *	runtime to "dc civac" via the alternatives framework.
 */
__dma_clean_range:
	dcache_line_size x2, x3			// x2 = D-cache line size (bytes)
	sub	x3, x2, #1			// x3 = line-alignment mask
	bic	x0, x0, x3			// round start down to a line
1:
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	cvac, x0			// clean D / U line to PoC
alternative_else
	dc	civac, x0			// erratum workaround: clean & invalidate
alternative_endif
	add	x0, x0, x2			// next cache line
	cmp	x0, x1
	b.lo	1b
	dsb	sy				// complete cleans before DMA proceeds
	ret
ENDPROC(__dma_clean_range)
/*
 *	__dma_flush_range(start, end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(__dma_flush_range)
	dcache_line_size x2, x3			// x2 = D-cache line size (bytes)
	sub	x3, x2, #1			// x3 = line-alignment mask
	bic	x0, x0, x3			// round start down to a line
1:	dc	civac, x0			// clean & invalidate D / U line
	add	x0, x0, x2			// next cache line
	cmp	x0, x1
	b.lo	1b
	dsb	sy				// complete maintenance before DMA proceeds
	ret
ENDPIPROC(__dma_flush_range)
/*
 *	__dma_map_area(start, size, dir)
 *	- start   - kernel virtual start address
 *	- size    - size of region
 *	- dir     - DMA direction
 *
 *	Converts (start, size) to (start, end) in x0/x1 and tail-calls the
 *	appropriate maintenance routine for the transfer direction.
 */
ENTRY(__dma_map_area)
	add	x1, x1, x0			// x1 = end = start + size
	cmp	w2, #DMA_FROM_DEVICE
	b.eq	__dma_inv_range			// device writes: invalidate stale lines
	b	__dma_clean_range		// device reads: clean dirty lines to memory
ENDPIPROC(__dma_map_area)
/*
 *	__dma_unmap_area(start, size, dir)
 *	- start   - kernel virtual start address
 *	- size    - size of region
 *	- dir     - DMA direction
 *
 *	After DMA_TO_DEVICE no CPU-side maintenance is needed; for any other
 *	direction, invalidate so the CPU sees the data the device wrote.
 */
ENTRY(__dma_unmap_area)
	add	x1, x1, x0			// x1 = end = start + size
	cmp	w2, #DMA_TO_DEVICE
	b.ne	__dma_inv_range			// tail-call; returns to our caller
	ret
ENDPIPROC(__dma_unmap_area)