proc-macros.S

/*
 * We need constants.h for:
 *	VMA_VM_MM
 *	VMA_VM_FLAGS
 *	VM_EXEC
 */
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * vma_vm_flags - get vma->vm_flags
 */
	.macro	vma_vm_flags, rd, rn
	ldr	\rd, [\rn, #VMA_VM_FLAGS]
	.endm
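
/*
 * Illustrative only (not part of the original file; registers are
 * hypothetical): a fault handler holding a vma pointer in r4 could
 * fetch the mm and flags and test for an executable mapping like so:
 *
 *	vma_vm_mm	r0, r4		@ r0 = vma->vm_mm
 *	vma_vm_flags	r1, r4		@ r1 = vma->vm_flags
 *	tst	r1, #VM_EXEC		@ executable mapping?
 */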

/*
 * tsk_mm - get active_mm from a thread_info pointer
 */
	.macro	tsk_mm, rd, rn
	ldr	\rd, [\rn, #TI_TASK]
	ldr	\rd, [\rd, #TSK_ACTIVE_MM]
	.endm

/*
 * act_mm - get current->active_mm
 */
	.macro	act_mm, rd
	bic	\rd, sp, #8128
	bic	\rd, \rd, #63
	ldr	\rd, [\rd, #TI_TASK]
	ldr	\rd, [\rd, #TSK_ACTIVE_MM]
	.endm
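
/*
 * How act_mm works (assuming the 8KB THREAD_SIZE this code targets):
 * the kernel stack is 8KB aligned with struct thread_info at its base,
 * so rounding sp down to an 8KB boundary yields the thread_info
 * pointer. ARM immediates are 8-bit rotated values, so the 13-bit mask
 * 0x1fff cannot be encoded in one instruction; instead #8128 (0x1fc0)
 * clears bits 6-12 and #63 (0x3f) clears bits 0-5. The two loads then
 * follow thread_info->task->active_mm.
 */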

/*
 * mmid - get context id from mm pointer (mm->context.id)
 * note, this field is 64bit, so in big-endian the two words are swapped too.
 */
	.macro	mmid, rd, rn
#ifdef __ARMEB__
	ldr	\rd, [\rn, #MM_CONTEXT_ID + 4]
#else
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
#endif
	.endm

/*
 * asid - mask the ASID from the context ID
 */
	.macro	asid, rd, rn
	and	\rd, \rn, #255
	.endm
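
/*
 * Illustrative only (registers are hypothetical): the two macros above
 * are naturally chained to extract the current ASID from an mm pointer:
 *
 *	mmid	r1, r0			@ r1 = mm->context.id (low word)
 *	asid	r1, r1			@ r1 = ASID (bits 7:0)
 */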

/*
 * crval - emit the control register value pair: a clear mask followed
 * by the set value, using \mmuset on MMU kernels and \ucset otherwise.
 */
	.macro	crval, clear, mmuset, ucset
#ifdef CONFIG_MMU
	.word	\clear
	.word	\mmuset
#else
	.word	\clear
	.word	\ucset
#endif
	.endm
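
/*
 * Illustrative only, with made-up mask values and a hypothetical symbol
 * name: a processor support file would emit its control register
 * clear/set masks like this, and the MMU set value is selected on
 * CONFIG_MMU kernels, \ucset otherwise:
 *
 *	.type	example_crval, #object
 *	example_crval:
 *		crval	clear=0x00002000, mmuset=0x00000001, ucset=0x00000000
 */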

/*
 * dcache_line_size - get the minimum D-cache line size from the CTR register
 * on ARMv7.
 */
	.macro	dcache_line_size, reg, tmp
	mrc	p15, 0, \tmp, c0, c0, 1		@ read ctr
	lsr	\tmp, \tmp, #16
	and	\tmp, \tmp, #0xf		@ cache line size encoding
	mov	\reg, #4			@ bytes per word
	mov	\reg, \reg, lsl \tmp		@ actual cache line size
	.endm
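
/*
 * Worked example: CTR[19:16] (DminLine) holds log2 of the number of
 * words in the smallest D-cache line. For DminLine = 3 this yields
 * \reg = 4 << 3 = 32 bytes; for DminLine = 4, \reg = 4 << 4 = 64 bytes.
 */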

/*
 * icache_line_size - get the minimum I-cache line size from the CTR register
 * on ARMv7.
 */
	.macro	icache_line_size, reg, tmp
	mrc	p15, 0, \tmp, c0, c0, 1		@ read ctr
	and	\tmp, \tmp, #0xf		@ cache line size encoding
	mov	\reg, #4			@ bytes per word
	mov	\reg, \reg, lsl \tmp		@ actual cache line size
	.endm
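
/*
 * Note the only difference from dcache_line_size: IminLine already
 * sits in CTR[3:0], so no shift down is needed before masking.
 */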

/*
 * Sanity check the PTE configuration for the code below - which makes
 * certain assumptions about how these bits are laid out.
 */
#ifdef CONFIG_MMU
#if L_PTE_SHARED != PTE_EXT_SHARED
#error PTE shared bit mismatch
#endif
#if !defined (CONFIG_ARM_LPAE) && \
	(L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\
	 L_PTE_PRESENT) > L_PTE_SHARED
#error Invalid Linux PTE bit settings
#endif
#endif	/* CONFIG_MMU */

/*
 * The ARMv6 and ARMv7 set_pte_ext translation function.
 *
 * Permission translation:
 *	YUWD  APX AP1 AP0	SVC	User
 *	0xxx   0   0   0	no acc	no acc
 *	100x   1   0   1	r/o	no acc
 *	10x0   1   0   1	r/o	no acc
 *	1011   0   0   1	r/w	no acc
 *	110x   1   1   1	r/o	r/o
 *	11x0   1   1   1	r/o	r/o
 *	1111   0   1   1	r/w	r/w
 */
	.macro	armv6_mt_table pfx
\pfx\()_mt_table:
	.long	0x00						@ L_PTE_MT_UNCACHED
	.long	PTE_EXT_TEX(1)					@ L_PTE_MT_BUFFERABLE
	.long	PTE_CACHEABLE					@ L_PTE_MT_WRITETHROUGH
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_WRITEBACK
	.long	PTE_BUFFERABLE					@ L_PTE_MT_DEV_SHARED
	.long	0x00						@ unused
	.long	0x00						@ L_PTE_MT_MINICACHE (not present)
	.long	PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE	@ L_PTE_MT_WRITEALLOC
	.long	0x00						@ unused
	.long	PTE_EXT_TEX(1)					@ L_PTE_MT_DEV_WC
	.long	0x00						@ unused
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_DEV_CACHED
	.long	PTE_EXT_TEX(2)					@ L_PTE_MT_DEV_NONSHARED
	.long	0x00						@ unused
	.long	0x00						@ unused
	.long	PTE_CACHEABLE | PTE_BUFFERABLE | PTE_EXT_APX	@ L_PTE_MT_VECTORS
	.endm

	.macro	armv6_set_pte_ext pfx
	str	r1, [r0], #2048			@ linux version

	bic	r3, r1, #0x000003fc
	bic	r3, r3, #PTE_TYPE_MASK
	orr	r3, r3, r2
	orr	r3, r3, #PTE_EXT_AP0 | 2

	adr	ip, \pfx\()_mt_table
	and	r2, r1, #L_PTE_MT_MASK
	ldr	r2, [ip, r2]

	eor	r1, r1, #L_PTE_DIRTY
	tst	r1, #L_PTE_DIRTY|L_PTE_RDONLY
	orrne	r3, r3, #PTE_EXT_APX

	tst	r1, #L_PTE_USER
	orrne	r3, r3, #PTE_EXT_AP1
	tstne	r3, #PTE_EXT_APX

	@ user read-only -> kernel read-only
	bicne	r3, r3, #PTE_EXT_AP0

	tst	r1, #L_PTE_XN
	orrne	r3, r3, #PTE_EXT_XN

	eor	r3, r3, r2

	tst	r1, #L_PTE_YOUNG
	tstne	r1, #L_PTE_PRESENT
	moveq	r3, #0
	tstne	r1, #L_PTE_NONE
	movne	r3, #0

	str	r3, [r0]
	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
	.endm
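
/*
 * Illustrative only (the cpu_v6 naming is an assumption modelled on the
 * v6 support code): a processor file emits the memory-type table once
 * and expands the translator inside its set_pte_ext entry point:
 *
 *	armv6_mt_table cpu_v6
 *
 *	ENTRY(cpu_v6_set_pte_ext)
 *		armv6_set_pte_ext cpu_v6
 *		mov	pc, lr
 */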

/*
 * The ARMv3, ARMv4 and ARMv5 set_pte_ext translation function,
 * covering most CPUs except Xscale and Xscale 3.
 *
 * Permission translation:
 *	YUWD   AP	SVC	User
 *	0xxx  0x00	no acc	no acc
 *	100x  0x00	r/o	no acc
 *	10x0  0x00	r/o	no acc
 *	1011  0x55	r/w	no acc
 *	110x  0xaa	r/w	r/o
 *	11x0  0xaa	r/w	r/o
 *	1111  0xff	r/w	r/w
 */
	.macro	armv3_set_pte_ext wc_disable=1
	str	r1, [r0], #2048			@ linux version

	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY

	bic	r2, r1, #PTE_SMALL_AP_MASK	@ keep C, B bits
	bic	r2, r2, #PTE_TYPE_MASK
	orr	r2, r2, #PTE_TYPE_SMALL

	tst	r3, #L_PTE_USER			@ user?
	orrne	r2, r2, #PTE_SMALL_AP_URO_SRW

	tst	r3, #L_PTE_RDONLY | L_PTE_DIRTY	@ write and dirty?
	orreq	r2, r2, #PTE_SMALL_AP_UNO_SRW

	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ present and young?
	movne	r2, #0

	.if	\wc_disable
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	tst	r2, #PTE_CACHEABLE
	bicne	r2, r2, #PTE_BUFFERABLE
#endif
	.endif
	str	r2, [r0]			@ hardware version
	.endm
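
/*
 * Illustrative only (the entry-point name is hypothetical): an ARMv4/v5
 * processor file would wrap this macro in its set_pte_ext function:
 *
 *	ENTRY(cpu_example_set_pte_ext)
 *		armv3_set_pte_ext
 *		mov	pc, lr
 */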

/*
 * Xscale set_pte_ext translation, split into two halves to cope
 * with work-arounds. r3 must be preserved by code between these
 * two macros.
 *
 * Permission translation:
 *	YUWD  AP	SVC	User
 *	0xxx  00	no acc	no acc
 *	100x  00	r/o	no acc
 *	10x0  00	r/o	no acc
 *	1011  01	r/w	no acc
 *	110x  10	r/w	r/o
 *	11x0  10	r/w	r/o
 *	1111  11	r/w	r/w
 */
	.macro	xscale_set_pte_ext_prologue
	str	r1, [r0]			@ linux version

	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY

	bic	r2, r1, #PTE_SMALL_AP_MASK	@ keep C, B bits
	orr	r2, r2, #PTE_TYPE_EXT		@ extended page

	tst	r3, #L_PTE_USER			@ user?
	orrne	r2, r2, #PTE_EXT_AP_URO_SRW	@ yes -> user r/o, system r/w

	tst	r3, #L_PTE_RDONLY | L_PTE_DIRTY	@ write and dirty?
	orreq	r2, r2, #PTE_EXT_AP_UNO_SRW	@ yes -> user n/a, system r/w
						@ combined with user -> user r/w
	.endm

	.macro	xscale_set_pte_ext_epilogue
	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ present and young?
	movne	r2, #0				@ no -> fault

	str	r2, [r0, #2048]!		@ hardware version
	mov	ip, #0
	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
	.endm
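
/*
 * Illustrative only: the prologue/epilogue split lets an Xscale variant
 * insert its erratum workaround between the two halves, as long as it
 * preserves r3, which carries the permission state across:
 *
 *	xscale_set_pte_ext_prologue
 *	@ ... variant-specific workaround code, must not clobber r3 ...
 *	xscale_set_pte_ext_epilogue
 */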

	.macro	define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0
	.type	\name\()_processor_functions, #object
	.align	2
ENTRY(\name\()_processor_functions)
	.word	\dabort
	.word	\pabort
	.word	cpu_\name\()_proc_init
	.word	cpu_\name\()_proc_fin
	.word	cpu_\name\()_reset
	.word	cpu_\name\()_do_idle
	.word	cpu_\name\()_dcache_clean_area
	.word	cpu_\name\()_switch_mm

	.if \nommu
	.word	0
	.else
	.word	cpu_\name\()_set_pte_ext
	.endif

	.if \suspend
	.word	cpu_\name\()_suspend_size
#ifdef CONFIG_ARM_CPU_SUSPEND
	.word	cpu_\name\()_do_suspend
	.word	cpu_\name\()_do_resume
#else
	.word	0
	.word	0
#endif
	.else
	.word	0
	.word	0
	.word	0
	.endif

	.size	\name\()_processor_functions, . - \name\()_processor_functions
	.endm
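
/*
 * Illustrative only (arguments modelled on the v7 support code): one
 * invocation emits the whole processor_functions table, pulling in the
 * cpu_v7_* routines by name:
 *
 *	define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
 */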

	.macro	define_cache_functions name:req
	.align	2
	.type	\name\()_cache_fns, #object
ENTRY(\name\()_cache_fns)
	.long	\name\()_flush_icache_all
	.long	\name\()_flush_kern_cache_all
	.long	\name\()_flush_kern_cache_louis
	.long	\name\()_flush_user_cache_all
	.long	\name\()_flush_user_cache_range
	.long	\name\()_coherent_kern_range
	.long	\name\()_coherent_user_range
	.long	\name\()_flush_kern_dcache_area
	.long	\name\()_dma_map_area
	.long	\name\()_dma_unmap_area
	.long	\name\()_dma_flush_range
	.size	\name\()_cache_fns, . - \name\()_cache_fns
	.endm
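
/*
 * Illustrative only: given cache routines sharing a common prefix
 * (v7_flush_icache_all, v7_flush_kern_cache_all, ...), the whole table
 * is generated with:
 *
 *	define_cache_functions v7
 */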

	.macro	define_tlb_functions name:req, flags_up:req, flags_smp
	.type	\name\()_tlb_fns, #object
ENTRY(\name\()_tlb_fns)
	.long	\name\()_flush_user_tlb_range
	.long	\name\()_flush_kern_tlb_range
	.ifnb	\flags_smp
		ALT_SMP(.long	\flags_smp)
		ALT_UP(.long	\flags_up)
	.else
		.long	\flags_up
	.endif
	.size	\name\()_tlb_fns, . - \name\()_tlb_fns
	.endm
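
/*
 * Illustrative only (names modelled on the v7 TLB code): \flags_smp is
 * optional; when supplied, ALT_SMP/ALT_UP patch in the matching flags
 * word at runtime:
 *
 *	define_tlb_functions v7wbi, v7wbi_tlb_flags_up, v7wbi_tlb_flags_smp
 */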

/*
 * globl_equ - define \x as a global symbol equal to \y (an exported alias).
 */
	.macro	globl_equ x, y
	.globl	\x
	.equ	\x, \y
	.endm
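
/*
 * Illustrative only (symbol names are hypothetical): this lets one
 * processor's entry point reuse another's implementation directly:
 *
 *	globl_equ cpu_example_do_idle, cpu_generic_do_idle
 */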

/*
 * initfn - emit a position-independent reference to \func as an offset
 * from \base.
 */
	.macro	initfn, func, base
	.long	\func - \base
	.endm

/*
 * Macro to calculate the log2 size for the protection region
 * registers. This calculates rd = log2(size) - 1.  tmp must
 * not be the same register as rd.
 */
	.macro	pr_sz, rd, size, tmp
	mov	\tmp, \size, lsr #12		@ divide by 4096
	mov	\rd, #11			@ log2(4096) - 1
1:	movs	\tmp, \tmp, lsr #1		@ keep halving until zero
	addne	\rd, \rd, #1
	bne	1b
	.endm
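
/*
 * Worked example for \size = 64KB (0x10000): \tmp starts at 0x10 after
 * the shift by 12; four further shifts leave a non-zero result, so \rd
 * steps 11 -> 15, i.e. log2(65536) - 1. The fifth shift clears \tmp
 * and ends the loop.
 */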

/*
 * Macro to generate a protection region register value
 * given a pre-masked address, size, and enable bit.
 * Corrupts size.
 */
	.macro	pr_val, dest, addr, size, enable
	pr_sz	\dest, \size, \size		@ calculate log2(size) - 1
	orr	\dest, \addr, \dest, lsl #1	@ mask in the region size
	orr	\dest, \dest, \enable
	.endm
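
/*
 * Worked example (assuming the base | size << 1 | enable region
 * register layout this macro targets): for \addr = 0x40000000,
 * \size = 64KB (0x10000) and \enable = 1:
 *
 *	pr_sz	-> 15				@ log2(65536) - 1
 *	pr_val	-> 0x40000000 | (15 << 1) | 1 = 0x4000001f
 */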