/*
 * Based on arch/arm/include/asm/assembler.h
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm/cputype.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
/*
 * Stack pushing/popping (register pairs only). Equivalent to store decrement
 * before, load increment after.
 */
	.macro	push, xreg1, xreg2
	stp	\xreg1, \xreg2, [sp, #-16]!
	.endm

	.macro	pop, xreg1, xreg2
	ldp	\xreg1, \xreg2, [sp], #16
	.endm
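
/*
 * Illustrative only (not part of the original header): a hypothetical caller
 * saving and restoring a pair of callee-saved registers across a call:
 *
 *	push	x19, x20
 *	bl	some_helper		// hypothetical callee
 *	pop	x19, x20
 *
 * The 16-byte transfer keeps sp 16-byte aligned, as AArch64 requires for
 * sp-based accesses.
 */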
/*
 * Enable and disable interrupts.
 */
	.macro	disable_irq
	msr	daifset, #2
	.endm

	.macro	enable_irq
	msr	daifclr, #2
	.endm
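
/*
 * Illustrative only (not in the original header): masking IRQs around a
 * short critical section. The immediate selects DAIF bit 1 (the I flag);
 * daifset/daifclr touch only the bits named in the immediate:
 *
 *	disable_irq
 *	// ... code that must not be interrupted ...
 *	enable_irq
 */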
/*
 * Enable and disable debug exceptions.
 */
	.macro	disable_dbg
	msr	daifset, #8
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm
/*
 * Clear the MDSCR_EL1 software step bit (SS, bit 0) if the task is being
 * single-stepped, i.e. if TIF_SINGLESTEP is set in \flgs.
 */
	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #1
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

/*
 * Set the MDSCR_EL1 software step bit for a single-stepped task. Debug
 * exceptions are masked first, so the step cannot fire until they are
 * unmasked again.
 */
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	disable_dbg
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #1
	msr	mdscr_el1, \tmp
9990:
	.endm
/*
 * Enable both debug exceptions and interrupts. This is likely to be
 * faster than two daifclr operations, since writes to this register
 * are self-synchronising.
 */
	.macro	enable_dbg_and_irq
	msr	daifclr, #(8 | 2)
	.endm
/*
 * SMP data memory barrier
 */
	.macro	smp_dmb, opt
	dmb	\opt
	.endm
/*
 * Annotate a user-space access instruction: a fault on the instruction is
 * redirected, via the exception table, to the fixup handler at label <l>.
 */
#define USER(l, x...)				\
9999:	x;					\
	.section __ex_table,"a";	\
	.align	3;				\
	.quad	9999b,l;			\
	.previous
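
/*
 * Illustrative only (not in the original header): loading a word through a
 * user pointer, with a hypothetical local fixup label:
 *
 * USER(9f, ldr w0, [x1])		// a fault here branches to 9f
 *	...
 * 9:	mov	x0, #-EFAULT		// hypothetical fixup path
 */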
/*
 * Register aliases.
 */
lr	.req	x30		// link register
/*
 * Vector entry
 */
	.macro	ventry	label
	.align	7
	b	\label
	.endm
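
/*
 * Illustrative only (not in the original header): .align 7 pads each entry
 * to 128 bytes, the spacing of the AArch64 exception vector table. A
 * hypothetical table fragment:
 *
 *	.align	11			// VBAR_ELx requires 2KB alignment
 * vectors:
 *	ventry	el1_sync_handler	// hypothetical handler labels
 *	ventry	el1_irq_handler
 */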
/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif
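
/*
 * Illustrative only (not in the original header): byte-swapping a value
 * only on big-endian builds, so the code that follows can assume
 * little-endian data:
 *
 * CPU_BE(	rev	w0, w0	)	// assembled only when BE
 */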
/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm
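
/*
 * Illustrative only (not in the original header): combining a low word in
 * x0 and a high word in x1 into x2 on a little-endian build:
 *
 *	regs_to_64	x2, x0, x1	// x2 = (x1 << 32) | x0
 *
 * Under CONFIG_CPU_BIG_ENDIAN the same call binds the operands the other
 * way round, matching the swapped register order described above.
 */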
/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
/*
 * @dst: destination register (64 bit wide)
 * @sym: name of the symbol
 * @tmp: optional scratch register to be used if <dst> == sp, which
 *       is not allowed in an adrp instruction
 */
	.macro	adr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.else
	adrp	\tmp, \sym
	add	\dst, \tmp, :lo12:\sym
	.endif
	.endm
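
/*
 * Illustrative only (not in the original header): taking the address of a
 * symbol. The adrp + add pair reaches +/- 4 GB, unlike plain adr, which
 * only reaches +/- 1 MB:
 *
 *	adr_l	x0, vectors		// x0 = runtime address of 'vectors'
 */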
/*
 * @dst: destination register (32 or 64 bit wide)
 * @sym: name of the symbol
 * @tmp: optional 64-bit scratch register to be used if <dst> is a
 *       32-bit wide register, in which case it cannot be used to hold
 *       the address
 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm
/*
 * @src: source register (32 or 64 bit wide)
 * @sym: name of the symbol
 * @tmp: mandatory 64-bit scratch register to calculate the address
 *       while <src> needs to be preserved.
 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm
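
/*
 * Illustrative only (not in the original header): loading and storing a
 * 32-bit variable. A 64-bit scratch register must carry the address when
 * the data register is only 32 bits wide:
 *
 *	ldr_l	w0, some_counter, x1	// hypothetical symbol
 *	add	w0, w0, #1
 *	str_l	w0, some_counter, x1
 */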
/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)			\
	.globl	__pi_##x;		\
	.type	__pi_##x, %function;	\
	.set	__pi_##x, x;		\
	.size	__pi_##x, . - x;	\
	ENDPROC(x)
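
/*
 * Illustrative only (not in the original header): ENDPIPROC closes a
 * function opened with ENTRY and additionally exports a __pi_ alias for
 * position-independent callers:
 *
 * ENTRY(memcpy)
 *	// ... body ...
 *	ret
 * ENDPIPROC(memcpy)			// also defines __pi_memcpy
 */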
/*
 * mov_q - move an immediate constant into a 64-bit register using
 *         between 2 and 4 movz/movk instructions (depending on the
 *         magnitude and sign of the operand)
 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm
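
/*
 * Illustrative only (not in the original header): loading a constant that
 * needs more than one 16-bit chunk. The macro picks the shortest
 * movz/movk sequence for the value:
 *
 *	mov_q	x0, 0x0000ffff0000ffff	// four instructions for this value
 */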
/*
 * Check the MIDR_EL1 of the current CPU for a given model and a range of
 * variant/revision. See asm/cputype.h for the macros used below.
 *
 * model:		MIDR_CPU_PART of CPU
 * rv_min:		Minimum of MIDR_CPU_VAR_REV()
 * rv_max:		Maximum of MIDR_CPU_VAR_REV()
 * res:			Result register.
 * tmp1, tmp2, tmp3:	Temporary registers
 *
 * Corrupts: res, tmp1, tmp2, tmp3
 * Returns: 0 if the CPU id doesn't match, non-zero otherwise
 */
	.macro	cpu_midr_match model, rv_min, rv_max, res, tmp1, tmp2, tmp3
	mrs	\res, midr_el1
	mov_q	\tmp1, (MIDR_REVISION_MASK | MIDR_VARIANT_MASK)
	mov_q	\tmp2, MIDR_CPU_PART_MASK
	and	\tmp3, \res, \tmp2	// Extract model
	and	\tmp1, \res, \tmp1	// rev & variant
	mov_q	\tmp2, \model
	cmp	\tmp3, \tmp2
	cset	\res, eq
	cbz	\res, .Ldone\@		// Model matches?
	.if (\rv_min != 0)		// Skip min check if rv_min == 0
	mov_q	\tmp3, \rv_min
	cmp	\tmp1, \tmp3
	cset	\res, ge
	.endif				// \rv_min != 0
	/* Skip rv_max check if rv_min == rv_max && rv_min != 0 */
	.if ((\rv_min != \rv_max) || \rv_min == 0)
	mov_q	\tmp2, \rv_max
	cmp	\tmp1, \tmp2
	cset	\tmp2, le
	and	\res, \res, \tmp2
	.endif
.Ldone\@:
	.endm
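
/*
 * Illustrative only (not in the original header): testing for Cortex-A57
 * r0p0 through r1p2, with the result left in x0 (hypothetical register
 * choices; the MIDR_* macros come from asm/cputype.h):
 *
 *	cpu_midr_match	MIDR_CORTEX_A57, MIDR_CPU_VAR_REV(0, 0), \
 *			MIDR_CPU_VAR_REV(1, 2), x0, x1, x2, x3
 *	cbnz	x0, matched		// hypothetical label
 */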
#endif	/* __ASM_ASSEMBLER_H */