atomic_ll_sc.h

/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 *
 * NOTE: these functions do *not* follow the PCS and must explicitly
 * save any clobbered registers other than x0 (regardless of return
 * value).  This is achieved through -fcall-saved-* compiler flags for
 * this file, which unfortunately don't work on a per-function basis
 * (the optimize attribute silently ignores these options).
 */

#define ATOMIC_OP(op, asm_op) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
{ \
	unsigned long tmp; \
	int result; \
\
	asm volatile("// atomic_" #op "\n" \
"	prfm	pstl1strm, %2\n" \
"1:	ldxr	%w0, %2\n" \
"	" #asm_op "	%w0, %w0, %w3\n" \
"	stxr	%w1, %w0, %2\n" \
"	cbnz	%w1, 1b" \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i)); \
} \
__LL_SC_EXPORT(atomic_##op);

#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE int \
__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
{ \
	unsigned long tmp; \
	int result; \
\
	asm volatile("// atomic_" #op "_return" #name "\n" \
"	prfm	pstl1strm, %2\n" \
"1:	ld" #acq "xr	%w0, %2\n" \
"	" #asm_op "	%w0, %w0, %w3\n" \
"	st" #rel "xr	%w1, %w0, %2\n" \
"	cbnz	%w1, 1b\n" \
"	" #mb \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i) \
	: cl); \
\
	return result; \
} \
__LL_SC_EXPORT(atomic_##op##_return##name);

#define ATOMIC_OPS(...) \
	ATOMIC_OP(__VA_ARGS__) \
	ATOMIC_OP_RETURN(        , dmb ish,  , l, "memory", __VA_ARGS__)

#define ATOMIC_OPS_RLX(...) \
	ATOMIC_OPS(__VA_ARGS__) \
	ATOMIC_OP_RETURN(_relaxed,        ,  ,  ,         , __VA_ARGS__) \
	ATOMIC_OP_RETURN(_acquire,        , a,  , "memory", __VA_ARGS__) \
	ATOMIC_OP_RETURN(_release,        ,  , l, "memory", __VA_ARGS__)
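
/*
 * As a rough sketch of what the instantiations below produce:
 * ATOMIC_OPS_RLX(add, add) generates definitions equivalent to
 *
 *	void atomic_add(int i, atomic_t *v);			// no ordering
 *	int  atomic_add_return(int i, atomic_t *v);		// stlxr + dmb ish
 *	int  atomic_add_return_relaxed(int i, atomic_t *v);	// ldxr/stxr only
 *	int  atomic_add_return_acquire(int i, atomic_t *v);	// ldaxr
 *	int  atomic_add_return_release(int i, atomic_t *v);	// stlxr
 *
 * modulo the __LL_SC_INLINE/__LL_SC_PREFIX/__LL_SC_EXPORT decoration,
 * which is supplied by whatever includes this file (see the #error guard
 * above).  ATOMIC_OP(and, and) and friends generate only the
 * non-returning form.  The ATOMIC64_* macros further down mirror all of
 * this for atomic64_t.
 */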

ATOMIC_OPS_RLX(add, add)
ATOMIC_OPS_RLX(sub, sub)

ATOMIC_OP(and, and)
ATOMIC_OP(andnot, bic)
ATOMIC_OP(or, orr)
ATOMIC_OP(xor, eor)

#undef ATOMIC_OPS_RLX
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define ATOMIC64_OP(op, asm_op) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
{ \
	long result; \
	unsigned long tmp; \
\
	asm volatile("// atomic64_" #op "\n" \
"	prfm	pstl1strm, %2\n" \
"1:	ldxr	%0, %2\n" \
"	" #asm_op "	%0, %0, %3\n" \
"	stxr	%w1, %0, %2\n" \
"	cbnz	%w1, 1b" \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i)); \
} \
__LL_SC_EXPORT(atomic64_##op);

#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE long \
__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
{ \
	long result; \
	unsigned long tmp; \
\
	asm volatile("// atomic64_" #op "_return" #name "\n" \
"	prfm	pstl1strm, %2\n" \
"1:	ld" #acq "xr	%0, %2\n" \
"	" #asm_op "	%0, %0, %3\n" \
"	st" #rel "xr	%w1, %0, %2\n" \
"	cbnz	%w1, 1b\n" \
"	" #mb \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i) \
	: cl); \
\
	return result; \
} \
__LL_SC_EXPORT(atomic64_##op##_return##name);

#define ATOMIC64_OPS(...) \
	ATOMIC64_OP(__VA_ARGS__) \
	ATOMIC64_OP_RETURN(, dmb ish,  , l, "memory", __VA_ARGS__)

#define ATOMIC64_OPS_RLX(...) \
	ATOMIC64_OPS(__VA_ARGS__) \
	ATOMIC64_OP_RETURN(_relaxed,,  ,  ,         , __VA_ARGS__) \
	ATOMIC64_OP_RETURN(_acquire,, a,  , "memory", __VA_ARGS__) \
	ATOMIC64_OP_RETURN(_release,,  , l, "memory", __VA_ARGS__)
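
/*
 * The atomic64_* instantiations below mirror the 32-bit ones above:
 * ATOMIC64_OPS_RLX(add, add), for instance, yields atomic64_add() plus
 * the _return/_return_relaxed/_return_acquire/_return_release variants,
 * all operating on the 64-bit v->counter via full x-register operands
 * (%0 rather than %w0).
 */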

ATOMIC64_OPS_RLX(add, add)
ATOMIC64_OPS_RLX(sub, sub)

ATOMIC64_OP(and, and)
ATOMIC64_OP(andnot, bic)
ATOMIC64_OP(or, orr)
ATOMIC64_OP(xor, eor)

#undef ATOMIC64_OPS_RLX
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
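
/*
 * atomic64_dec_if_positive(): decrement v->counter only if the result
 * stays non-negative.  The subs/b.lt pair skips the store-exclusive (and
 * the dmb) when the decremented value would go negative, so the caller
 * gets back the new counter value on success, or a negative value when
 * the decrement was not performed.
 */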
__LL_SC_INLINE long
__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"	prfm	pstl1strm, %2\n"
"1:	ldxr	%0, %2\n"
"	subs	%0, %0, #1\n"
"	b.lt	2f\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b\n"
"	dmb	ish\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}
__LL_SC_EXPORT(atomic64_dec_if_positive);

#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl) \
__LL_SC_INLINE unsigned long \
__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
				     unsigned long old, \
				     unsigned long new)) \
{ \
	unsigned long tmp, oldval; \
\
	asm volatile( \
	"	prfm	pstl1strm, %[v]\n" \
	"1:	ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n" \
	"	eor	%" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
	"	cbnz	%" #w "[tmp], 2f\n" \
	"	st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \
	"	cbnz	%w[tmp], 1b\n" \
	"	" #mb "\n" \
	"	mov	%" #w "[oldval], %" #w "[old]\n" \
	"2:" \
	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
	  [v] "+Q" (*(unsigned long *)ptr) \
	: [old] "Lr" (old), [new] "r" (new) \
	: cl); \
\
	return oldval; \
} \
__LL_SC_EXPORT(__cmpxchg_case_##name);
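
/*
 * The table below stamps out one cmpxchg helper per access size and
 * memory-ordering flavour, and the name encodes both: the _1/_2/_4/_8
 * suffix is the operand size in bytes (selecting ldxrb/ldxrh/ldxr via
 * the "w"/"b"/"h" and size arguments), while a bare name is fully
 * relaxed, acq_* uses an acquire load (ldaxr*), rel_* a release store
 * (stlxr*), and mb_* a release store plus a trailing dmb ish.  Each
 * helper returns the value observed at *ptr, which equals "old" exactly
 * when the exchange succeeded.  These are normally reached through the
 * cmpxchg() family of wrappers in <asm/cmpxchg.h> rather than called
 * directly.
 */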

__CMPXCHG_CASE(w, b,     1,        ,  ,  ,         )
__CMPXCHG_CASE(w, h,     2,        ,  ,  ,         )
__CMPXCHG_CASE(w,  ,     4,        ,  ,  ,         )
__CMPXCHG_CASE( ,  ,     8,        ,  ,  ,         )
__CMPXCHG_CASE(w, b, acq_1,        , a,  , "memory")
__CMPXCHG_CASE(w, h, acq_2,        , a,  , "memory")
__CMPXCHG_CASE(w,  , acq_4,        , a,  , "memory")
__CMPXCHG_CASE( ,  , acq_8,        , a,  , "memory")
__CMPXCHG_CASE(w, b, rel_1,        ,  , l, "memory")
__CMPXCHG_CASE(w, h, rel_2,        ,  , l, "memory")
__CMPXCHG_CASE(w,  , rel_4,        ,  , l, "memory")
__CMPXCHG_CASE( ,  , rel_8,        ,  , l, "memory")
__CMPXCHG_CASE(w, b,  mb_1, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w, h,  mb_2, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w,  ,  mb_4, dmb ish,  , l, "memory")
__CMPXCHG_CASE( ,  ,  mb_8, dmb ish,  , l, "memory")

#undef __CMPXCHG_CASE

#define __CMPXCHG_DBL(name, mb, rel, cl) \
__LL_SC_INLINE long \
__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1, \
				      unsigned long old2, \
				      unsigned long new1, \
				      unsigned long new2, \
				      volatile void *ptr)) \
{ \
	unsigned long tmp, ret; \
\
	asm volatile("// __cmpxchg_double" #name "\n" \
	"	prfm	pstl1strm, %2\n" \
	"1:	ldxp	%0, %1, %2\n" \
	"	eor	%0, %0, %3\n" \
	"	eor	%1, %1, %4\n" \
	"	orr	%1, %0, %1\n" \
	"	cbnz	%1, 2f\n" \
	"	st" #rel "xp	%w0, %5, %6, %2\n" \
	"	cbnz	%w0, 1b\n" \
	"	" #mb "\n" \
	"2:" \
	: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
	: "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
	: cl); \
\
	return ret; \
} \
__LL_SC_EXPORT(__cmpxchg_double##name);
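
/*
 * Two flavours of the double-word compare-and-exchange are generated
 * below: __cmpxchg_double() with no ordering, and __cmpxchg_double_mb()
 * with a release store-pair (stlxp) plus a trailing dmb ish.  Both load
 * a pair of adjacent 64-bit words with ldxp and return zero when both
 * words matched {old1, old2} and the store-pair of {new1, new2}
 * succeeded; a non-zero return means the comparison failed and nothing
 * was written.
 */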

__CMPXCHG_DBL(   ,        ,  ,         )
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")

#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LL_SC_H */