cmpxchg.h
/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bug.h>
#include <linux/mmdebug.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/lse.h>

/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
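
/*
 * Worked example (illustrative note, not part of the original file): the
 * full-barrier 64-bit instantiation below,
 *
 *	__XCHG_CASE( ,  ,  mb_8, dmb ish, nop,  , a, l, "memory")
 *
 * expands to roughly "prfm; 1: ldxr; stlxr; cbnz 1b; dmb ish" in the LL/SC
 * variant (release + dmb) and to a single "swpal" plus padding nops in the
 * LSE variant (acquire + release), which is why acq and acq_lse must be
 * passed separately.
 */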
#define __XCHG_CASE(w, sz, name, mb, nop_lse, acq, acq_lse, rel, cl)	\
static inline unsigned long __xchg_case_##name(unsigned long x,	\
						volatile void *ptr)	\
{									\
	unsigned long ret, tmp;						\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ld" #acq "xr" #sz "\t%" #w "0, %2\n"			\
	"	st" #rel "xr" #sz "\t%w1, %" #w "3, %2\n"		\
	"	cbnz	%w1, 1b\n"					\
	"	" #mb,							\
	/* LSE atomics */						\
	"	nop\n"							\
	"	nop\n"							\
	"	swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n"	\
	"	nop\n"							\
	"	" #nop_lse)						\
	: "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr)	\
	: "r" (x)							\
	: cl);								\
									\
	return ret;							\
}

__XCHG_CASE(w, b,     1,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     2,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     4,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_1,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_2,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_4,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_1,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_2,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_4,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_1, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_2, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_4, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_8, dmb ish, nop,  , a, l, "memory")

#undef __XCHG_CASE

#define __XCHG_GEN(sfx)							\
static inline unsigned long __xchg##sfx(unsigned long x,		\
					volatile void *ptr,		\
					int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __xchg_case##sfx##_1(x, ptr);			\
	case 2:								\
		return __xchg_case##sfx##_2(x, ptr);			\
	case 4:								\
		return __xchg_case##sfx##_4(x, ptr);			\
	case 8:								\
		return __xchg_case##sfx##_8(x, ptr);			\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#undef __XCHG_GEN

#define __xchg_wrapper(sfx, ptr, x)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret;								\
})

/* xchg */
#define xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
#define xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
#define xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
#define xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
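
/*
 * Usage sketch (illustrative only, not from the original header): the
 * xchg*() wrappers infer the access width from the pointed-to type and
 * return the previous value. The function and variable names below are
 * hypothetical.
 */
static inline unsigned long __example_xchg_usage(unsigned long *word)
{
	/* Full barrier: ordered against accesses on both sides. */
	unsigned long old = xchg(word, 1UL);

	/* Relaxed: atomic, but with no ordering guarantees. */
	xchg_relaxed(word, 0UL);

	return old;
}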

#define __CMPXCHG_GEN(sfx)						\
static inline unsigned long __cmpxchg##sfx(volatile void *ptr,		\
					   unsigned long old,		\
					   unsigned long new,		\
					   int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpxchg_case##sfx##_1(ptr, (u8)old, new);	\
	case 2:								\
		return __cmpxchg_case##sfx##_2(ptr, (u16)old, new);	\
	case 4:								\
		return __cmpxchg_case##sfx##_4(ptr, old, new);		\
	case 8:								\
		return __cmpxchg_case##sfx##_8(ptr, old, new);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#undef __CMPXCHG_GEN

#define __cmpxchg_wrapper(sfx, ptr, o, n)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__cmpxchg##sfx((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})

/* cmpxchg */
#define cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
#define cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
#define cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
#define cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
#define cmpxchg_local		cmpxchg_relaxed
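
/*
 * Usage sketch (illustrative only, not from the original header): the
 * classic cmpxchg() retry loop. cmpxchg() returns the value actually
 * observed at the location, so success is detected by comparing it with
 * the expected old value. The function name below is hypothetical.
 */
static inline void __example_cmpxchg_increment(unsigned long *counter)
{
	unsigned long old, new;

	do {
		old = READ_ONCE(*counter);
		new = old + 1;
	} while (cmpxchg(counter, old, new) != old);
}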

/* cmpxchg64 */
#define cmpxchg64_relaxed	cmpxchg_relaxed
#define cmpxchg64_acquire	cmpxchg_acquire
#define cmpxchg64_release	cmpxchg_release
#define cmpxchg64		cmpxchg
#define cmpxchg64_local		cmpxchg_local

/* cmpxchg_double */
#define system_has_cmpxchg_double()	1

#define __cmpxchg_double_check(ptr1, ptr2)					\
({										\
	if (sizeof(*(ptr1)) != 8)						\
		BUILD_BUG();							\
	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);	\
})

#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2)				\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2),	\
				     (unsigned long)(n1), (unsigned long)(n2),	\
				     ptr1);					\
	__ret;									\
})

#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2)			\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2),	\
				  (unsigned long)(n1), (unsigned long)(n2),	\
				  ptr1);					\
	__ret;									\
})
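
/*
 * Usage sketch (illustrative only, not from the original header):
 * cmpxchg_double() atomically compares and swaps two adjacent 64-bit
 * words; __cmpxchg_double_check() above enforces that ptr2 immediately
 * follows ptr1, and the pair is assumed here to require 16-byte
 * alignment. It returns non-zero on success. The struct and names below
 * are hypothetical.
 */
struct __example_pair {
	unsigned long first;
	unsigned long second;
} __aligned(16);

static inline int __example_pair_update(struct __example_pair *p,
					unsigned long o1, unsigned long o2,
					unsigned long n1, unsigned long n2)
{
	return cmpxchg_double(&p->first, &p->second, o1, o2, n1, n2);
}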

/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n)			\
({								\
	typeof(*raw_cpu_ptr(&(pcp))) __ret;			\
	preempt_disable();					\
	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);	\
	preempt_enable();					\
	__ret;							\
})

#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)

#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
({									\
	int __ret;							\
	preempt_disable();						\
	__ret = cmpxchg_double_local(	raw_cpu_ptr(&(ptr1)),		\
					raw_cpu_ptr(&(ptr2)),		\
					o1, o2, n1, n2);		\
	preempt_enable();						\
	__ret;								\
})
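
/*
 * Illustrative note (not part of the original file): the size-suffixed
 * this_cpu_cmpxchg_*() macros above back the generic this_cpu_cmpxchg()
 * interface, which selects the _1/_2/_4/_8 variant from the size of the
 * per-cpu variable. Preemption is disabled around a relaxed
 * cmpxchg_local() because the access only needs to be atomic with
 * respect to the local CPU.
 */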

#endif	/* __ASM_CMPXCHG_H */