/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define atomic_read(v)  READ_ONCE((v)->counter)
#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
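
/*
 * A sketch of why the block below exists (an assumption drawn from the
 * config name and the in-line asm comments, not spelled out here):
 * STAR 9000923308 appears to be a hardware erratum workaround in which
 * a failing SCOND can livelock against other agents hammering the same
 * line. The retry path therefore spins in a delay loop whose length
 * doubles on every failed attempt (exponential backoff, via "rol") before
 * re-issuing the LLOCK/SCOND pair from label 1:.
 */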
#ifdef CONFIG_ARC_STAR_9000923308

#define SCOND_FAIL_RETRY_VAR_DEF                                        \
        unsigned int delay = 1, tmp;

#define SCOND_FAIL_RETRY_ASM                                            \
        "       bz      4f                      \n"                     \
        "       ; --- scond fail delay ---      \n"                     \
        "       mov     %[tmp], %[delay]        \n" /* tmp = delay */   \
        "2:     brne.d  %[tmp], 0, 2b           \n" /* while (tmp != 0) */ \
        "       sub     %[tmp], %[tmp], 1       \n" /* tmp-- */         \
        "       rol     %[delay], %[delay]      \n" /* delay *= 2 */    \
        "       b       1b                      \n" /* start over */    \
        "4:     ; --- success ---               \n"

#define SCOND_FAIL_RETRY_VARS                                           \
        , [delay] "+&r" (delay), [tmp] "=&r" (tmp)

#else   /* !CONFIG_ARC_STAR_9000923308 */

#define SCOND_FAIL_RETRY_VAR_DEF

#define SCOND_FAIL_RETRY_ASM                                            \
        "       bnz     1b                      \n"

#define SCOND_FAIL_RETRY_VARS

#endif

#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        unsigned int val;                                               \
        SCOND_FAIL_RETRY_VAR_DEF                                        \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     llock   %[val], [%[ctr]]                \n"             \
        "       " #asm_op " %[val], %[val], %[i]        \n"             \
        "       scond   %[val], [%[ctr]]                \n"             \
        "                                               \n"             \
        SCOND_FAIL_RETRY_ASM                                            \
                                                                        \
        : [val] "=&r" (val) /* Early clobber to prevent reg reuse */    \
          SCOND_FAIL_RETRY_VARS                                         \
        : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
          [i] "ir" (i)                                                  \
        : "cc");                                                        \
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        unsigned int val;                                               \
        SCOND_FAIL_RETRY_VAR_DEF                                        \
                                                                        \
        /*                                                              \
         * Explicit full memory barrier needed before/after as          \
         * LLOCK/SCOND themselves don't provide any such semantics      \
         */                                                             \
        smp_mb();                                                       \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     llock   %[val], [%[ctr]]                \n"             \
        "       " #asm_op " %[val], %[val], %[i]        \n"             \
        "       scond   %[val], [%[ctr]]                \n"             \
        "                                               \n"             \
        SCOND_FAIL_RETRY_ASM                                            \
                                                                        \
        : [val] "=&r" (val)                                             \
          SCOND_FAIL_RETRY_VARS                                         \
        : [ctr] "r" (&v->counter),                                      \
          [i] "ir" (i)                                                  \
        : "cc");                                                        \
                                                                        \
        smp_mb();                                                       \
                                                                        \
        return val;                                                     \
}
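
/*
 * Illustrative expansion (a sketch, not generated code): with
 * CONFIG_ARC_STAR_9000923308 disabled, ATOMIC_OP(add, +=, add)
 * produces roughly
 *
 *      static inline void atomic_add(int i, atomic_t *v)
 *      {
 *              unsigned int val;
 *
 *              __asm__ __volatile__(
 *              "1:     llock   %[val], [%[ctr]]        \n"
 *              "       add     %[val], %[val], %[i]    \n"
 *              "       scond   %[val], [%[ctr]]        \n"
 *              "       bnz     1b                      \n"
 *              : [val] "=&r" (val)
 *              : [ctr] "r" (&v->counter), [i] "ir" (i)
 *              : "cc");
 *      }
 *
 * i.e. load-locked, apply the op, store-conditional, and retry from 1:
 * whenever another agent intervened and the SCOND failed.
 */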

#else   /* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

/* violating atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
        /*
         * Independent of hardware support, all of the atomic_xxx() APIs need
         * to follow the same locking rules to make sure that a "hardware"
         * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
         * sequence.
         *
         * Thus atomic_set(), despite being 1 insn (and seemingly atomic),
         * requires the locking.
         */
        unsigned long flags;

        atomic_ops_lock(flags);
        WRITE_ONCE(v->counter, i);
        atomic_ops_unlock(flags);
}
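
/*
 * A sketch of the race the comment above guards against (hypothetical
 * interleaving, assuming atomic_set() did NOT take the lock on SMP):
 *
 *      CPU0: atomic_add(1, v)                  CPU1: atomic_set(v, 5)
 *      atomic_ops_lock(flags);
 *      c = v->counter;         (c == 0)
 *                                              v->counter = 5;
 *      v->counter = c + 1;     (== 1; the 5 is silently clobbered)
 *      atomic_ops_unlock(flags);
 *
 * Taking atomic_ops_lock() in atomic_set() serializes it against the
 * emulated read-modify-write sequences and closes this window.
 */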

#endif

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        atomic_ops_lock(flags);                                         \
        v->counter c_op i;                                              \
        atomic_ops_unlock(flags);                                       \
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        unsigned long flags;                                            \
        unsigned long temp;                                             \
                                                                        \
        /*                                                              \
         * spin lock/unlock provides the needed smp_mb() before/after   \
         */                                                             \
        atomic_ops_lock(flags);                                         \
        temp = v->counter;                                              \
        temp c_op i;                                                    \
        v->counter = temp;                                              \
        atomic_ops_unlock(flags);                                       \
                                                                        \
        return temp;                                                    \
}
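
/*
 * Illustrative expansion (a sketch): with ATOMIC_OPS(add, +=, add)
 * below, this non-LLSC atomic_add_return() reduces to a plain locked
 * read-modify-write:
 *
 *      atomic_ops_lock(flags);
 *      temp = v->counter;
 *      temp += i;
 *      v->counter = temp;
 *      atomic_ops_unlock(flags);
 *      return temp;
 */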

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)                                    \
        ATOMIC_OP(op, c_op, asm_op)                                     \
        ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

ATOMIC_OP(and, &=, and)
ATOMIC_OP(andnot, &= ~, bic)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, xor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)                                    \
({                                                                      \
        int c, old;                                                     \
                                                                        \
        /*                                                              \
         * Explicit full memory barrier needed before/after as          \
         * LLOCK/SCOND themselves don't provide any such semantics      \
         */                                                             \
        smp_mb();                                                       \
                                                                        \
        c = atomic_read(v);                                             \
        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
                c = old;                                                \
                                                                        \
        smp_mb();                                                       \
                                                                        \
        c;                                                              \
})
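
/*
 * Example (a sketch of the semantics above): with v->counter == 0,
 * __atomic_add_unless(v, 1, 0) performs no add and returns 0, while
 * with v->counter == 3 it bumps the counter to 4 and returns 3.
 * atomic_inc_not_zero() below relies on exactly this behaviour.
 */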

#define atomic_inc_not_zero(v)          atomic_add_unless((v), 1, 0)
#define atomic_inc(v)                   atomic_add(1, v)
#define atomic_dec(v)                   atomic_sub(1, v)

#define atomic_inc_and_test(v)          (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)          (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)            atomic_add_return(1, (v))
#define atomic_dec_return(v)            atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)       (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)       (atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)                  { (i) }
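
/*
 * Usage sketch (hypothetical caller, not part of this header;
 * free_obj() is an assumed helper):
 *
 *      static atomic_t refcnt = ATOMIC_INIT(1);
 *
 *      void get_obj(void)
 *      {
 *              atomic_inc(&refcnt);
 *      }
 *
 *      void put_obj(void)
 *      {
 *              if (atomic_dec_and_test(&refcnt))
 *                      free_obj();
 *      }
 */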

#include <asm-generic/atomic64.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_ARC_ATOMIC_H */