atomic_32.h

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H

#include <asm/barrier.h>
#include <arch/chip.h>

#ifndef __ASSEMBLY__

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
        _atomic_xchg_add(&v->counter, i);
}

#define ATOMIC_OP(op) \
unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        _atomic_##op((unsigned long *)&v->counter, i); \
}

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_OP

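/*
 * For reference, ATOMIC_OP(or) above expands to:
 *
 *	unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
 *	static inline void atomic_or(int i, atomic_t *v)
 *	{
 *		_atomic_or((unsigned long *)&v->counter, i);
 *	}
 *
 * i.e. a declaration of the out-of-line helper plus a thin inline
 * wrapper that funnels the counter through it.
 */
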
/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
        smp_mb(); /* barrier for proper semantics */
        return _atomic_xchg_add(&v->counter, i) + i;
}

/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        smp_mb(); /* barrier for proper semantics */
        return _atomic_xchg_add_unless(&v->counter, a, u);
}

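/*
 * Usage sketch (illustrative, not part of this header): returning the
 * old value lets a caller decide whether the add actually happened by
 * comparing against @u, which is how the generic atomic_add_unless()
 * wrapper in <linux/atomic.h> derives its boolean result:
 *
 *	atomic_t refs = ATOMIC_INIT(1);
 *	int took_ref = (__atomic_add_unless(&refs, 1, 0) != 0);
 */
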
/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
        _atomic_xchg(&v->counter, n);
}

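/*
 * Race sketch motivating the xchg-based atomic_set() above: the other
 * atomic ops here are lock-protected load/modify/store sequences, so a
 * raw "v->counter = n" could interleave like this and then vanish:
 *
 *	CPU 0 (atomic_add)		CPU 1 (raw store)
 *	load v->counter
 *					store n to v->counter
 *	store old + i to v->counter	(n is silently lost)
 *
 * Going through _atomic_xchg() takes the same hashed lock as the other
 * ops and therefore serializes properly with them.
 */
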
/* A 64-bit atomic type */
typedef struct {
        long long counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline long long atomic64_read(const atomic64_t *v)
{
        /*
         * Requires an atomic op to read both 32-bit parts consistently.
         * Casting away const is safe since the atomic support routines
         * do not write to memory if the value has not been modified.
         */
        return _atomic64_xchg_add((long long *)&v->counter, 0);
}

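/*
 * Two separate 32-bit loads would not do here: an update could land
 * between the loads of the two halves, handing the caller a torn value
 * (half old, half new).  Adding 0 under the atomic lock instead returns
 * the whole old value, and per the comment above the support routine
 * skips the store when the value is unchanged.
 */
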
/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(long long i, atomic64_t *v)
{
        _atomic64_xchg_add(&v->counter, i);
}

#define ATOMIC64_OP(op) \
long long _atomic64_##op(long long *v, long long n); \
static inline void atomic64_##op(long long i, atomic64_t *v) \
{ \
        _atomic64_##op(&v->counter, i); \
}

ATOMIC64_OP(and)
ATOMIC64_OP(or)
ATOMIC64_OP(xor)

#undef ATOMIC64_OP

/**
 * atomic64_add_return - add integer and return
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
        smp_mb(); /* barrier for proper semantics */
        return _atomic64_xchg_add(&v->counter, i) + i;
}

/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline long long atomic64_add_unless(atomic64_t *v, long long a,
                                            long long u)
{
        smp_mb(); /* barrier for proper semantics */
        return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
}

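/*
 * Usage sketch: the truth-value return supports refcount-style tests
 * such as the atomic64_inc_not_zero() macro defined below:
 *
 *	atomic64_t cnt = ATOMIC64_INIT(1);
 *	if (atomic64_add_unless(&cnt, 1LL, 0LL))
 *		... cnt was non-zero and has now been incremented ...
 */
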
/**
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic64_set(atomic64_t *v, long long n)
{
        _atomic64_xchg(&v->counter, n);
}

#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v) atomic64_add(1LL, (v))
#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v) atomic64_add(-(i), (v))
#define atomic64_dec(v) atomic64_sub(1LL, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)

#endif /* !__ASSEMBLY__ */

/*
 * Internal definitions only beyond this point.
 */

/*
 * Number of atomic locks in atomic_locks[]. Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT)

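/*
 * Worked example (assuming 4 KB pages, i.e. PAGE_SHIFT == 12):
 * ATOMIC_HASH_SHIFT = 12 - 3 = 9, so ATOMIC_HASH_SIZE = 512 locks,
 * occupying 512 * 4 = 2 KB, comfortably within the one-page limit
 * described above.
 */
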
#ifndef __ASSEMBLY__
extern int atomic_locks[];
#endif

/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock. Note that we
 * mention the register number in a comment in "lib/atomic_asm.S" to
 * discourage assembly coders from using this register by mistake, so
 * if it is changed here, change that comment as well.
 */
#define ATOMIC_LOCK_REG 20
#define ATOMIC_LOCK_REG_NAME r20

#ifndef __ASSEMBLY__

/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/* Return a pointer to the lock for the given address. */
int *__atomic_hashed_lock(volatile void *v);

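/*
 * Conceptually (a sketch only; the real routine is defined out of line
 * and may differ), the lock is picked by hashing low pointer bits into
 * atomic_locks[]:
 *
 *	return &atomic_locks[((unsigned long)v >> 2) &
 *			     (ATOMIC_HASH_SIZE - 1)];
 *
 * The ">> 2" drops the alignment bits of a 4-byte-aligned word and the
 * mask keeps the index within the table.
 */
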
/* Private helper routines in lib/atomic_asm_32.S */
struct __get_user {
        unsigned long val;
        int err;
};
extern struct __get_user __atomic_cmpxchg(volatile int *p,
                                          int *lock, int o, int n);
extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
                                                  int *lock, int o, int n);
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_and(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
                                    long long o, long long n);
extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
                                     long long n);
extern long long __atomic64_xchg_add_unless(volatile long long *p,
                                            int *lock, long long o, long long n);
extern long long __atomic64_and(volatile long long *p, int *lock, long long n);
extern long long __atomic64_or(volatile long long *p, int *lock, long long n);
extern long long __atomic64_xor(volatile long long *p, int *lock, long long n);

/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */