/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
#include <asm/barrier.h>

/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */

#ifndef atomic_read_acquire
#define atomic_read_acquire(v)		smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic_set_release
#define atomic_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif

/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 */
#define __atomic_op_acquire(op, args...) \
({ \
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
	smp_mb__after_atomic(); \
	__ret; \
})

#define __atomic_op_release(op, args...) \
({ \
	smp_mb__before_atomic(); \
	op##_relaxed(args); \
})

#define __atomic_op_fence(op, args...) \
({ \
	typeof(op##_relaxed(args)) __ret; \
	smp_mb__before_atomic(); \
	__ret = op##_relaxed(args); \
	smp_mb__after_atomic(); \
	__ret; \
})
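
/*
 * Illustrative sketch (an addition for clarity, not part of the original
 * header): one common way the ACQUIRE/RELEASE semantics described above are
 * paired. A writer publishes a payload and then sets a flag with RELEASE
 * semantics; a reader observes the flag with ACQUIRE semantics before
 * touching the payload. The function and variable names are hypothetical.
 */
static inline void example_publish(atomic_t *flag, int *payload, int val)
{
	*payload = val;			/* plain store of the data */
	atomic_set_release(flag, 1);	/* orders the store above before the flag update */
}

static inline int example_consume(atomic_t *flag, int *payload)
{
	if (atomic_read_acquire(flag))	/* orders the flag load before the payload load */
		return *payload;
	return -1;			/* flag not yet set; payload may not be visible */
}
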
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
#define atomic_add_return_relaxed	atomic_add_return
#define atomic_add_return_acquire	atomic_add_return
#define atomic_add_return_release	atomic_add_return
#else /* atomic_add_return_relaxed */
#ifndef atomic_add_return_acquire
#define atomic_add_return_acquire(...) \
	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif
#ifndef atomic_add_return_release
#define atomic_add_return_release(...) \
	__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif
#ifndef atomic_add_return
#define atomic_add_return(...) \
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
#endif /* atomic_add_return_relaxed */

/* atomic_inc_return_relaxed */
#ifndef atomic_inc_return_relaxed
#define atomic_inc_return_relaxed	atomic_inc_return
#define atomic_inc_return_acquire	atomic_inc_return
#define atomic_inc_return_release	atomic_inc_return
#else /* atomic_inc_return_relaxed */
#ifndef atomic_inc_return_acquire
#define atomic_inc_return_acquire(...) \
	__atomic_op_acquire(atomic_inc_return, __VA_ARGS__)
#endif
#ifndef atomic_inc_return_release
#define atomic_inc_return_release(...) \
	__atomic_op_release(atomic_inc_return, __VA_ARGS__)
#endif
#ifndef atomic_inc_return
#define atomic_inc_return(...) \
	__atomic_op_fence(atomic_inc_return, __VA_ARGS__)
#endif
#endif /* atomic_inc_return_relaxed */

/* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return
#define atomic_sub_return_acquire	atomic_sub_return
#define atomic_sub_return_release	atomic_sub_return
#else /* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_acquire
#define atomic_sub_return_acquire(...) \
	__atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
#endif
#ifndef atomic_sub_return_release
#define atomic_sub_return_release(...) \
	__atomic_op_release(atomic_sub_return, __VA_ARGS__)
#endif
#ifndef atomic_sub_return
#define atomic_sub_return(...) \
	__atomic_op_fence(atomic_sub_return, __VA_ARGS__)
#endif
#endif /* atomic_sub_return_relaxed */

/* atomic_dec_return_relaxed */
#ifndef atomic_dec_return_relaxed
#define atomic_dec_return_relaxed	atomic_dec_return
#define atomic_dec_return_acquire	atomic_dec_return
#define atomic_dec_return_release	atomic_dec_return
#else /* atomic_dec_return_relaxed */
#ifndef atomic_dec_return_acquire
#define atomic_dec_return_acquire(...) \
	__atomic_op_acquire(atomic_dec_return, __VA_ARGS__)
#endif
#ifndef atomic_dec_return_release
#define atomic_dec_return_release(...) \
	__atomic_op_release(atomic_dec_return, __VA_ARGS__)
#endif
#ifndef atomic_dec_return
#define atomic_dec_return(...) \
	__atomic_op_fence(atomic_dec_return, __VA_ARGS__)
#endif
#endif /* atomic_dec_return_relaxed */

/* atomic_xchg_relaxed */
#ifndef atomic_xchg_relaxed
#define atomic_xchg_relaxed		atomic_xchg
#define atomic_xchg_acquire		atomic_xchg
#define atomic_xchg_release		atomic_xchg
#else /* atomic_xchg_relaxed */
#ifndef atomic_xchg_acquire
#define atomic_xchg_acquire(...) \
	__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif
#ifndef atomic_xchg_release
#define atomic_xchg_release(...) \
	__atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif
#ifndef atomic_xchg
#define atomic_xchg(...) \
	__atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
#endif /* atomic_xchg_relaxed */

/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define atomic_cmpxchg_relaxed		atomic_cmpxchg
#define atomic_cmpxchg_acquire		atomic_cmpxchg
#define atomic_cmpxchg_release		atomic_cmpxchg
#else /* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_acquire
#define atomic_cmpxchg_acquire(...) \
	__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif
#ifndef atomic_cmpxchg_release
#define atomic_cmpxchg_release(...) \
	__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif
#ifndef atomic_cmpxchg
#define atomic_cmpxchg(...) \
	__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic_cmpxchg_relaxed */
#ifndef atomic64_read_acquire
#define atomic64_read_acquire(v)	smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic64_set_release
#define atomic64_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif

/* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_relaxed
#define atomic64_add_return_relaxed	atomic64_add_return
#define atomic64_add_return_acquire	atomic64_add_return
#define atomic64_add_return_release	atomic64_add_return
#else /* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_acquire
#define atomic64_add_return_acquire(...) \
	__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
#endif
#ifndef atomic64_add_return_release
#define atomic64_add_return_release(...) \
	__atomic_op_release(atomic64_add_return, __VA_ARGS__)
#endif
#ifndef atomic64_add_return
#define atomic64_add_return(...) \
	__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
#endif
#endif /* atomic64_add_return_relaxed */

/* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_relaxed
#define atomic64_inc_return_relaxed	atomic64_inc_return
#define atomic64_inc_return_acquire	atomic64_inc_return
#define atomic64_inc_return_release	atomic64_inc_return
#else /* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_acquire
#define atomic64_inc_return_acquire(...) \
	__atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
#endif
#ifndef atomic64_inc_return_release
#define atomic64_inc_return_release(...) \
	__atomic_op_release(atomic64_inc_return, __VA_ARGS__)
#endif
#ifndef atomic64_inc_return
#define atomic64_inc_return(...) \
	__atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
#endif
#endif /* atomic64_inc_return_relaxed */

/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return
#define atomic64_sub_return_acquire	atomic64_sub_return
#define atomic64_sub_return_release	atomic64_sub_return
#else /* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_acquire
#define atomic64_sub_return_acquire(...) \
	__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif
#ifndef atomic64_sub_return_release
#define atomic64_sub_return_release(...) \
	__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif
#ifndef atomic64_sub_return
#define atomic64_sub_return(...) \
	__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif /* atomic64_sub_return_relaxed */

/* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_relaxed
#define atomic64_dec_return_relaxed	atomic64_dec_return
#define atomic64_dec_return_acquire	atomic64_dec_return
#define atomic64_dec_return_release	atomic64_dec_return
#else /* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_acquire
#define atomic64_dec_return_acquire(...) \
	__atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
#endif
#ifndef atomic64_dec_return_release
#define atomic64_dec_return_release(...) \
	__atomic_op_release(atomic64_dec_return, __VA_ARGS__)
#endif
#ifndef atomic64_dec_return
#define atomic64_dec_return(...) \
	__atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
#endif
#endif /* atomic64_dec_return_relaxed */

/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_relaxed
#define atomic64_xchg_relaxed		atomic64_xchg
#define atomic64_xchg_acquire		atomic64_xchg
#define atomic64_xchg_release		atomic64_xchg
#else /* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_acquire
#define atomic64_xchg_acquire(...) \
	__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif
#ifndef atomic64_xchg_release
#define atomic64_xchg_release(...) \
	__atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif
#ifndef atomic64_xchg
#define atomic64_xchg(...) \
	__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif /* atomic64_xchg_relaxed */

/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_relaxed
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg
#define atomic64_cmpxchg_acquire	atomic64_cmpxchg
#define atomic64_cmpxchg_release	atomic64_cmpxchg
#else /* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_acquire
#define atomic64_cmpxchg_acquire(...) \
	__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif
#ifndef atomic64_cmpxchg_release
#define atomic64_cmpxchg_release(...) \
	__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif
#ifndef atomic64_cmpxchg
#define atomic64_cmpxchg(...) \
	__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic64_cmpxchg_relaxed */

/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define cmpxchg_relaxed			cmpxchg
#define cmpxchg_acquire			cmpxchg
#define cmpxchg_release			cmpxchg
#else /* cmpxchg_relaxed */
#ifndef cmpxchg_acquire
#define cmpxchg_acquire(...) \
	__atomic_op_acquire(cmpxchg, __VA_ARGS__)
#endif
#ifndef cmpxchg_release
#define cmpxchg_release(...) \
	__atomic_op_release(cmpxchg, __VA_ARGS__)
#endif
#ifndef cmpxchg
#define cmpxchg(...) \
	__atomic_op_fence(cmpxchg, __VA_ARGS__)
#endif
#endif /* cmpxchg_relaxed */

/* cmpxchg64_relaxed */
#ifndef cmpxchg64_relaxed
#define cmpxchg64_relaxed		cmpxchg64
#define cmpxchg64_acquire		cmpxchg64
#define cmpxchg64_release		cmpxchg64
#else /* cmpxchg64_relaxed */
#ifndef cmpxchg64_acquire
#define cmpxchg64_acquire(...) \
	__atomic_op_acquire(cmpxchg64, __VA_ARGS__)
#endif
#ifndef cmpxchg64_release
#define cmpxchg64_release(...) \
	__atomic_op_release(cmpxchg64, __VA_ARGS__)
#endif
#ifndef cmpxchg64
#define cmpxchg64(...) \
	__atomic_op_fence(cmpxchg64, __VA_ARGS__)
#endif
#endif /* cmpxchg64_relaxed */

/* xchg_relaxed */
#ifndef xchg_relaxed
#define xchg_relaxed			xchg
#define xchg_acquire			xchg
#define xchg_release			xchg
#else /* xchg_relaxed */
#ifndef xchg_acquire
#define xchg_acquire(...)		__atomic_op_acquire(xchg, __VA_ARGS__)
#endif
#ifndef xchg_release
#define xchg_release(...)		__atomic_op_release(xchg, __VA_ARGS__)
#endif
#ifndef xchg
#define xchg(...)			__atomic_op_fence(xchg, __VA_ARGS__)
#endif
#endif /* xchg_relaxed */
/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;
}

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
#endif
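
/*
 * Illustrative sketch (an addition, not part of the original header): the
 * classic "take a reference only if the object is still live" pattern built
 * on atomic_add_unless()/atomic_inc_not_zero(). The function name is
 * hypothetical.
 */
static inline int example_try_get(atomic_t *refcount)
{
	/* Returns non-zero if the count was non-zero and has been incremented. */
	return atomic_inc_not_zero(refcount);
}
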
#ifndef atomic_andnot
static inline void atomic_andnot(int i, atomic_t *v)
{
	atomic_and(~i, v);
}
#endif

static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_andnot(mask, v);
}

static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_or(mask, v);
}
/**
 * atomic_inc_not_zero_hint - increment if not null
 * @v: pointer of type atomic_t
 * @hint: probable value of the atomic before the increment
 *
 * This version of atomic_inc_not_zero() gives a hint of the probable
 * value of the atomic. This helps the processor avoid reading the memory
 * location before the atomic read/modify/write cycle, lowering the
 * number of bus transactions on some arches.
 *
 * Returns: 0 if the increment was not done, 1 otherwise.
 */
#ifndef atomic_inc_not_zero_hint
static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
{
	int val, c = hint;

	/* sanity test, should be removed by compiler if hint is a constant */
	if (!hint)
		return atomic_inc_not_zero(v);

	do {
		val = atomic_cmpxchg(v, c, c + 1);
		if (val == c)
			return 1;
		c = val;
	} while (c);

	return 0;
}
#endif
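
/*
 * Illustrative sketch (an addition, not part of the original header): when
 * the caller expects the counter to usually hold a known value, that value
 * can be passed as the hint so the first cmpxchg attempt skips the initial
 * read. The function name and the assumption that the count is normally 1
 * are hypothetical.
 */
static inline int example_try_get_likely_one(atomic_t *refcount)
{
	/* Most objects are expected to hold a single reference here. */
	return atomic_inc_not_zero_hint(refcount, 1);
}
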
#ifndef atomic_inc_unless_negative
static inline int atomic_inc_unless_negative(atomic_t *p)
{
	int v, v1;

	for (v = 0; v >= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v + 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif

#ifndef atomic_dec_unless_positive
static inline int atomic_dec_unless_positive(atomic_t *p)
{
	int v, v1;

	for (v = 0; v <= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v - 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif
/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
#ifndef atomic_dec_if_positive
static inline int atomic_dec_if_positive(atomic_t *v)
{
	int c, old, dec;

	c = atomic_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
#endif
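
/*
 * Illustrative sketch (an addition, not part of the original header): a
 * semaphore-like "take one unit only if any are available" helper built on
 * atomic_dec_if_positive(). A negative return from the primitive means the
 * counter was already zero and nothing was taken. The function name is
 * hypothetical.
 */
static inline int example_try_take_unit(atomic_t *available)
{
	/* Decrements only when the old value was positive; returns 1 on success. */
	return atomic_dec_if_positive(available) >= 0;
}
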
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif

#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
	atomic64_and(~i, v);
}
#endif

#include <asm-generic/atomic-long.h>

#endif /* _LINUX_ATOMIC_H */