/* atomic_defs.h: generator macros for the FRV atomic operations */
  1. #include <asm/spr-regs.h>
  2. #ifdef __ATOMIC_LIB__
  3. #ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
  4. #define ATOMIC_QUALS
  5. #define ATOMIC_EXPORT(x) EXPORT_SYMBOL(x)
  6. #else /* !OUTOFLINE && LIB */
  7. #define ATOMIC_OP_RETURN(op)
  8. #define ATOMIC_FETCH_OP(op)
  9. #endif /* OUTOFLINE */
  10. #else /* !__ATOMIC_LIB__ */
  11. #define ATOMIC_EXPORT(x)
  12. #ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
  13. #define ATOMIC_OP_RETURN(op) \
  14. extern int __atomic_##op##_return(int i, int *v); \
  15. extern long long __atomic64_##op##_return(long long i, long long *v);
  16. #define ATOMIC_FETCH_OP(op) \
  17. extern int __atomic32_fetch_##op(int i, int *v); \
  18. extern long long __atomic64_fetch_##op(long long i, long long *v);
  19. #else /* !OUTOFLINE && !LIB */
  20. #define ATOMIC_QUALS static inline
  21. #endif /* OUTOFLINE */
  22. #endif /* __ATOMIC_LIB__ */
/*
 * Note on the 64 bit inline asm variants...
 *
 * CSTD is a conditional instruction and needs a constrained memory reference.
 * Normally 'U' provides the correct constraints for conditional instructions
 * and this is used for the 32 bit version, however 'U' does not appear to work
 * for 64 bit values (gcc-4.9)
 *
 * The exact constraint is that conditional instructions cannot deal with an
 * immediate displacement in the memory reference, so what we do is we read the
 * address through a volatile cast into a local variable in order to ensure we
 * _have_ to compute the correct address without displacement. This allows us
 * to use the regular 'm' for the memory address.
 *
 * Furthermore, the %Ln operand, which prints the low word register (r+1),
 * really only works for registers, this means we cannot allow immediate values
 * for the 64 bit versions -- like we do for the 32 bit ones.
 */
  42. #ifndef ATOMIC_OP_RETURN
  43. #define ATOMIC_OP_RETURN(op) \
  44. ATOMIC_QUALS int __atomic_##op##_return(int i, int *v) \
  45. { \
  46. int val; \
  47. \
  48. asm volatile( \
  49. "0: \n" \
  50. " orcc gr0,gr0,gr0,icc3 \n" \
  51. " ckeq icc3,cc7 \n" \
  52. " ld.p %M0,%1 \n" \
  53. " orcr cc7,cc7,cc3 \n" \
  54. " "#op"%I2 %1,%2,%1 \n" \
  55. " cst.p %1,%M0 ,cc3,#1 \n" \
  56. " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
  57. " beq icc3,#0,0b \n" \
  58. : "+U"(*v), "=&r"(val) \
  59. : "NPr"(i) \
  60. : "memory", "cc7", "cc3", "icc3" \
  61. ); \
  62. \
  63. return val; \
  64. } \
  65. ATOMIC_EXPORT(__atomic_##op##_return); \
  66. \
  67. ATOMIC_QUALS long long __atomic64_##op##_return(long long i, long long *v) \
  68. { \
  69. long long *__v = READ_ONCE(v); \
  70. long long val; \
  71. \
  72. asm volatile( \
  73. "0: \n" \
  74. " orcc gr0,gr0,gr0,icc3 \n" \
  75. " ckeq icc3,cc7 \n" \
  76. " ldd.p %M0,%1 \n" \
  77. " orcr cc7,cc7,cc3 \n" \
  78. " "#op"cc %L1,%L2,%L1,icc0 \n" \
  79. " "#op"x %1,%2,%1,icc0 \n" \
  80. " cstd.p %1,%M0 ,cc3,#1 \n" \
  81. " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
  82. " beq icc3,#0,0b \n" \
  83. : "+m"(*__v), "=&e"(val) \
  84. : "e"(i) \
  85. : "memory", "cc7", "cc3", "icc0", "icc3" \
  86. ); \
  87. \
  88. return val; \
  89. } \
  90. ATOMIC_EXPORT(__atomic64_##op##_return);
  91. #endif
  92. #ifndef ATOMIC_FETCH_OP
  93. #define ATOMIC_FETCH_OP(op) \
  94. ATOMIC_QUALS int __atomic32_fetch_##op(int i, int *v) \
  95. { \
  96. int old, tmp; \
  97. \
  98. asm volatile( \
  99. "0: \n" \
  100. " orcc gr0,gr0,gr0,icc3 \n" \
  101. " ckeq icc3,cc7 \n" \
  102. " ld.p %M0,%1 \n" \
  103. " orcr cc7,cc7,cc3 \n" \
  104. " "#op"%I3 %1,%3,%2 \n" \
  105. " cst.p %2,%M0 ,cc3,#1 \n" \
  106. " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
  107. " beq icc3,#0,0b \n" \
  108. : "+U"(*v), "=&r"(old), "=r"(tmp) \
  109. : "NPr"(i) \
  110. : "memory", "cc7", "cc3", "icc3" \
  111. ); \
  112. \
  113. return old; \
  114. } \
  115. ATOMIC_EXPORT(__atomic32_fetch_##op); \
  116. \
  117. ATOMIC_QUALS long long __atomic64_fetch_##op(long long i, long long *v) \
  118. { \
  119. long long *__v = READ_ONCE(v); \
  120. long long old, tmp; \
  121. \
  122. asm volatile( \
  123. "0: \n" \
  124. " orcc gr0,gr0,gr0,icc3 \n" \
  125. " ckeq icc3,cc7 \n" \
  126. " ldd.p %M0,%1 \n" \
  127. " orcr cc7,cc7,cc3 \n" \
  128. " "#op" %L1,%L3,%L2 \n" \
  129. " "#op" %1,%3,%2 \n" \
  130. " cstd.p %2,%M0 ,cc3,#1 \n" \
  131. " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
  132. " beq icc3,#0,0b \n" \
  133. : "+m"(*__v), "=&e"(old), "=e"(tmp) \
  134. : "e"(i) \
  135. : "memory", "cc7", "cc3", "icc3" \
  136. ); \
  137. \
  138. return old; \
  139. } \
  140. ATOMIC_EXPORT(__atomic64_fetch_##op);
  141. #endif
  142. ATOMIC_FETCH_OP(or)
  143. ATOMIC_FETCH_OP(and)
  144. ATOMIC_FETCH_OP(xor)
  145. ATOMIC_OP_RETURN(add)
  146. ATOMIC_OP_RETURN(sub)
  147. #undef ATOMIC_FETCH_OP
  148. #undef ATOMIC_OP_RETURN
  149. #undef ATOMIC_QUALS
  150. #undef ATOMIC_EXPORT