cmpxchg.h

/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/smp.h>
#ifdef CONFIG_ARC_HAS_LLSC

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
        unsigned long prev;

        /*
         * Explicit full memory barrier needed before/after as
         * LLOCK/SCOND themselves don't provide any such semantics
         */
        smp_mb();

        __asm__ __volatile__(
        "1:     llock   %0, [%1]        \n"
        "       brne    %0, %2, 2f      \n"
        "       scond   %3, [%1]        \n"
        "       bnz     1b              \n"
        "2:                             \n"
        : "=&r"(prev)   /* Early clobber, to prevent reg reuse */
        : "r"(ptr),     /* Not "m": llock only supports reg direct addr mode */
          "ir"(expected),
          "r"(new)      /* can't be "ir". scond can't take LIMM for "b" */
        : "cc", "memory");      /* so that gcc knows memory is being written here */

        smp_mb();

        return prev;
}
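/*
 * A rough C-level sketch of what the LLOCK/SCOND loop above implements
 * (illustrative only; the hardware performs this as an atomic
 * load-locked/store-conditional sequence which plain C cannot express,
 * and store_conditional() below is a made-up stand-in for SCOND):
 *
 *	do {
 *		prev = *(unsigned long *)ptr;		// llock
 *		if (prev != expected)
 *			break;				// brne: no store attempted
 *	} while (!store_conditional(ptr, new));		// scond + bnz: retry on failure
 *
 * i.e. the store is only attempted when the current value matches
 * @expected, and it is retried if another CPU touched the location
 * between the llock and the scond.
 */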
#else

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
        unsigned long flags;
        int prev;
        volatile unsigned long *p = ptr;

        /*
         * spin lock/unlock provide the needed smp_mb() before/after
         */
        atomic_ops_lock(flags);
        prev = *p;
        if (prev == expected)
                *p = new;
        atomic_ops_unlock(flags);

        return prev;
}

#endif /* CONFIG_ARC_HAS_LLSC */
#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
                                (unsigned long)(o), (unsigned long)(n)))

/*
 * Since not supported natively, ARC cmpxchg() uses atomic_ops_lock (UP/SMP)
 * just to guarantee semantics.
 * atomic_cmpxchg() needs to use the same locks as its other atomic siblings,
 * which also happen to be atomic_ops_lock.
 *
 * Thus despite being semantically different, the implementation of
 * atomic_cmpxchg() is the same as that of cmpxchg().
 */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
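/*
 * Usage sketch (illustrative, not part of this header): callers typically
 * wrap cmpxchg()/atomic_cmpxchg() in a read/compute/retry loop.
 * add_if_positive() below is a made-up example.
 *
 *	static int add_if_positive(atomic_t *v, int a)
 *	{
 *		int cur = atomic_read(v), old;
 *
 *		while (cur > 0) {
 *			old = atomic_cmpxchg(v, cur, cur + a);
 *			if (old == cur)
 *				return 1;	// swap hit, value updated
 *			cur = old;		// lost a race, retry with fresh value
 *		}
 *		return 0;
 *	}
 */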
/*
 * xchg (reg with memory) based on "Native atomic" EX insn
 */
static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
                                   int size)
{
        extern unsigned long __xchg_bad_pointer(void);

        switch (size) {
        case 4:
                smp_mb();

                __asm__ __volatile__(
                "       ex  %0, [%1]    \n"
                : "+r"(val)
                : "r"(ptr)
                : "memory");

                smp_mb();

                return val;
        }
        return __xchg_bad_pointer();
}

#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
                                                 sizeof(*(ptr))))
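/*
 * C-level sketch of what the EX instruction does (illustrative only; the
 * real register<->memory swap happens as one atomic operation, which the
 * two separate C statements below cannot guarantee):
 *
 *	unsigned long tmp = *(volatile unsigned long *)ptr;
 *	*(volatile unsigned long *)ptr = val;
 *	val = tmp;		// old memory contents come back in val
 */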
/*
 * xchg() maps directly to the ARC EX instruction, which guarantees atomicity.
 * However, in a !LLSC config it also needs to take the @atomic_ops_lock
 * spinlock, for a subtle reason:
 * - For !LLSC, cmpxchg() needs to use that lock (see above), and there is a
 *   lot of kernel code which calls xchg()/cmpxchg() on the same data (see
 *   llist.h).  Hence xchg() needs to follow the same locking rules.
 *
 * Technically the lock is also needed for UP (it boils down to irq
 * save/restore), but we can cheat a bit: since cmpxchg()'s atomic_ops_lock()
 * disables irqs, it can't possibly be interrupted/preempted/clobbered by
 * xchg().  The other way around, xchg() is a single instruction anyway, so it
 * can't be interrupted as such.
 */
#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)

#define xchg(ptr, with)                 \
({                                      \
        unsigned long flags;            \
        typeof(*(ptr)) old_val;         \
                                        \
        atomic_ops_lock(flags);         \
        old_val = _xchg(ptr, with);     \
        atomic_ops_unlock(flags);       \
        old_val;                        \
})

#else

#define xchg(ptr, with) _xchg(ptr, with)

#endif
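/*
 * Illustrative sketch (not from this header) of why xchg() must share
 * atomic_ops_lock with cmpxchg() in the !LLSC SMP case: generic code such
 * as llist.h mixes both primitives on the same word.  The names node,
 * push() and pop_all() below are made up for the example.
 *
 *	struct node { struct node *next; };
 *
 *	void push(struct node **head, struct node *n)
 *	{
 *		struct node *first;
 *
 *		do {
 *			first = *head;
 *			n->next = first;
 *		} while (cmpxchg(head, first, n) != first);
 *	}
 *
 *	struct node *pop_all(struct node **head)
 *	{
 *		return xchg(head, NULL);	// grab the whole list at once
 *	}
 *
 * If xchg() bypassed the lock while cmpxchg() serialized under it, the two
 * could interleave on the same @head and the !LLSC emulation would not be
 * atomic with respect to each other.
 */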
/*
 * "atomic" variant of xchg()
 * REQ: It needs to follow the same serialization rules as other atomic_xxx()
 * Since xchg() doesn't always do that, the following definition might seem
 * incorrect.  But here's the rationale:
 *   SMP : Even xchg() takes the atomic_ops_lock, so OK.
 *   LLSC: atomic_ops_lock is not relevant at all (even for SMP, since LLSC
 *         is natively "SMP safe", no serialization is required).
 *   UP  : other atomics disable IRQs, so there is no way an atomic_xchg()
 *         from a different context could clobber them.  atomic_xchg() itself
 *         is a single instruction, so it can't be clobbered by others.  Thus
 *         no serialization is required when atomic_xchg() is involved.
 */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
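/*
 * Usage sketch (illustrative, not part of this header): atomic_xchg() is
 * handy for claiming a value and resetting it in one shot.  "pending" and
 * process_events() below are made-up names.
 *
 *	static atomic_t pending = ATOMIC_INIT(0);
 *
 *	void consume_pending(void)
 *	{
 *		int nr = atomic_xchg(&pending, 0);	// take and clear atomically
 *
 *		if (nr)
 *			process_events(nr);
 *	}
 */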
#endif