/*
 * atomic32.c: 32-bit atomic_t implementation
 *
 * Copyright (C) 2004 Keith M Wesolowski
 * Copyright (C) 2007 Kyle McMartin
 *
 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
 */

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/module.h>
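
/*
 * No hardware atomic instructions are relied on here: every operation
 * below becomes atomic by taking a spinlock with interrupts disabled.
 * On SMP the operand's address is hashed to one of ATOMIC_HASH_SIZE
 * locks, so unrelated atomics rarely contend; on UP a single dummy
 * lock suffices, since spin_lock_irqsave() reduces to disabling
 * interrupts.
 */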
#ifdef CONFIG_SMP
#define ATOMIC_HASH_SIZE        4
#define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])

spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
        [0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};

#else /* SMP */

static DEFINE_SPINLOCK(dummy);
#define ATOMIC_HASH_SIZE        1
#define ATOMIC_HASH(a)          (&dummy)

#endif /* SMP */
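
/*
 * ATOMIC_OP_RETURN generates atomic_<op>_return() (apply <op> and
 * return the new value); ATOMIC_OP generates a void atomic_<op>().
 * Both wrap a plain read-modify-write in the hashed lock.  The
 * instantiations below produce atomic_add_return(), atomic_and(),
 * atomic_or() and atomic_xor().
 */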
#define ATOMIC_OP_RETURN(op, c_op) \
int atomic_##op##_return(int i, atomic_t *v) \
{ \
        int ret; \
        unsigned long flags; \
        spin_lock_irqsave(ATOMIC_HASH(v), flags); \
        \
        ret = (v->counter c_op i); \
        \
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
        return ret; \
} \
EXPORT_SYMBOL(atomic_##op##_return);

#define ATOMIC_OP(op, c_op) \
void atomic_##op(int i, atomic_t *v) \
{ \
        unsigned long flags; \
        spin_lock_irqsave(ATOMIC_HASH(v), flags); \
        \
        v->counter c_op i; \
        \
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
} \
EXPORT_SYMBOL(atomic_##op);

ATOMIC_OP_RETURN(add, +=)
ATOMIC_OP(and, &=)
ATOMIC_OP(or, |=)
ATOMIC_OP(xor, ^=)

#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
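
/* Swap in a new value and return the one it replaced. */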
int atomic_xchg(atomic_t *v, int new)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(ATOMIC_HASH(v), flags);
        ret = v->counter;
        v->counter = new;
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
        return ret;
}
EXPORT_SYMBOL(atomic_xchg);
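
/*
 * Store @new only if the counter still holds @old.  The previous
 * value is returned either way, so callers detect failure by
 * comparing the result against @old.
 */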
int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(ATOMIC_HASH(v), flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
        return ret;
}
EXPORT_SYMBOL(atomic_cmpxchg);
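
/*
 * Add @a to @v unless the counter equals @u; the old value is
 * returned either way.  The generic atomic_add_unless() and
 * atomic_inc_not_zero() helpers are built on this.
 */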
int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(ATOMIC_HASH(v), flags);
        ret = v->counter;
        if (ret != u)
                v->counter += a;
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
        return ret;
}
EXPORT_SYMBOL(__atomic_add_unless);

/* Atomic operations are already serializing */
void atomic_set(atomic_t *v, int i)
{
        unsigned long flags;

        spin_lock_irqsave(ATOMIC_HASH(v), flags);
        v->counter = i;
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
EXPORT_SYMBOL(atomic_set);
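
/*
 * Spinlock-backed bitops: each helper takes a word address and a mask,
 * applies the operation under the hashed lock, and returns the masked
 * old bits, i.e. nonzero iff the bit was previously set.  That return
 * value lets the test_and_{set,clear,change}_bit() primitives be
 * implemented on top of these.
 */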
unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{
        unsigned long old, flags;

        spin_lock_irqsave(ATOMIC_HASH(addr), flags);
        old = *addr;
        *addr = old | mask;
        spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
        return old & mask;
}
EXPORT_SYMBOL(___set_bit);

unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
{
        unsigned long old, flags;

        spin_lock_irqsave(ATOMIC_HASH(addr), flags);
        old = *addr;
        *addr = old & ~mask;
        spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
        return old & mask;
}
EXPORT_SYMBOL(___clear_bit);

unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
{
        unsigned long old, flags;

        spin_lock_irqsave(ATOMIC_HASH(addr), flags);
        old = *addr;
        *addr = old ^ mask;
        spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
        return old & mask;
}
EXPORT_SYMBOL(___change_bit);
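
/*
 * Backends for the generic cmpxchg()/xchg() macros on 32-bit words.
 * They take the same hashed locks, so they serialize correctly
 * against the atomic_t operations above on the same addresses.
 */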
unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
{
        unsigned long flags;
        u32 prev;

        spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
        if ((prev = *ptr) == old)
                *ptr = new;
        spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
        return (unsigned long)prev;
}
EXPORT_SYMBOL(__cmpxchg_u32);

unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
{
        unsigned long flags;
        u32 prev;

        spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
        prev = *ptr;
        *ptr = new;
        spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
        return (unsigned long)prev;
}
EXPORT_SYMBOL(__xchg_u32);