#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_

#include <asm/types.h>
#include <linux/irqflags.h>

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
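
/*
 * Illustrative sketch, not part of this header: how ATOMIC_HASH() picks a
 * lock.  Addresses in the same L1 cacheline always hash to the same
 * arch_spinlock_t, so concurrent bitops on neighbouring words serialize on
 * one lock.  The bitmap below and its name are hypothetical;
 * ____cacheline_aligned comes from <linux/cache.h>.
 *
 *	static unsigned long example_map[16] ____cacheline_aligned;
 *
 *	arch_spinlock_t *a = ATOMIC_HASH(&example_map[0]);
 *	arch_spinlock_t *b = ATOMIC_HASH(&example_map[1]);
 *	// same cacheline, so a == b
 *	arch_spinlock_t *c = ATOMIC_HASH(&example_map[8]);
 *	// 64 bytes further on with 8-byte longs; on a 64-byte-cacheline
 *	// machine that is the next line, which hashes to the next of the
 *	// ATOMIC_HASH_SIZE locks
 */
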
/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)

#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * NMI events can occur at any time, including when interrupts have been
 * disabled by *_irqsave().  So you can get NMI events occurring while a
 * *_bit function is holding a spin lock.  If the NMI handler also wants
 * to do bit manipulation (and they do) then you can get a deadlock
 * between the original caller of *_bit() and the NMI handler.
 *
 * by Keith Owens
 */
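
/*
 * A hypothetical trace of that deadlock, for illustration only:
 *
 *	CPU0 calls set_bit(0, &flags)
 *	  -> takes ATOMIC_HASH(&flags) with interrupts disabled
 *	  -> an NMI arrives (NMIs ignore local_irq_save())
 *	     NMI handler calls set_bit(1, &flags)
 *	       -> hashes to the same lock, which CPU0 still holds
 *	       -> spins forever: deadlock
 */
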
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its ordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p |= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
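
/*
 * Illustrative usage, with hypothetical names (DECLARE_BITMAP() comes from
 * <linux/types.h>): marking pages dirty from process and interrupt context
 * at once.  Concurrent callers that hit the same word take the same hashed
 * spinlock (or, on UP, simply run with interrupts disabled), so the
 * read-modify-write of the word holding the bit cannot be lost.
 *
 *	static DECLARE_BITMAP(dirty_map, 128);
 *
 *	static void mark_dirty(unsigned int page)
 *	{
 *		set_bit(page, dirty_map);	// atomic w.r.t. other bitops
 *	}
 */
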
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p &= ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
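
/*
 * Illustrative sketch of the barrier usage mentioned above; MY_LOCK_BIT and
 * my_flags are made-up names.  When a bit is used as a lock, the release
 * must not become visible before the stores done while "holding" it:
 *
 *	smp_mb__before_atomic();		// order prior stores first
 *	clear_bit(MY_LOCK_BIT, &my_flags);	// then let others see the bit clear
 */
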
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered on x86; on other
 * architectures, ordering is not guaranteed.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p ^= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
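
/*
 * Illustrative usage, with made-up names: flipping a flag whose previous
 * value does not matter, e.g. inverting a polarity bit that an interrupt
 * handler may also update.
 *
 *	change_bit(POLARITY_BIT, &config_flags);
 */
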
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; on other
 * architectures, ordering is not guaranteed.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old | mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
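
/*
 * Illustrative usage, with made-up names: test_and_set_bit() as a one-shot
 * claim.  Only the caller that sees the old value as 0 wins; every other
 * caller sees 1 and backs off.
 *
 *	static unsigned long resource_flags;
 *	#define RES_IN_USE	0	// bit number, hypothetical
 *
 *	static int try_claim(void)
 *	{
 *		if (test_and_set_bit(RES_IN_USE, &resource_flags))
 *			return -EBUSY;	// already set: someone else owns it
 *		return 0;		// we set it first: the resource is ours
 *	}
 */
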
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; on other
 * architectures, ordering is not guaranteed.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old & ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
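
/*
 * Illustrative usage, with made-up names: consuming a "work pending" flag.
 * Exactly one caller observes the bit as set and does the work, even if the
 * flag is raised from interrupt context on another CPU.
 *
 *	if (test_and_clear_bit(WORK_PENDING, &pending_flags))
 *		do_pending_work();
 */
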
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old ^ mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
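
/*
 * Illustrative usage, with made-up names: toggling a bit and acting on what
 * it used to be, e.g. alternating between two buffers.
 *
 *	if (test_and_change_bit(ACTIVE_BUF, &buf_state))
 *		fill_buffer(0);		// bit was 1, is now 0
 *	else
 *		fill_buffer(1);		// bit was 0, is now 1
 */
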
#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */