bit_spinlock.h

#ifndef __LINUX_BIT_SPINLOCK_H
#define __LINUX_BIT_SPINLOCK_H

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/atomic.h>
#include <linux/bug.h>

/*
 * bit-based spin_lock()
 *
 * Don't use this unless you really need to: spin_lock() and spin_unlock()
 * are significantly faster.
 */
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
	/*
	 * Assuming the lock is uncontended, this never enters
	 * the body of the outer loop. If it is contended, then
	 * within the inner loop a non-atomic test is used to
	 * busywait with less bus contention for a good time to
	 * attempt to acquire the lock bit.
	 */
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		preempt_enable();
		do {
			cpu_relax();
		} while (test_bit(bitnum, addr));
		preempt_disable();
	}
#endif
	__acquire(bitlock);
}

/*
 * Return true if it was acquired
 */
static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	if (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		preempt_enable();
		return 0;
	}
#endif
	__acquire(bitlock);
	return 1;
}

/*
 * bit-based spin_unlock()
 */
static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	clear_bit_unlock(bitnum, addr);
#endif
	preempt_enable();
	__release(bitlock);
}

/*
 * bit-based spin_unlock()
 * non-atomic version, which can be used eg. if the bit lock itself is
 * protecting the rest of the flags in the word.
 */
static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	__clear_bit_unlock(bitnum, addr);
#endif
	preempt_enable();
	__release(bitlock);
}

/*
 * Return true if the lock is held.
 */
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	return test_bit(bitnum, addr);
#elif defined CONFIG_PREEMPT_COUNT
	return preempt_count();
#else
	return 1;
#endif
}

#endif /* __LINUX_BIT_SPINLOCK_H */
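
A minimal usage sketch of the API above, for context. The struct, field names, and bit number are hypothetical and exist only to illustrate the pattern: one bit of a caller-owned word serves as the lock, and bit_spin_lock()/bit_spin_unlock() bracket the critical section just like spin_lock()/spin_unlock() would. This is not part of bit_spinlock.h.

/*
 * Hypothetical example (not kernel code): use bit 0 of obj->flags as a
 * spinlock protecting obj->counter.
 */
#include <linux/bit_spinlock.h>

#define OBJ_LOCK_BIT	0	/* which bit of ->flags acts as the lock */

struct obj {
	unsigned long flags;	/* bit OBJ_LOCK_BIT is the lock bit */
	int counter;		/* protected by the bit lock */
};

static void obj_inc(struct obj *o)
{
	/* spins (with preemption disabled) until the lock bit is acquired */
	bit_spin_lock(OBJ_LOCK_BIT, &o->flags);
	o->counter++;		/* critical section */
	/* clears the lock bit with release semantics, re-enables preemption */
	bit_spin_unlock(OBJ_LOCK_BIT, &o->flags);
}

As the header's own comment notes, this trades speed for space: a bit lock packs into an existing word (useful when embedding a full spinlock_t per object is too costly), but a regular spin_lock()/spin_unlock() pair is significantly faster and should be preferred when size is not a concern.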