qspinlock.h

/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
 * @lock: Pointer to queued spinlock structure
 *
 * There is a very slight possibility of live-lock if the lockers keep coming
 * and the waiter is just unfortunate enough to not see any unlock state.
 */
#ifndef queued_spin_unlock_wait
extern void queued_spin_unlock_wait(struct qspinlock *lock);
#endif

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
#ifndef queued_spin_is_locked
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * See queued_spin_unlock_wait().
	 *
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
#endif
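
/*
 * Note (added for clarity): lock->val packs the locked byte, the pending
 * bit and the MCS wait-queue tail into a single 32-bit word (see
 * asm-generic/qspinlock_types.h), which is why any nonzero value above is
 * treated as "locked".
 */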

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 * locked wrt the lockref code, to prevent the lockref code from stealing
 * the lock and changing things underneath it. This also allows some
 * optimizations to be applied without conflicting with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	/* Anything above the locked byte (pending bit, queue tail) is a waiter. */
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	if (!atomic_read(&lock->val) &&
	    (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0))
		return 1;
	return 0;
}

/*
 * The contended path lives out of line (kernel/locking/qspinlock.c).
 */
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val;

	/* Fast path: a 0 -> _Q_LOCKED_VAL transition means we now own the lock. */
	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;

	/* Somebody holds or is queuing for the lock; take the slow path. */
	queued_spin_lock_slowpath(lock, val);
}

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * smp_mb__before_atomic() in order to guarantee release semantics
	 */
	smp_mb__before_atomic();
	atomic_sub(_Q_LOCKED_VAL, &lock->val);
}
#endif
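
/*
 * Illustrative sketch (an assumption, not part of this header): an
 * architecture with atomic byte stores can override queued_spin_unlock()
 * above with a cheaper release store of the locked byte, roughly in the
 * style of x86's native_queued_spin_unlock():
 *
 *	static inline void queued_spin_unlock(struct qspinlock *lock)
 *	{
 *		smp_store_release((u8 *)lock, 0);
 *	}
 *
 * This assumes _Q_LOCKED_VAL occupies the least-significant byte of
 * lock->val, as in the little-endian layout of
 * asm-generic/qspinlock_types.h.
 */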

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	/* By default, never bypass the queued slow path when virtualized. */
	return false;
}
#endif

/*
 * Initializer
 */
#define	__ARCH_SPIN_LOCK_UNLOCKED	{ ATOMIC_INIT(0) }
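
/*
 * Usage sketch (assuming the arch_spinlock_t typedef provided by
 * asm-generic/qspinlock_types.h):
 *
 *	static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 */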

/*
 * Remapping architecture-specific spinlock functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
#define arch_spin_lock_flags(l, f)	queued_spin_lock(l)
#define arch_spin_unlock_wait(l)	queued_spin_unlock_wait(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */
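
To make the trylock / fast-path / release pattern above concrete outside the
kernel, here is a minimal userspace sketch using C11 atomics. It is an
analogy, not kernel code: all demo_* names are invented, and a bare cmpxchg
spin stands in for queued_spin_lock_slowpath(), so it has none of the
MCS-queue fairness that the real slow path provides.

/* qspinlock_fastpath_demo.c -- illustrative userspace analogue only. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-in for struct qspinlock: a single atomic word. */
struct demo_lock {
	atomic_uint val;
};

#define DEMO_LOCKED_VAL 1u

/* Mirrors queued_spin_trylock(): claim the lock only if the word is 0. */
static bool demo_trylock(struct demo_lock *lock)
{
	unsigned int expected = 0;

	if (atomic_load_explicit(&lock->val, memory_order_relaxed) != 0)
		return false;
	return atomic_compare_exchange_strong_explicit(&lock->val,
			&expected, DEMO_LOCKED_VAL,
			memory_order_acquire, memory_order_relaxed);
}

/* Mirrors queued_spin_lock(): cmpxchg fast path, plain spin as slow path. */
static void demo_lock_acquire(struct demo_lock *lock)
{
	unsigned int expected = 0;

	if (atomic_compare_exchange_strong_explicit(&lock->val,
			&expected, DEMO_LOCKED_VAL,
			memory_order_acquire, memory_order_relaxed))
		return;	/* fast path: the lock was free */

	/* Stand-in for the slow path: no queue, just retry the cmpxchg. */
	for (;;) {
		expected = 0;
		if (atomic_compare_exchange_weak_explicit(&lock->val,
				&expected, DEMO_LOCKED_VAL,
				memory_order_acquire, memory_order_relaxed))
			return;
	}
}

/* Mirrors queued_spin_unlock(): a store with release semantics. */
static void demo_unlock(struct demo_lock *lock)
{
	atomic_store_explicit(&lock->val, 0, memory_order_release);
}

int main(void)
{
	struct demo_lock lock = { 0 };

	demo_lock_acquire(&lock);
	printf("trylock while held: %d (expect 0)\n", demo_trylock(&lock));
	demo_unlock(&lock);
	printf("trylock after unlock: %d (expect 1)\n", demo_trylock(&lock));
	demo_unlock(&lock);
	return 0;
}

Built with e.g. cc -std=c11 qspinlock_fastpath_demo.c, this prints 0 and
then 1: the trylock fails while the lock word is nonzero and succeeds once
the release store has cleared it.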