/* lockref.c */
  1. #include <linux/export.h>
  2. #include <linux/lockref.h>
#if USE_CMPXCHG_LOCKREF
/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 *
 * CMPXCHG_LOOP() implements the lockless fast path: it operates on the
 * 64-bit combined spinlock/count word (BUILD_BUG_ON enforces the size)
 * of the variable named "lockref" in the caller's scope.  CODE runs
 * with "old" holding the value just read and "new" a writable copy to
 * modify; CODE may "return" or "break" to bail out early.  SUCCESS runs
 * only when the cmpxchg committed "new" (i.e. nobody raced with us).
 * The loop retries only while the embedded spinlock is observed
 * unlocked; once it is seen held, we fall through so the caller can
 * take its spin_lock()-based slow path.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { \
	struct lockref old; \
	BUILD_BUG_ON(sizeof(old) != 8); \
	old.lock_count = READ_ONCE(lockref->lock_count); \
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
		struct lockref new = old, prev = old; \
		CODE \
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count, \
						   old.lock_count, \
						   new.lock_count); \
		if (likely(old.lock_count == prev.lock_count)) { \
			SUCCESS; \
		} \
		cpu_relax_lowlatency(); \
	} \
} while (0)

#else

/* No usable 64-bit cmpxchg: the fast path compiles away entirely. */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	/* Fast path: lockless increment via cmpxchg of the combined word. */
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	/* Slow path: lock was observed held, or no cmpxchg support. */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
  46. /**
  47. * lockref_get_not_zero - Increments count unless the count is 0 or dead
  48. * @lockref: pointer to lockref structure
  49. * Return: 1 if count updated successfully or 0 if count was zero
  50. */
  51. int lockref_get_not_zero(struct lockref *lockref)
  52. {
  53. int retval;
  54. CMPXCHG_LOOP(
  55. new.count++;
  56. if (old.count <= 0)
  57. return 0;
  58. ,
  59. return 1;
  60. );
  61. spin_lock(&lockref->lock);
  62. retval = 0;
  63. if (lockref->count > 0) {
  64. lockref->count++;
  65. retval = 1;
  66. }
  67. spin_unlock(&lockref->lock);
  68. return retval;
  69. }
  70. EXPORT_SYMBOL(lockref_get_not_zero);
/**
 * lockref_get_or_lock - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			break;	/* zero or dead: fall back to taking the lock */
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 0)
		return 0;	/* NOTE: returns with the spinlock still held */
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return an error.
 */
int lockref_put_return(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;	/* already zero or dead */
	,
		return new.count;
	);
	/* Lock was held (or no cmpxchg): no locked fallback, report failure. */
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);
/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;	/* would drop to zero: fall back to the lock */
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;	/* NOTE: returns with the spinlock still held */
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 *
 * Caller must hold @lockref->lock (asserted below).  A negative count
 * is the "dead" marker that the lockless get/put paths test against.
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
  145. /**
  146. * lockref_get_not_dead - Increments count unless the ref is dead
  147. * @lockref: pointer to lockref structure
  148. * Return: 1 if count updated successfully or 0 if lockref was dead
  149. */
  150. int lockref_get_not_dead(struct lockref *lockref)
  151. {
  152. int retval;
  153. CMPXCHG_LOOP(
  154. new.count++;
  155. if (old.count < 0)
  156. return 0;
  157. ,
  158. return 1;
  159. );
  160. spin_lock(&lockref->lock);
  161. retval = 0;
  162. if (lockref->count >= 0) {
  163. lockref->count++;
  164. retval = 1;
  165. }
  166. spin_unlock(&lockref->lock);
  167. return retval;
  168. }
  169. EXPORT_SYMBOL(lockref_get_not_dead);