spinlock_debug.c

/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */

#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>

void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                          struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
        lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
        lock->magic = SPINLOCK_MAGIC;
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__raw_spin_lock_init);

void __rwlock_init(rwlock_t *lock, const char *name,
                   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
        lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
        lock->magic = RWLOCK_MAGIC;
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__rwlock_init);

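/*
 * Dump the state of a misbehaving spinlock: the CPU and task that hit the
 * problem, the lock's magic value and its recorded owner, followed by a
 * stack trace.
 */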
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
        struct task_struct *owner = NULL;

        if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
                owner = lock->owner;
        printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
                msg, raw_smp_processor_id(),
                current->comm, task_pid_nr(current));
        printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
                        ".owner_cpu: %d\n",
                lock, lock->magic,
                owner ? owner->comm : "<none>",
                owner ? task_pid_nr(owner) : -1,
                lock->owner_cpu);
        dump_stack();
}

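/*
 * Report a spinlock problem. debug_locks_off() returns false once lock
 * debugging has already been disabled, so later errors are not dumped again.
 */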
static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
        if (!debug_locks_off())
                return;

        spin_dump(lock, msg);
}

#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)

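/*
 * Sanity checks around lock and unlock: catch corrupted locks ("bad magic"),
 * self-deadlock (the lock is already held by this task or CPU), and unlocks
 * by the wrong owner. Ownership is recorded after a successful acquisition
 * and cleared again on unlock.
 */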
static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
        SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
        SPIN_BUG_ON(lock->owner == current, lock, "recursion");
        SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
                                                lock, "cpu recursion");
}

static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
        lock->owner_cpu = raw_smp_processor_id();
        lock->owner = current;
}

static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
        SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
        SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
        SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
        SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                                                lock, "wrong CPU");
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}

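/*
 * Slow path for a contended debug spinlock: poll with trylock for roughly
 * one second (loops_per_jiffy * HZ iterations of __delay(1)) before
 * reporting a suspected lockup and falling back to the arch spin loop.
 */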
static void __spin_lock_debug(raw_spinlock_t *lock)
{
        u64 i;
        u64 loops = loops_per_jiffy * HZ;

        for (i = 0; i < loops; i++) {
                if (arch_spin_trylock(&lock->raw_lock))
                        return;
                __delay(1);
        }
        /* lockup suspected: */
        spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
        trigger_all_cpu_backtrace();
#endif

        /*
         * The trylock above was causing a livelock.  Give the lower level arch
         * specific lock code a chance to acquire the lock. We have already
         * printed a warning/backtrace at this point. The non-debug arch
         * specific code might actually succeed in acquiring the lock.  If it
         * is not successful, the end-result is the same - there is no forward
         * progress.
         */
        arch_spin_lock(&lock->raw_lock);
}

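/*
 * Debug implementations of the low-level spinlock entry points used by the
 * generic spinlock wrappers when spinlock debugging is built in.
 */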
void do_raw_spin_lock(raw_spinlock_t *lock)
{
        debug_spin_lock_before(lock);
        if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
                __spin_lock_debug(lock);
        debug_spin_lock_after(lock);
}

int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        int ret = arch_spin_trylock(&lock->raw_lock);

        if (ret)
                debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void do_raw_spin_unlock(raw_spinlock_t *lock)
{
        debug_spin_unlock(lock);
        arch_spin_unlock(&lock->raw_lock);
}

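/*
 * rwlock debugging: the rwlock counterparts of spin_bug()/SPIN_BUG_ON()
 * above.
 */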
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
        if (!debug_locks_off())
                return;

        printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
                msg, raw_smp_processor_id(), current->comm,
                task_pid_nr(current), lock);
        dump_stack();
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)

#if 0		/* __write_lock_debug() can lock up - maybe this can too? */
static void __read_lock_debug(rwlock_t *lock)
{
        u64 i;
        u64 loops = loops_per_jiffy * HZ;
        int print_once = 1;

        for (;;) {
                for (i = 0; i < loops; i++) {
                        if (arch_read_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
                        printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
                                        "%s/%d, %p\n",
                                raw_smp_processor_id(), current->comm,
                                current->pid, lock);
                        dump_stack();
                }
        }
}
#endif

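/*
 * Read-lock paths only verify the magic value: read locks can be shared and
 * nested, so no single owner can be recorded or checked.
 */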
void do_raw_read_lock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        arch_read_lock(&lock->raw_lock);
}

int do_raw_read_trylock(rwlock_t *lock)
{
        int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void do_raw_read_unlock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        arch_read_unlock(&lock->raw_lock);
}

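/*
 * Write locks are exclusive, so they get the full owner checks, mirroring
 * the spinlock case.
 */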
static inline void debug_write_lock_before(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
        RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
                                        lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
        lock->owner_cpu = raw_smp_processor_id();
        lock->owner = current;
}

static inline void debug_write_unlock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
        RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                                        lock, "wrong CPU");
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}

#if 0		/* This can cause lockups */
static void __write_lock_debug(rwlock_t *lock)
{
        u64 i;
        u64 loops = loops_per_jiffy * HZ;
        int print_once = 1;

        for (;;) {
                for (i = 0; i < loops; i++) {
                        if (arch_write_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
                        printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
                                        "%s/%d, %p\n",
                                raw_smp_processor_id(), current->comm,
                                current->pid, lock);
                        dump_stack();
                }
        }
}
#endif

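/*
 * Write-lock entry points: unlike the spinlock path there is no timed
 * trylock loop here (the __write_lock_debug() variant above is compiled
 * out), so a deadlocked write lock simply spins in the arch code without a
 * "lockup suspected" report.
 */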
void do_raw_write_lock(rwlock_t *lock)
{
        debug_write_lock_before(lock);
        arch_write_lock(&lock->raw_lock);
        debug_write_lock_after(lock);
}

int do_raw_write_trylock(rwlock_t *lock)
{
        int ret = arch_write_trylock(&lock->raw_lock);

        if (ret)
                debug_write_lock_after(lock);
#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void do_raw_write_unlock(rwlock_t *lock)
{
        debug_write_unlock(lock);
        arch_write_unlock(&lock->raw_lock);
}