#ifndef _ASM_M32R_SPINLOCK_H
#define _ASM_M32R_SPINLOCK_H

/*
 * linux/include/asm-m32r/spinlock.h
 *
 * M32R version:
 *   Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *   Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/compiler.h>
#include <linux/atomic.h>
#include <asm/dcache_clear.h>
#include <asm/page.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * (the type definitions are in asm/spinlock_types.h)
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * We make no fairness assumptions. They have a cost.
 */

#define arch_spin_is_locked(x)		(*(volatile int *)(&(x)->slock) <= 0)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
		do { cpu_relax(); } while (arch_spin_is_locked(x))
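
/*
 * A rough illustration of the lock word encoding used throughout this
 * file (the encoding itself is documented in the functions below; the
 * assumption that __ARCH_SPIN_LOCK_UNLOCKED from asm/spinlock_types.h
 * initializes slock to 1 is ours):
 *
 *   lock->slock ==  1 : unlocked
 *   lock->slock <=  0 : locked
 *
 *   arch_spinlock_t l = __ARCH_SPIN_LOCK_UNLOCKED;  // slock == 1
 *   arch_spin_lock(&l);                             // slock becomes 0
 *   arch_spin_is_locked(&l);                        // evaluates to true
 *   arch_spin_unlock(&l);                           // slock back to 1
 */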

/**
 * arch_spin_trylock - Try spin lock and return a result
 * @lock: Pointer to the lock variable
 *
 * arch_spin_trylock() tries to get the lock and returns a result.
 * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        int oldval;
        unsigned long tmp1, tmp2;

        /*
         * lock->slock :  =1 : unlock
         *             : <=0 : lock
         * {
         *   oldval = lock->slock; <--+ need atomic operation
         *   lock->slock = 0;      <--+
         * }
         */
        __asm__ __volatile__ (
                "# arch_spin_trylock            \n\t"
                "ldi %1, #0;                    \n\t"
                "mvfc %2, psw;                  \n\t"
                "clrpsw #0x40 -> nop;           \n\t"
                DCACHE_CLEAR("%0", "r6", "%3")
                "lock %0, @%3;                  \n\t"
                "unlock %1, @%3;                \n\t"
                "mvtc %2, psw;                  \n\t"
                : "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
                : "r" (&lock->slock)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );

        return (oldval > 0);
}
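
/*
 * Rough C equivalent of the trylock sequence above (a sketch only; the
 * helper names are illustrative, not real APIs - the actual atomicity
 * comes from the LOCK/UNLOCK instruction pair executed with interrupts
 * masked via the PSW):
 *
 *   psw = read_psw();            // mvfc %2, psw
 *   disable_interrupts();        // clrpsw #0x40 (interrupt-enable bit)
 *   oldval = lock->slock;        // lock   %0, @%3  (locked load)
 *   lock->slock = 0;             // unlock %1, @%3  (locked store of 0)
 *   restore_psw(psw);            // mvtc %2, psw
 *   return oldval > 0;           // >0 meant the lock was free
 */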

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned long tmp0, tmp1;

        /*
         * lock->slock :  =1 : unlock
         *             : <=0 : lock
         *
         * for ( ; ; ) {
         *   lock->slock -= 1;  <-- need atomic operation
         *   if (lock->slock == 0) break;
         *   for ( ; lock->slock <= 0 ; );
         * }
         */
        __asm__ __volatile__ (
                "# arch_spin_lock               \n\t"
                ".fillinsn                      \n"
                "1:                             \n\t"
                "mvfc %1, psw;                  \n\t"
                "clrpsw #0x40 -> nop;           \n\t"
                DCACHE_CLEAR("%0", "r6", "%2")
                "lock %0, @%2;                  \n\t"
                "addi %0, #-1;                  \n\t"
                "unlock %0, @%2;                \n\t"
                "mvtc %1, psw;                  \n\t"
                "bltz %0, 2f;                   \n\t"
                LOCK_SECTION_START(".balign 4 \n\t")
                ".fillinsn                      \n"
                "2:                             \n\t"
                "ld %0, @%2;                    \n\t"
                "bgtz %0, 1b;                   \n\t"
                "bra 2b;                        \n\t"
                LOCK_SECTION_END
                : "=&r" (tmp0), "=&r" (tmp1)
                : "r" (&lock->slock)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        mb();
        lock->slock = 1;
}
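
/*
 * Unlock needs no atomic read-modify-write: after the mb() barrier the
 * plain store of 1 marks the lock free, and any CPU spinning in the
 * slow path of arch_spin_lock() sees the positive value and retries the
 * locked decrement.  Typical pairing (illustrative only):
 *
 *   arch_spin_lock(&l);      // slock: 1 -> 0
 *   ... critical section ...
 *   arch_spin_unlock(&l);    // mb(); slock = 1
 */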

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * We implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 */

/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x)	((int)(x)->lock > 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
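
/*
 * A worked example of the bias scheme (assuming the conventional
 * RW_LOCK_BIAS value of 0x01000000 from asm/spinlock_types.h):
 *
 *   rw->lock == RW_LOCK_BIAS      : free
 *   rw->lock == RW_LOCK_BIAS - n  : n readers hold the lock
 *   rw->lock == 0                 : one writer holds the lock
 *   rw->lock <  0                 : a writer is contending with readers
 *
 * arch_read_can_lock() only needs the counter to stay positive, while
 * arch_write_can_lock() requires the full bias, i.e. no readers and no
 * writer at all.
 */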

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned long tmp0, tmp1;

        /*
         * rw->lock :  >0 : unlock
         *          : <=0 : lock
         *
         * for ( ; ; ) {
         *   rw->lock -= 1;  <-- need atomic operation
         *   if (rw->lock >= 0) break;
         *   rw->lock += 1;  <-- need atomic operation
         *   for ( ; rw->lock <= 0 ; );
         * }
         */
        __asm__ __volatile__ (
                "# read_lock                    \n\t"
                ".fillinsn                      \n"
                "1:                             \n\t"
                "mvfc %1, psw;                  \n\t"
                "clrpsw #0x40 -> nop;           \n\t"
                DCACHE_CLEAR("%0", "r6", "%2")
                "lock %0, @%2;                  \n\t"
                "addi %0, #-1;                  \n\t"
                "unlock %0, @%2;                \n\t"
                "mvtc %1, psw;                  \n\t"
                "bltz %0, 2f;                   \n\t"
                LOCK_SECTION_START(".balign 4 \n\t")
                ".fillinsn                      \n"
                "2:                             \n\t"
                "clrpsw #0x40 -> nop;           \n\t"
                DCACHE_CLEAR("%0", "r6", "%2")
                "lock %0, @%2;                  \n\t"
                "addi %0, #1;                   \n\t"
                "unlock %0, @%2;                \n\t"
                "mvtc %1, psw;                  \n\t"
                ".fillinsn                      \n"
                "3:                             \n\t"
                "ld %0, @%2;                    \n\t"
                "bgtz %0, 1b;                   \n\t"
                "bra 3b;                        \n\t"
                LOCK_SECTION_END
                : "=&r" (tmp0), "=&r" (tmp1)
                : "r" (&rw->lock)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned long tmp0, tmp1, tmp2;

        /*
         * rw->lock :  =RW_LOCK_BIAS_STR : unlock
         *          : !=RW_LOCK_BIAS_STR : lock
         *
         * for ( ; ; ) {
         *   rw->lock -= RW_LOCK_BIAS_STR;  <-- need atomic operation
         *   if (rw->lock == 0) break;
         *   rw->lock += RW_LOCK_BIAS_STR;  <-- need atomic operation
         *   for ( ; rw->lock != RW_LOCK_BIAS_STR ; ) ;
         * }
         */
        __asm__ __volatile__ (
                "# write_lock                   \n\t"
                "seth %1, #high(" RW_LOCK_BIAS_STR ");  \n\t"
                "or3 %1, %1, #low(" RW_LOCK_BIAS_STR ");        \n\t"
                ".fillinsn                      \n"
                "1:                             \n\t"
                "mvfc %2, psw;                  \n\t"
                "clrpsw #0x40 -> nop;           \n\t"
                DCACHE_CLEAR("%0", "r7", "%3")
                "lock %0, @%3;                  \n\t"
                "sub %0, %1;                    \n\t"
                "unlock %0, @%3;                \n\t"
                "mvtc %2, psw;                  \n\t"
                "bnez %0, 2f;                   \n\t"
                LOCK_SECTION_START(".balign 4 \n\t")
                ".fillinsn                      \n"
                "2:                             \n\t"
                "clrpsw #0x40 -> nop;           \n\t"
                DCACHE_CLEAR("%0", "r7", "%3")
                "lock %0, @%3;                  \n\t"
                "add %0, %1;                    \n\t"
                "unlock %0, @%3;                \n\t"
                "mvtc %2, psw;                  \n\t"
                ".fillinsn                      \n"
                "3:                             \n\t"
                "ld %0, @%3;                    \n\t"
                "beq %0, %1, 1b;                \n\t"
                "bra 3b;                        \n\t"
                LOCK_SECTION_END
                : "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
                : "r" (&rw->lock)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r7"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long tmp0, tmp1;

        __asm__ __volatile__ (
                "# read_unlock                  \n\t"
                "mvfc %1, psw;                  \n\t"
                "clrpsw #0x40 -> nop;           \n\t"
                DCACHE_CLEAR("%0", "r6", "%2")
                "lock %0, @%2;                  \n\t"
                "addi %0, #1;                   \n\t"
                "unlock %0, @%2;                \n\t"
                "mvtc %1, psw;                  \n\t"
                : "=&r" (tmp0), "=&r" (tmp1)
                : "r" (&rw->lock)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        unsigned long tmp0, tmp1, tmp2;

        __asm__ __volatile__ (
                "# write_unlock                 \n\t"
                "seth %1, #high(" RW_LOCK_BIAS_STR ");  \n\t"
                "or3 %1, %1, #low(" RW_LOCK_BIAS_STR ");        \n\t"
                "mvfc %2, psw;                  \n\t"
                "clrpsw #0x40 -> nop;           \n\t"
                DCACHE_CLEAR("%0", "r7", "%3")
                "lock %0, @%3;                  \n\t"
                "add %0, %1;                    \n\t"
                "unlock %0, @%3;                \n\t"
                "mvtc %2, psw;                  \n\t"
                : "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
                : "r" (&rw->lock)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r7"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;
        if (atomic_dec_return(count) >= 0)
                return 1;
        atomic_inc(count);
        return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;
        if (atomic_sub_and_test(RW_LOCK_BIAS, count))
                return 1;
        atomic_add(RW_LOCK_BIAS, count);
        return 0;
}
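
/*
 * Behaviour sketch for the trylock pair above, using the bias scheme
 * described earlier (values are illustrative):
 *
 *   arch_read_trylock(&rw);   // lock: BIAS -> BIAS-1, returns 1
 *   arch_write_trylock(&rw);  // BIAS-1 - BIAS != 0: bias restored, returns 0
 *   arch_read_unlock(&rw);    // lock: back to BIAS
 *   arch_write_trylock(&rw);  // BIAS - BIAS == 0: returns 1
 */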

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif	/* _ASM_M32R_SPINLOCK_H */