spinlock_32.h
/* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC_SPINLOCK_H
#define __SPARC_SPINLOCK_H

#ifndef __ASSEMBLY__

#include <asm/psr.h>
#include <asm/processor.h> /* for cpu_relax */

#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
/* Acquire with ldstub (atomic load-and-set-0xff of the lock byte).  If the
 * old value was non-zero the lock is held, so spin with plain loads in the
 * out-of-line "2:" loop until it reads zero, then retry the atomic ldstub.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
	"\n1:\n\t"
	"ldstub	[%0], %%g2\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2f\n\t"
	" ldub	[%0], %%g2\n\t"
	".subsection	2\n"
	"2:\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2b\n\t"
	" ldub	[%0], %%g2\n\t"
	"b,a	1b\n\t"
	".previous\n"
	: /* no outputs */
	: "r" (lock)
	: "g2", "memory", "cc");
}
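/* Illustrative only, not used by the kernel: a minimal C sketch of the same
 * acquire loop, assuming a GCC-style __atomic_exchange_n builtin stands in
 * for ldstub.  The helper name is hypothetical.
 */
static inline void arch_spin_lock_sketch(volatile unsigned char *lock)
{
	/* ldstub == atomically fetch the old byte and store 0xff */
	while (__atomic_exchange_n(lock, 0xff, __ATOMIC_ACQUIRE) != 0)
		while (*lock != 0)	/* spin on plain loads, like the 2: loop */
			cpu_relax();
}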
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int result;
	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (result)
			     : "r" (lock)
			     : "memory");
	return (result == 0);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}
/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 *
 * Sort of like atomic_t's on Sparc, but even more clever.
 *
 *	------------------------------------
 *	| 24-bit counter           | wlock |  arch_rwlock_t
 *	------------------------------------
 *	 31                       8  7     0
 *
 * wlock signifies the one writer is in or somebody is updating
 * counter. For a writer, if he successfully acquires the wlock,
 * but counter is non-zero, he has to release the lock and wait,
 * till both counter and wlock are zero.
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
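/* Illustrative only: hypothetical helpers (not part of this header) showing
 * how the lock word above decomposes.  SPARC is big-endian, so the wlock
 * byte in bits 7..0 is the byte at offset 3, which is why the assembly
 * below does ldstub on [rw + 3].
 */
static inline unsigned int __rw_readers_sketch(const arch_rwlock_t *rw)
{
	return rw->lock >> 8;		/* 24-bit reader counter, bits 31..8 */
}

static inline unsigned int __rw_wlock_sketch(const arch_rwlock_t *rw)
{
	return rw->lock & 0xff;		/* writer/update byte, bits 7..0 */
}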
static inline void __arch_read_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define arch_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_lock(lock); \
	local_irq_restore(flags); \
} while(0)

static inline void __arch_read_unlock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_exit\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define arch_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_write_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	*(volatile __u32 *)&lp->lock = ~0U;
}

static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
"	st	%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val;

	/* Try to grab the wlock byte with ldstub. */
	__asm__ __volatile__("ldstub [%1 + 3], %0"
			     : "=r" (val)
			     : "r" (&rw->lock)
			     : "memory");

	if (val == 0) {
		/* Got the wlock byte; fail if any readers hold the lock. */
		val = rw->lock & ~0xff;
		if (val)
			((volatile u8*)&rw->lock)[3] = 0;	/* drop wlock */
		else
			*(volatile u32*)&rw->lock = ~0U;	/* mark write-held */
	}

	return (val == 0);
}
static inline int __arch_read_trylock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	register int res asm("o0");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_try\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: "=r" (res)
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	return res;
}

#define arch_read_trylock(lock) \
({	unsigned long flags; \
	int res; \
	local_irq_save(flags); \
	res = __arch_read_trylock(lock); \
	local_irq_restore(flags); \
	res; \
})

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
#define arch_write_lock_flags(rw, flags) arch_write_lock(rw)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
#define arch_write_can_lock(rw) (!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* __SPARC_SPINLOCK_H */