#ifndef _ASM_M32R_BITOPS_H
#define _ASM_M32R_BITOPS_H

/*
 *  linux/include/asm-m32r/bitops.h
 *
 *  Copyright 1992, Linus Torvalds.
 *
 *  M32R version:
 *    Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <asm/assembler.h>
#include <asm/byteorder.h>
#include <asm/dcache_clear.h>
#include <asm/types.h>
#include <asm/barrier.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
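
/*
 * Worked example of the numbering above (illustrative sketch; "map"
 * is a hypothetical caller-side bitmap, not part of this header):
 *
 *	static __u32 map[2];	// 64 bits total
 *	set_bit(35, map);	// 35 >> 5 == 1, 35 & 0x1F == 3:
 *				// atomically sets bit 3 of map[1]
 */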

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		/* DCACHE_CLEAR is the M32700 TS1 errata workaround; it
		 * uses r6 as a work register, hence the extra clobber
		 * under CONFIG_CHIP_M32700_TS1 below. */
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;	\n\t"
		"or	%0, %2;		\n\t"
		M32R_UNLOCK" %0, @%1;	\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}
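
/*
 * Usage sketch for set_bit() (hypothetical caller; DECLARE_BITMAP()
 * comes from <linux/types.h>, IRQ_PENDING is an assumed bit number):
 *
 *	DECLARE_BITMAP(pending, 64);
 *	set_bit(IRQ_PENDING, pending);		// atomic, race-safe
 *	__set_bit(IRQ_PENDING, pending);	// non-atomic variant; only
 *						// safe without concurrent
 *						// writers to this word
 */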

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;	\n\t"
		"and	%0, %2;		\n\t"
		M32R_UNLOCK" %0, @%1;	\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (~mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}
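
/*
 * Barrier sketch for the locking case noted above (LOCK_BIT and
 * "word" are hypothetical; the barrier orders the critical section's
 * stores before the releasing clear):
 *
 *	// ... critical section ...
 *	smp_mb__before_atomic();
 *	clear_bit(LOCK_BIT, &word);	// release the lock bit
 */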

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;	\n\t"
		"xor	%0, %2;		\n\t"
		M32R_UNLOCK" %0, @%1;	\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}
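
/*
 * change_bit() sketch (hypothetical state word):
 *
 *	static __u32 led_state;
 *	change_bit(0, &led_state);	// toggle bit 0: 0 -> 1 or 1 -> 0
 */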

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%2")
		M32R_LOCK" %0, @%2;	\n\t"
		"mv	%1, %0;		\n\t"
		"and	%0, %3;		\n\t"
		"or	%1, %3;		\n\t"
		M32R_UNLOCK" %1, @%2;	\n\t"
		: "=&r" (oldbit), "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}
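
/*
 * Classic claim-once pattern (INIT_DONE, flags_word and
 * do_one_time_init() are hypothetical; exactly one racing caller
 * sees the old value 0 and wins):
 *
 *	if (!test_and_set_bit(INIT_DONE, &flags_word))
 *		do_one_time_init();
 */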

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%3")
		M32R_LOCK" %0, @%3;	\n\t"
		"mv	%1, %0;		\n\t"
		"and	%0, %2;		\n\t"
		"not	%2, %2;		\n\t"
		"and	%1, %2;		\n\t"
		M32R_UNLOCK" %1, @%3;	\n\t"
		: "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
		: "r" (a)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}
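
/*
 * Consume-a-pending-event sketch (EV_RX, events and handle_rx() are
 * hypothetical; the read-and-clear happens as one atomic step, so an
 * event posted once is handled at most once):
 *
 *	if (test_and_clear_bit(EV_RX, events))
 *		handle_rx();
 */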

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%2")
		M32R_LOCK" %0, @%2;	\n\t"
		"mv	%1, %0;		\n\t"
		"and	%0, %3;		\n\t"
		"xor	%1, %3;		\n\t"
		M32R_UNLOCK" %1, @%2;	\n\t"
		: "=&r" (oldbit), "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}
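
/*
 * test_and_change_bit() sketch (PARITY and "word" are hypothetical;
 * the return value is the bit's state *before* the toggle):
 *
 *	int was_set = test_and_change_bit(PARITY, &word);
 */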

#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#endif /* __KERNEL__ */

#ifdef __KERNEL__

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_M32R_BITOPS_H */