bitops.h

/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_AVR32_BITOPS_H
#define __ASM_AVR32_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm/byteorder.h>
#include <asm/barrier.h>

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile void * addr)
{
        unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
        unsigned long tmp;

        if (__builtin_constant_p(nr)) {
                asm volatile(
                        "1: ssrf 5\n"
                        " ld.w %0, %2\n"
                        " sbr %0, %3\n"
                        " stcond %1, %0\n"
                        " brne 1b"
                        : "=&r"(tmp), "=o"(*p)
                        : "m"(*p), "i"(nr)
                        : "cc");
        } else {
                unsigned long mask = 1UL << (nr % BITS_PER_LONG);

                asm volatile(
                        "1: ssrf 5\n"
                        " ld.w %0, %2\n"
                        " or %0, %3\n"
                        " stcond %1, %0\n"
                        " brne 1b"
                        : "=&r"(tmp), "=o"(*p)
                        : "m"(*p), "r"(mask)
                        : "cc");
        }
}
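
/*
 * Example usage (illustrative sketch, not compiled; the bitmap and function
 * names are hypothetical): @nr selects both the word and the bit within it,
 * so a single call can address any bit of a multi-word bitmap.
 */
#if 0
static unsigned long example_flags[2];          /* 64-bit bitmap: two 32-bit words */

static void example_mark_events(void)
{
        set_bit(3, example_flags);              /* bit 3 of example_flags[0] */
        set_bit(40, example_flags);             /* nr >= 32 lands in example_flags[1] */
}
#endif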

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile void * addr)
{
        unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
        unsigned long tmp;

        if (__builtin_constant_p(nr)) {
                asm volatile(
                        "1: ssrf 5\n"
                        " ld.w %0, %2\n"
                        " cbr %0, %3\n"
                        " stcond %1, %0\n"
                        " brne 1b"
                        : "=&r"(tmp), "=o"(*p)
                        : "m"(*p), "i"(nr)
                        : "cc");
        } else {
                unsigned long mask = 1UL << (nr % BITS_PER_LONG);

                asm volatile(
                        "1: ssrf 5\n"
                        " ld.w %0, %2\n"
                        " andn %0, %3\n"
                        " stcond %1, %0\n"
                        " brne 1b"
                        : "=&r"(tmp), "=o"(*p)
                        : "m"(*p), "r"(mask)
                        : "cc");
        }
}
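
/*
 * Example (illustrative sketch, not compiled; names are hypothetical): as the
 * comment above notes, clear_bit() has no barrier of its own, so a caller
 * releasing a flag polled by other CPUs pairs it with smp_mb__before_atomic().
 */
#if 0
static void example_release_busy(unsigned long *flags)
{
        smp_mb__before_atomic();        /* order earlier stores before the clear */
        clear_bit(0, flags);            /* atomically drop the "busy" bit */
}
#endif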

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile void * addr)
{
        unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
        unsigned long mask = 1UL << (nr % BITS_PER_LONG);
        unsigned long tmp;

        asm volatile(
                "1: ssrf 5\n"
                " ld.w %0, %2\n"
                " eor %0, %3\n"
                " stcond %1, %0\n"
                " brne 1b"
                : "=&r"(tmp), "=o"(*p)
                : "m"(*p), "r"(mask)
                : "cc");
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile void * addr)
{
        unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
        unsigned long mask = 1UL << (nr % BITS_PER_LONG);
        unsigned long tmp, old;

        if (__builtin_constant_p(nr)) {
                asm volatile(
                        "1: ssrf 5\n"
                        " ld.w %0, %3\n"
                        " mov %2, %0\n"
                        " sbr %0, %4\n"
                        " stcond %1, %0\n"
                        " brne 1b"
                        : "=&r"(tmp), "=o"(*p), "=&r"(old)
                        : "m"(*p), "i"(nr)
                        : "memory", "cc");
        } else {
                asm volatile(
                        "1: ssrf 5\n"
                        " ld.w %2, %3\n"
                        " or %0, %2, %4\n"
                        " stcond %1, %0\n"
                        " brne 1b"
                        : "=&r"(tmp), "=o"(*p), "=&r"(old)
                        : "m"(*p), "r"(mask)
                        : "memory", "cc");
        }

        return (old & mask) != 0;
}
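
/*
 * Example (illustrative sketch, not compiled; names are hypothetical): since
 * the old value is returned atomically, test_and_set_bit() expresses a
 * one-shot "claim": exactly one concurrent caller sees the bit as clear.
 */
#if 0
static int example_try_claim(unsigned long *flags)
{
        return !test_and_set_bit(0, flags);     /* 1 if this caller won the bit */
}
#endif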

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile void * addr)
{
        unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
        unsigned long mask = 1UL << (nr % BITS_PER_LONG);
        unsigned long tmp, old;

        if (__builtin_constant_p(nr)) {
                asm volatile(
                        "1: ssrf 5\n"
                        " ld.w %0, %3\n"
                        " mov %2, %0\n"
                        " cbr %0, %4\n"
                        " stcond %1, %0\n"
                        " brne 1b"
                        : "=&r"(tmp), "=o"(*p), "=&r"(old)
                        : "m"(*p), "i"(nr)
                        : "memory", "cc");
        } else {
                asm volatile(
                        "1: ssrf 5\n"
                        " ld.w %0, %3\n"
                        " mov %2, %0\n"
                        " andn %0, %4\n"
                        " stcond %1, %0\n"
                        " brne 1b"
                        : "=&r"(tmp), "=o"(*p), "=&r"(old)
                        : "m"(*p), "r"(mask)
                        : "memory", "cc");
        }

        return (old & mask) != 0;
}
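
/*
 * Example (illustrative sketch, not compiled; names are hypothetical):
 * test_and_clear_bit() atomically consumes a pending-work flag, so an event
 * set concurrently by another CPU is handled exactly once.
 */
#if 0
static void example_handle_pending(unsigned long *pending, void (*handle)(void))
{
        if (test_and_clear_bit(0, pending))
                handle();               /* only runs when the bit was set */
}
#endif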

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile void * addr)
{
        unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
        unsigned long mask = 1UL << (nr % BITS_PER_LONG);
        unsigned long tmp, old;

        asm volatile(
                "1: ssrf 5\n"
                " ld.w %2, %3\n"
                " eor %0, %2, %4\n"
                " stcond %1, %0\n"
                " brne 1b"
                : "=&r"(tmp), "=o"(*p), "=&r"(old)
                : "m"(*p), "r"(mask)
                : "memory", "cc");

        return (old & mask) != 0;
}

#include <asm-generic/bitops/non-atomic.h>

/* Find First bit Set */
static inline unsigned long __ffs(unsigned long word)
{
        unsigned long result;

        asm("brev %1\n\t"
            "clz %0,%1"
            : "=r"(result), "=&r"(word)
            : "1"(word));
        return result;
}

/* Find First Zero */
static inline unsigned long ffz(unsigned long word)
{
        return __ffs(~word);
}
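
/*
 * Worked values (these follow from the brev + clz sequence above): __ffs()
 * returns the zero-based index of the least significant set bit, and ffz()
 * that of the least significant clear bit:
 *
 *      __ffs(0x00000010) == 4
 *      __ffs(0x80000000) == 31
 *      ffz(0x0000000f)   == 4
 *
 * __ffs(0) and ffz(~0UL) are left undefined by the generic bitops contract
 * and should not be relied upon.
 */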

/* Find Last bit Set */
static inline int fls(unsigned long word)
{
        unsigned long result;

        asm("clz %0,%1" : "=r"(result) : "r"(word));
        return 32 - result;
}

static inline int __fls(unsigned long word)
{
        return fls(word) - 1;
}
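
/*
 * Worked values (these follow from the clz-based definition above): fls()
 * numbers the most significant set bit starting at 1, __fls() starting at 0:
 *
 *      fls(0x00000001) == 1
 *      fls(0x80000000) == 32
 *      fls(0)          == 0    (the AVR32 clz instruction yields 32 for zero)
 *
 * __fls(0) is undefined, as in the generic implementation.
 */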

unsigned long find_first_zero_bit(const unsigned long *addr,
                                  unsigned long size);
#define find_first_zero_bit find_first_zero_bit

unsigned long find_next_zero_bit(const unsigned long *addr,
                                 unsigned long size,
                                 unsigned long offset);
#define find_next_zero_bit find_next_zero_bit

unsigned long find_first_bit(const unsigned long *addr,
                             unsigned long size);
#define find_first_bit find_first_bit

unsigned long find_next_bit(const unsigned long *addr,
                            unsigned long size,
                            unsigned long offset);
#define find_next_bit find_next_bit
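
/*
 * Example (illustrative sketch, not compiled; names are hypothetical): these
 * routines return @size when no further matching bit exists, which yields the
 * usual bitmap-scanning loop.
 */
#if 0
static void example_scan(const unsigned long *map, unsigned long size,
                         void (*visit)(unsigned long))
{
        unsigned long bit;

        for (bit = find_first_bit(map, size); bit < size;
             bit = find_next_bit(map, size, bit + 1))
                visit(bit);             /* called once per set bit */
}
#endif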

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 *
 * The difference is that bit numbering starts at 1, and if no bit is set,
 * the function returns 0.
 */
static inline int ffs(unsigned long word)
{
        if (word == 0)
                return 0;
        return __ffs(word) + 1;
}
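
/*
 * Worked example (follows from the definition above): ffs() is the one-based
 * variant, so ffs(0x10) == 5 while __ffs(0x10) == 4, and ffs(0) == 0 whereas
 * __ffs(0) is undefined.
 */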

#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

extern unsigned long find_next_zero_bit_le(const void *addr,
                unsigned long size, unsigned long offset);
#define find_next_zero_bit_le find_next_zero_bit_le

extern unsigned long find_next_bit_le(const void *addr,
                unsigned long size, unsigned long offset);
#define find_next_bit_le find_next_bit_le

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __ASM_AVR32_BITOPS_H */