spinlock.h

/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
        do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)

#ifdef CONFIG_ARC_HAS_LLSC

/*
 * A normal LLOCK/SCOND based system, w/o need for livelock workaround
 */
#ifndef CONFIG_ARC_STAR_9000923308

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned int val;

        smp_mb();

        __asm__ __volatile__(
        "1: llock %[val], [%[slock]] \n"
        " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */
        " scond %[LOCKED], [%[slock]] \n" /* acquire */
        " bnz 1b \n"
        " \n"
        : [val] "=&r" (val)
        : [slock] "r" (&(lock->slock)),
          [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
        : "memory", "cc");

        smp_mb();
}
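
/*
 * Illustrative sketch (pseudo-C, not compiled, not part of the original
 * source): the acquire loop above behaves roughly as
 *
 *	do {
 *		val = lock->slock;			(llock)
 *	} while (val == __ARCH_SPIN_LOCK_LOCKED__ ||	(spin while LOCKED)
 *		 !store_conditional(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__));
 *
 * where store_conditional() is a hypothetical stand-in for SCOND, which only
 * succeeds if no other core has written the line since the LLOCK.
 */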

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned int val, got_it = 0;

        smp_mb();

        __asm__ __volatile__(
        "1: llock %[val], [%[slock]] \n"
        " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
        " scond %[LOCKED], [%[slock]] \n" /* acquire */
        " bnz 1b \n"
        " mov %[got_it], 1 \n"
        "4: \n"
        " \n"
        : [val] "=&r" (val),
          [got_it] "+&r" (got_it)
        : [slock] "r" (&(lock->slock)),
          [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        smp_mb();

        lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

        smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * zero means writer holds the lock exclusively, deny Reader.
         * Otherwise grant lock to first/subseq reader
         *
         *	if (rw->counter > 0) {
         *		rw->counter--;
         *		ret = 1;
         *	}
         */
        __asm__ __volatile__(
        "1: llock %[val], [%[rwlock]] \n"
        " brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */
        " sub %[val], %[val], 1 \n" /* reader lock */
        " scond %[val], [%[rwlock]] \n"
        " bnz 1b \n"
        " \n"
        : [val] "=&r" (val)
        : [rwlock] "r" (&(rw->counter)),
          [WR_LOCKED] "ir" (0)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned int val, got_it = 0;

        smp_mb();

        __asm__ __volatile__(
        "1: llock %[val], [%[rwlock]] \n"
        " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
        " sub %[val], %[val], 1 \n" /* counter-- */
        " scond %[val], [%[rwlock]] \n"
        " bnz 1b \n" /* retry if collided with someone */
        " mov %[got_it], 1 \n"
        " \n"
        "4: ; --- done --- \n"
        : [val] "=&r" (val),
          [got_it] "+&r" (got_it)
        : [rwlock] "r" (&(rw->counter)),
          [WR_LOCKED] "ir" (0)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
         * deny writer. Otherwise if unlocked grant to writer
         * Hence the claim that Linux rwlocks are unfair to writers.
         * (can be starved for an indefinite time by readers).
         *
         *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
         *		rw->counter = 0;
         *		ret = 1;
         *	}
         */
        __asm__ __volatile__(
        "1: llock %[val], [%[rwlock]] \n"
        " brne %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */
        " mov %[val], %[WR_LOCKED] \n"
        " scond %[val], [%[rwlock]] \n"
        " bnz 1b \n"
        " \n"
        : [val] "=&r" (val)
        : [rwlock] "r" (&(rw->counter)),
          [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
          [WR_LOCKED] "ir" (0)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned int val, got_it = 0;

        smp_mb();

        __asm__ __volatile__(
        "1: llock %[val], [%[rwlock]] \n"
        " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
        " mov %[val], %[WR_LOCKED] \n"
        " scond %[val], [%[rwlock]] \n"
        " bnz 1b \n" /* retry if collided with someone */
        " mov %[got_it], 1 \n"
        " \n"
        "4: ; --- done --- \n"
        : [val] "=&r" (val),
          [got_it] "+&r" (got_it)
        : [rwlock] "r" (&(rw->counter)),
          [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
          [WR_LOCKED] "ir" (0)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * rw->counter++;
         */
        __asm__ __volatile__(
        "1: llock %[val], [%[rwlock]] \n"
        " add %[val], %[val], 1 \n"
        " scond %[val], [%[rwlock]] \n"
        " bnz 1b \n"
        " \n"
        : [val] "=&r" (val)
        : [rwlock] "r" (&(rw->counter))
        : "memory", "cc");

        smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        smp_mb();

        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;

        smp_mb();
}

#else /* CONFIG_ARC_STAR_9000923308 */

/*
 * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
 * coherency transactions in the SCU. The exclusive line state keeps rotating
 * among contending cores, leading to a never ending cycle. So break the cycle
 * by deferring the retry of a failed exclusive access (SCOND). The actual delay
 * needed is a function of the number of contending cores as well as the
 * unrelated coherency traffic from other cores. To keep the code simple, start
 * off with a small delay of 1, which suffices in most cases, and in case of
 * contention double the delay. Eventually the delay is sufficient such that
 * the coherency pipeline is drained, thus a subsequent exclusive access would
 * succeed.
 */
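
/*
 * Illustrative sketch (pseudo-C, not compiled, not part of the original
 * source) of the retry scheme that the SCOND_FAIL_RETRY_* fragments below
 * splice into the lock routines:
 *
 *	delay = 1;
 *	again:
 *		llock/scond attempt;
 *		if (scond succeeded)
 *			goto done;
 *		for (tmp = delay; tmp != 0; tmp--)
 *			;			(burn 'delay' iterations)
 *		delay *= 2;			(rol: exponential backoff)
 *		goto again;
 *	done:
 */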

#define SCOND_FAIL_RETRY_VAR_DEF \
        unsigned int delay, tmp;

#define SCOND_FAIL_RETRY_ASM \
        " ; --- scond fail delay --- \n" \
        " mov %[tmp], %[delay] \n" /* tmp = delay */ \
        "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
        " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
        " rol %[delay], %[delay] \n" /* delay *= 2 */ \
        " b 1b \n" /* start over */ \
        " \n" \
        "4: ; --- done --- \n"

#define SCOND_FAIL_RETRY_VARS \
        ,[delay] "=&r" (delay), [tmp] "=&r" (tmp)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned int val;
        SCOND_FAIL_RETRY_VAR_DEF;

        smp_mb();

        __asm__ __volatile__(
        "0: mov %[delay], 1 \n"
        "1: llock %[val], [%[slock]] \n"
        " breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */
        " scond %[LOCKED], [%[slock]] \n" /* acquire */
        " bz 4f \n" /* done */
        " \n"
        SCOND_FAIL_RETRY_ASM
        : [val] "=&r" (val)
          SCOND_FAIL_RETRY_VARS
        : [slock] "r" (&(lock->slock)),
          [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned int val, got_it = 0;
        SCOND_FAIL_RETRY_VAR_DEF;

        smp_mb();

        __asm__ __volatile__(
        "0: mov %[delay], 1 \n"
        "1: llock %[val], [%[slock]] \n"
        " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
        " scond %[LOCKED], [%[slock]] \n" /* acquire */
        " bz.d 4f \n"
        " mov.z %[got_it], 1 \n" /* got it */
        " \n"
        SCOND_FAIL_RETRY_ASM
        : [val] "=&r" (val),
          [got_it] "+&r" (got_it)
          SCOND_FAIL_RETRY_VARS
        : [slock] "r" (&(lock->slock)),
          [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        smp_mb();

        lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

        smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned int val;
        SCOND_FAIL_RETRY_VAR_DEF;

        smp_mb();

        /*
         * zero means writer holds the lock exclusively, deny Reader.
         * Otherwise grant lock to first/subseq reader
         *
         *	if (rw->counter > 0) {
         *		rw->counter--;
         *		ret = 1;
         *	}
         */
        __asm__ __volatile__(
        "0: mov %[delay], 1 \n"
        "1: llock %[val], [%[rwlock]] \n"
        " brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */
        " sub %[val], %[val], 1 \n" /* reader lock */
        " scond %[val], [%[rwlock]] \n"
        " bz 4f \n" /* done */
        " \n"
        SCOND_FAIL_RETRY_ASM
        : [val] "=&r" (val)
          SCOND_FAIL_RETRY_VARS
        : [rwlock] "r" (&(rw->counter)),
          [WR_LOCKED] "ir" (0)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned int val, got_it = 0;
        SCOND_FAIL_RETRY_VAR_DEF;

        smp_mb();

        __asm__ __volatile__(
        "0: mov %[delay], 1 \n"
        "1: llock %[val], [%[rwlock]] \n"
        " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
        " sub %[val], %[val], 1 \n" /* counter-- */
        " scond %[val], [%[rwlock]] \n"
        " bz.d 4f \n"
        " mov.z %[got_it], 1 \n" /* got it */
        " \n"
        SCOND_FAIL_RETRY_ASM
        : [val] "=&r" (val),
          [got_it] "+&r" (got_it)
          SCOND_FAIL_RETRY_VARS
        : [rwlock] "r" (&(rw->counter)),
          [WR_LOCKED] "ir" (0)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned int val;
        SCOND_FAIL_RETRY_VAR_DEF;

        smp_mb();

        /*
         * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
         * deny writer. Otherwise if unlocked grant to writer
         * Hence the claim that Linux rwlocks are unfair to writers.
         * (can be starved for an indefinite time by readers).
         *
         *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
         *		rw->counter = 0;
         *		ret = 1;
         *	}
         */
        __asm__ __volatile__(
        "0: mov %[delay], 1 \n"
        "1: llock %[val], [%[rwlock]] \n"
        " brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */
        " mov %[val], %[WR_LOCKED] \n"
        " scond %[val], [%[rwlock]] \n"
        " bz 4f \n"
        " \n"
        SCOND_FAIL_RETRY_ASM
        : [val] "=&r" (val)
          SCOND_FAIL_RETRY_VARS
        : [rwlock] "r" (&(rw->counter)),
          [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
          [WR_LOCKED] "ir" (0)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned int val, got_it = 0;
        SCOND_FAIL_RETRY_VAR_DEF;

        smp_mb();

        __asm__ __volatile__(
        "0: mov %[delay], 1 \n"
        "1: llock %[val], [%[rwlock]] \n"
        " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
        " mov %[val], %[WR_LOCKED] \n"
        " scond %[val], [%[rwlock]] \n"
        " bz.d 4f \n"
        " mov.z %[got_it], 1 \n" /* got it */
        " \n"
        SCOND_FAIL_RETRY_ASM
        : [val] "=&r" (val),
          [got_it] "+&r" (got_it)
          SCOND_FAIL_RETRY_VARS
        : [rwlock] "r" (&(rw->counter)),
          [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
          [WR_LOCKED] "ir" (0)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * rw->counter++;
         */
        __asm__ __volatile__(
        "1: llock %[val], [%[rwlock]] \n"
        " add %[val], %[val], 1 \n"
        " scond %[val], [%[rwlock]] \n"
        " bnz 1b \n"
        " \n"
        : [val] "=&r" (val)
        : [rwlock] "r" (&(rw->counter))
        : "memory", "cc");

        smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
         */
        __asm__ __volatile__(
        "1: llock %[val], [%[rwlock]] \n"
        " scond %[UNLOCKED], [%[rwlock]]\n"
        " bnz 1b \n"
        " \n"
        : [val] "=&r" (val)
        : [rwlock] "r" (&(rw->counter)),
          [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__)
        : "memory", "cc");

        smp_mb();
}

#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS

#endif /* CONFIG_ARC_STAR_9000923308 */

#else /* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

        /*
         * This smp_mb() is technically superfluous, we only need the one
         * after the lock for providing the ACQUIRE semantics.
         * However doing the "right" thing was regressing hackbench
         * so keeping this, pending further investigation
         */
        smp_mb();

        __asm__ __volatile__(
        "1: ex %0, [%1] \n"
        " breq %0, %2, 1b \n"
        : "+&r" (val)
        : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
        : "memory");

        /*
         * ACQUIRE barrier to ensure load/store after taking the lock
         * don't "bleed-up" out of the critical section (leak-in is allowed)
         * http://www.spinics.net/lists/kernel/msg2010409.html
         *
         * ARCv2 only has load-load, store-store and all-all barrier
         * thus need the full all-all barrier
         */
        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

        smp_mb();

        __asm__ __volatile__(
        "1: ex %0, [%1] \n"
        : "+r" (val)
        : "r"(&(lock->slock))
        : "memory");

        smp_mb();

        return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}
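
/*
 * Illustrative sketch (pseudo-C, not compiled, not part of the original
 * source): EX atomically swaps a register with memory, so the trylock above
 * is roughly
 *
 *	val = __ARCH_SPIN_LOCK_LOCKED__;
 *	atomic_swap(&val, &lock->slock);		(ex)
 *	return val == __ARCH_SPIN_LOCK_UNLOCKED__;	(old value tells us
 *							 whether we took it)
 *
 * where atomic_swap() is just a hypothetical stand-in for the EX instruction.
 */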

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

        /*
         * RELEASE barrier: given the instructions avail on ARCv2, full barrier
         * is the only option
         */
        smp_mb();

        __asm__ __volatile__(
        " ex %0, [%1] \n"
        : "+r" (val)
        : "r"(&(lock->slock))
        : "memory");

        /*
         * superfluous, but keeping for now - see pairing version in
         * arch_spin_lock above
         */
        smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        int ret = 0;

        arch_spin_lock(&(rw->lock_mutex));

        /*
         * zero means writer holds the lock exclusively, deny Reader.
         * Otherwise grant lock to first/subseq reader
         */
        if (rw->counter > 0) {
                rw->counter--;
                ret = 1;
        }

        arch_spin_unlock(&(rw->lock_mutex));

        smp_mb();
        return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        int ret = 0;

        arch_spin_lock(&(rw->lock_mutex));

        /*
         * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
         * deny writer. Otherwise if unlocked grant to writer
         * Hence the claim that Linux rwlocks are unfair to writers.
         * (can be starved for an indefinite time by readers).
         */
        if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
                rw->counter = 0;
                ret = 1;
        }
        arch_spin_unlock(&(rw->lock_mutex));

        return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        while (!arch_read_trylock(rw))
                cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (!arch_write_trylock(rw))
                cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter++;
        arch_spin_unlock(&(rw->lock_mutex));
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
        arch_spin_unlock(&(rw->lock_mutex));
}

#endif /* CONFIG_ARC_HAS_LLSC */

#define arch_read_can_lock(x) ((x)->counter > 0)
#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
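
/*
 * These arch_* primitives are not called directly by kernel code; the generic
 * locking layer (include/linux/spinlock.h and friends) wraps them, so e.g.
 * spin_lock() on an SMP build eventually resolves down to arch_spin_lock().
 */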

#endif /* __ASM_SPINLOCK_H */