mutex.h

/*
 * Optimised mutex implementation of include/asm-generic/mutex-dec.h algorithm
 */
#ifndef _ASM_POWERPC_MUTEX_H
#define _ASM_POWERPC_MUTEX_H

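/*
 * Atomically compare-and-swap v->counter from 'old' to 'new' with a
 * lwarx/stwcx. loop, returning the value observed before the exchange.
 * The acquire barrier is only executed on the successful store path;
 * the early exit to label 2 skips it.
 */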
static inline int __mutex_cmpxchg_lock(atomic_t *v, int old, int new)
{
	int t;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%1		# mutex trylock\n\
	cmpw	0,%0,%2\n\
	bne-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%3,0,%1\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	"\n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (old), "r" (new)
	: "cc", "memory");

	return t;
}

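/*
 * Atomically decrement v->counter and return the new value, with an
 * acquire barrier ordering the critical section after the update.
 */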
static inline int __mutex_dec_return_lock(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%1		# mutex lock\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

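/*
 * Atomically increment v->counter and return the new value.  The
 * release barrier precedes the update so that stores made while
 * holding the lock are visible before the lock is dropped.
 */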
static inline int __mutex_inc_return_unlock(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%1		# mutex unlock\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/**
 * __mutex_fastpath_lock - try to take the lock by moving the count
 *                         from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function MUST leave the value lower than
 * 1 even when the "1" assertion wasn't true.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(__mutex_dec_return_lock(count) < 0))
		fail_fn(count);
}

/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                from 1 to a 0 value
 * @count: pointer of type atomic_t
 *
 * Change the count from 1 to a value lower than 1. This function returns 0
 * if the fastpath succeeds, or -1 otherwise.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count)
{
	if (unlikely(__mutex_dec_return_lock(count) < 0))
		return -1;
	return 0;
}

/**
 * __mutex_fastpath_unlock - try to promote the count from 0 to 1
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value to
 * 1, or to set it to a value lower than 1.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(__mutex_inc_return_unlock(count) <= 0))
		fail_fn(count);
}

#define __mutex_slowpath_needs_to_unlock()	1

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 * @count: pointer of type atomic_t
 * @fail_fn: fallback function
 *
 * Change the count from 1 to 0, and return 1 (success), or if the count
 * was not 1, then return 0 (failure).
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	if (likely(__mutex_cmpxchg_lock(count, 1, 0) == 1))
		return 1;
	return 0;
}

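/*
 * Illustrative sketch (not part of this header): the generic mutex core
 * is expected to combine the fastpath helpers above with its slowpath
 * handlers roughly as follows.  The 'struct mutex' layout, the example_*
 * function names and the slowpath handlers are placeholders, not
 * definitions from this file.
 *
 *	void example_mutex_lock(struct mutex *lock)
 *	{
 *		__mutex_fastpath_lock(&lock->count, example_lock_slowpath);
 *	}
 *
 *	void example_mutex_unlock(struct mutex *lock)
 *	{
 *		__mutex_fastpath_unlock(&lock->count, example_unlock_slowpath);
 *	}
 *
 *	int example_mutex_trylock(struct mutex *lock)
 *	{
 *		return __mutex_fastpath_trylock(&lock->count, example_trylock_slowpath);
 *	}
 */
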
#endif