rwsem.h

/*
 * include/asm-sh/rwsem.h: R/W semaphores for SH using the stuff
 * in lib/rwsem.c.
 */

#ifndef _ASM_SH_RWSEM_H
#define _ASM_SH_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
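
/*
 * Note on the encoding implied by the constants above: the signed
 * 32-bit count holds active lockers in its low 16 bits
 * (RWSEM_ACTIVE_MASK) and accounts for writers/waiters via
 * RWSEM_WAITING_BIAS in the upper bits.  For example: unlocked is
 * 0x00000000, one active reader is 0x00000001, and an uncontended
 * writer is RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS = 0xffff0001, so
 * any writer or waiter drives the count negative.
 */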

/*
 * lock for reading
 */
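/*
 * Fast path: atomically bump the count; a positive result means no
 * writer is active or waiting, so the read lock is ours.  Otherwise
 * fall back to the slow path in lib/rwsem.c.
 */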
static inline void __down_read(struct rw_semaphore *sem)
{
	if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
		smp_wmb();
	else
		rwsem_down_read_failed(sem);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int tmp;

	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			smp_wmb();
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
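/*
 * Fast path: add the whole write bias in one go; if the result is
 * exactly RWSEM_ACTIVE_WRITE_BIAS the semaphore was previously
 * unlocked and we now own it exclusively, otherwise take the slow
 * path.
 */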
static inline void __down_write(struct rw_semaphore *sem)
{
	int tmp;

	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				(atomic_t *)(&sem->count));
	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
		smp_wmb();
	else
		rwsem_down_write_failed(sem);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	int tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	smp_wmb();
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
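/*
 * Drop our active-reader count; if the result is negative (waiters are
 * queued) and no active lockers remain, wake the waiters.
 */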
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_dec_return((atomic_t *)(&sem->count));
	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	smp_wmb();
	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
			      (atomic_t *)(&sem->count)) < 0)
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
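/*
 * Subtracting RWSEM_WAITING_BIAS turns the write bias
 * (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) into a plain reader bias
 * (RWSEM_ACTIVE_BIAS); a negative result means waiters remain, so the
 * ones that can now run are woken.
 */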
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}

static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	__down_write(sem);
}

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	smp_mb();
	return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

#endif /* __KERNEL__ */
#endif /* _ASM_SH_RWSEM_H */
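
For context, here is a minimal sketch of how kernel code typically uses these primitives through the generic linux/rwsem.h wrappers (init_rwsem(), down_read()/up_read(), down_write()/up_write()), which end up calling the __down_read()/__down_write() helpers above. The structure and names (my_dev, lock, value) are purely illustrative and not part of this header.

#include <linux/rwsem.h>

struct my_dev {
	struct rw_semaphore	lock;	/* must be set up with init_rwsem() */
	int			value;
};

static int my_dev_get(struct my_dev *dev)
{
	int v;

	down_read(&dev->lock);		/* shared: many readers may hold this */
	v = dev->value;
	up_read(&dev->lock);
	return v;
}

static void my_dev_set(struct my_dev *dev, int v)
{
	down_write(&dev->lock);		/* exclusive: blocks readers and writers */
	dev->value = v;
	up_write(&dev->lock);
}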