futex.h

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * These routines make two important assumptions:
 *
 * 1. atomic_t is really an int and can be freely cast back and forth
 *    (validated in __init_atomic_per_cpu).
 *
 * 2. userspace uses sys_cmpxchg() for all atomic operations, thus using
 *    the same locking convention that all the kernel atomic routines use.
 */

#ifndef _ASM_TILE_FUTEX_H
#define _ASM_TILE_FUTEX_H

#ifndef __ASSEMBLY__

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <asm/atomic.h>

/*
 * Support macros for futex operations.  Do not use these macros directly.
 * They assume "ret", "val", "oparg", and "uaddr" in the lexical context.
 * __futex_cmpxchg() additionally assumes "oldval".
 */
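/*
 * For illustration, a minimal sketch of the lexical context the macros
 * rely on (mirroring arch_futex_atomic_op_inuser() below; these are the
 * names the macros expect, not a new interface):
 *
 *        int ret, val;
 *        u32 oparg = 1;
 *        u32 __user *uaddr = ...;        (the user futex word)
 *
 *        __futex_prolog();
 *        pagefault_disable();
 *        __futex_add();        (val = old *uaddr, *uaddr += oparg, ret = 0 or -EFAULT)
 *        pagefault_enable();
 */
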
#ifdef __tilegx__

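/*
 * __futex_asm() wraps a single atomic memory instruction (at local label
 * 1) together with a fixup (at local label 0) and an __ex_table entry
 * pairing the two: a fault on the user address transfers to the fixup,
 * which sets "ret" to -EFAULT and jumps past the operation, while on
 * success "ret" is zero and "val" receives the old memory value.
 */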
#define __futex_asm(OP) \
        asm("1: {" #OP " %1, %3, %4; movei %0, 0 }\n" \
            ".pushsection .fixup,\"ax\"\n" \
            "0: { movei %0, %5; j 9f }\n" \
            ".section __ex_table,\"a\"\n" \
            ".align 8\n" \
            ".quad 1b, 0b\n" \
            ".popsection\n" \
            "9:" \
            : "=r" (ret), "=r" (val), "+m" (*(uaddr)) \
            : "r" (uaddr), "r" (oparg), "i" (-EFAULT))

#define __futex_set() __futex_asm(exch4)
#define __futex_add() __futex_asm(fetchadd4)
#define __futex_or() __futex_asm(fetchor4)
#define __futex_andn() ({ oparg = ~oparg; __futex_asm(fetchand4); })
#define __futex_cmpxchg() \
        ({ __insn_mtspr(SPR_CMPEXCH_VALUE, oldval); __futex_asm(cmpexch4); })

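/*
 * Unlike the operations above, xor is not mapped onto a single
 * __futex_asm() instruction; it reads the old value with __get_user()
 * and then retries __futex_cmpxchg() with oldval ^ n until the exchange
 * succeeds or a fault sets "ret".
 */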
#define __futex_xor() \
        ({ \
                u32 oldval, n = oparg; \
                if ((ret = __get_user(oldval, uaddr)) == 0) { \
                        do { \
                                oparg = oldval ^ n; \
                                __futex_cmpxchg(); \
                        } while (ret == 0 && oldval != val); \
                } \
        })

/* No need to prefetch, since the atomic ops go to the home cache anyway. */
#define __futex_prolog()

#else

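/*
 * The non-tilegx (32-bit) configuration: futex operations go through the
 * arch atomic support routines (__atomic_xchg() and friends), which take
 * the per-address hashed lock set up by __futex_prolog() below and hand
 * back both the old value and an error code in a struct __get_user.
 */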
#define __futex_call(FN) \
        { \
                struct __get_user gu = FN((u32 __force *)uaddr, lock, oparg); \
                val = gu.val; \
                ret = gu.err; \
        }

#define __futex_set() __futex_call(__atomic_xchg)
#define __futex_add() __futex_call(__atomic_xchg_add)
#define __futex_or() __futex_call(__atomic_or)
#define __futex_andn() __futex_call(__atomic_andn)
#define __futex_xor() __futex_call(__atomic_xor)

#define __futex_cmpxchg() \
        { \
                struct __get_user gu = __atomic_cmpxchg((u32 __force *)uaddr, \
                                                        lock, oldval, oparg); \
                val = gu.val; \
                ret = gu.err; \
        }

/*
 * Find the lock pointer for the atomic calls to use, and issue a
 * prefetch to the user address to bring it into cache.  Similar to
 * __atomic_setup(), but we can't do a read into the L1 since it might
 * fault; instead we do a prefetch into the L2.
 */
#define __futex_prolog() \
        int *lock; \
        __insn_prefetch(uaddr); \
        lock = __atomic_hashed_lock((int __force *)uaddr)

#endif

static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
                u32 __user *uaddr)
{
        int uninitialized_var(val), ret;

        __futex_prolog();

        /* The 32-bit futex code makes this assumption, so validate it here. */
        BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));

        pagefault_disable();
        switch (op) {
        case FUTEX_OP_SET:
                __futex_set();
                break;
        case FUTEX_OP_ADD:
                __futex_add();
                break;
        case FUTEX_OP_OR:
                __futex_or();
                break;
        case FUTEX_OP_ANDN:
                __futex_andn();
                break;
        case FUTEX_OP_XOR:
                __futex_xor();
                break;
        default:
                ret = -ENOSYS;
                break;
        }
        pagefault_enable();

        if (!ret)
                *oval = val;

        return ret;
}
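
/*
 * For context, a rough sketch of the expected caller: the generic futex
 * code decodes the user-supplied encoded op word into (op, oparg, cmp,
 * cmparg), calls this hook, and then evaluates the comparison against
 * the returned old value, approximately:
 *
 *        ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
 *        if (!ret)
 *                ret = (oldval == cmparg);        (for FUTEX_OP_CMP_EQ)
 *
 * (a sketch of the kernel/futex.c usage, not something defined here).
 */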

static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                                                u32 oldval, u32 oparg)
{
        int ret, val;

        __futex_prolog();

        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;

        __futex_cmpxchg();

        *uval = val;
        return ret;
}
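
/*
 * futex_atomic_cmpxchg_inatomic() is used by the generic PI/robust futex
 * paths, normally via a wrapper that disables page faults around the
 * call, roughly:
 *
 *        pagefault_disable();
 *        ret = futex_atomic_cmpxchg_inatomic(&curval, uaddr, uval, newval);
 *        pagefault_enable();
 *
 * (a sketch assuming the usual cmpxchg_futex_value_locked() caller in
 * kernel/futex.c).
 */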

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_FUTEX_H */