  1. /*
  2. * Copyright (C) 2004-2006 Atmel Corporation
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 as
  6. * published by the Free Software Foundation.
  7. */
#ifndef __ASM_AVR32_UACCESS_H
#define __ASM_AVR32_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * Address-space descriptor: is_user_space is nonzero for USER_DS
 * (user-space limit enforced) and zero for KERNEL_DS (no limit).
 */
typedef struct {
	unsigned int is_user_space;
} mm_segment_t;

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are misnamed.
 */
#define MAKE_MM_SEG(s)		((mm_segment_t) { (s) })
#define segment_eq(a, b)	((a).is_user_space == (b).is_user_space)

/* First address that is NOT valid for user accesses (see __range_ok()). */
#define USER_ADDR_LIMIT		0x80000000

#define KERNEL_DS		MAKE_MM_SEG(0)
#define USER_DS			MAKE_MM_SEG(1)

#define get_ds()		(KERNEL_DS)
/*
 * Return the current address-space limit as an mm_segment_t.
 * The limit is tracked in the per-thread TIF_USERSPACE flag rather
 * than in a dedicated field.
 */
static inline mm_segment_t get_fs(void)
{
	return MAKE_MM_SEG(test_thread_flag(TIF_USERSPACE));
}
/*
 * Set the current address-space limit: mirror s.is_user_space into the
 * TIF_USERSPACE thread flag, which __range_ok() consults on every check.
 */
static inline void set_fs(mm_segment_t s)
{
	if (s.is_user_space)
		set_thread_flag(TIF_USERSPACE);
	else
		clear_thread_flag(TIF_USERSPACE);
}
  41. /*
  42. * Test whether a block of memory is a valid user space address.
  43. * Returns 0 if the range is valid, nonzero otherwise.
  44. *
  45. * We do the following checks:
  46. * 1. Is the access from kernel space?
  47. * 2. Does (addr + size) set the carry bit?
  48. * 3. Is (addr + size) a negative number (i.e. >= 0x80000000)?
  49. *
  50. * If yes on the first check, access is granted.
  51. * If no on any of the others, access is denied.
  52. */
  53. #define __range_ok(addr, size) \
  54. (test_thread_flag(TIF_USERSPACE) \
  55. && (((unsigned long)(addr) >= 0x80000000) \
  56. || ((unsigned long)(size) > 0x80000000) \
  57. || (((unsigned long)(addr) + (unsigned long)(size)) > 0x80000000)))
  58. #define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0))
/* Generic arbitrary sized copy. Return the number of bytes NOT copied */
extern __kernel_size_t __copy_user(void *to, const void *from,
				   __kernel_size_t n);

/* Checked copy-out; implemented out of line (does its own access_ok). */
extern __kernel_size_t copy_to_user(void __user *to, const void *from,
				    __kernel_size_t n);
/* Checked copy-in backend used by copy_from_user() below. */
extern __kernel_size_t ___copy_from_user(void *to, const void __user *from,
					 __kernel_size_t n);
/*
 * Unchecked copy to user space (caller must have done access_ok()).
 * Returns the number of bytes NOT copied, like __copy_user().
 */
static inline __kernel_size_t __copy_to_user(void __user *to, const void *from,
					     __kernel_size_t n)
{
	/* __force: deliberately strip the __user qualifier for the raw copier */
	return __copy_user((void __force *)to, from, n);
}
/*
 * Unchecked copy from user space (caller must have done access_ok()).
 * Returns the number of bytes NOT copied.  Note: unlike copy_from_user()
 * below, this does NOT zero the destination tail on a partial copy.
 */
static inline __kernel_size_t __copy_from_user(void *to,
					       const void __user *from,
					       __kernel_size_t n)
{
	return __copy_user(to, (const void __force *)from, n);
}
/*
 * Checked copy from user space.  Returns the number of bytes NOT copied.
 * On a partial copy the uncopied tail of @to is zeroed so that no stale
 * kernel stack/heap data can leak to the caller.
 */
static inline __kernel_size_t copy_from_user(void *to,
					     const void __user *from,
					     __kernel_size_t n)
{
	size_t res = ___copy_from_user(to, from, n);

	if (unlikely(res))
		memset(to + (n - res), 0, res);	/* zero the unwritten tail */
	return res;
}

/* No extra work is needed to make these atomic-context safe here. */
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
/*
 * put_user: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)	\
	__put_user_check((x), (ptr), sizeof(*(ptr)))
/*
 * get_user: - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr)	\
	__get_user_check((x), (ptr), sizeof(*(ptr)))
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)	\
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)	\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/* Deliberately undefined: referencing them turns an unsupported access
 * size into a link-time error. */
extern int __get_user_bad(void);
extern int __put_user_bad(void);
/*
 * Load a 1-, 2- or 4-byte value from user space with no access_ok()
 * check (caller must have validated @ptr).  Evaluates to 0 on success,
 * -EFAULT on fault; @x is assigned zero on fault because __gu_val is
 * pre-initialized.  Other sizes become a link error via __get_user_bad().
 */
#define __get_user_nocheck(x, ptr, size)				\
({									\
	unsigned long __gu_val = 0;					\
	int __gu_err = 0;						\
									\
	switch (size) {							\
	case 1: __get_user_asm("ub", __gu_val, ptr, __gu_err); break;	\
	case 2: __get_user_asm("uh", __gu_val, ptr, __gu_err); break;	\
	case 4: __get_user_asm("w", __gu_val, ptr, __gu_err); break;	\
	default: __gu_err = __get_user_bad(); break;			\
	}								\
									\
	x = (__force typeof(*(ptr)))__gu_val;				\
	__gu_err;							\
})
/*
 * Like __get_user_nocheck() but verifies the address range first.
 * On a failed access_ok() it skips the load entirely and returns -EFAULT;
 * @x still receives zero (the pre-initialized __gu_val) in every error path.
 */
#define __get_user_check(x, ptr, size)					\
({									\
	unsigned long __gu_val = 0;					\
	const typeof(*(ptr)) __user * __gu_addr = (ptr);		\
	int __gu_err = 0;						\
									\
	if (access_ok(VERIFY_READ, __gu_addr, size)) {			\
		switch (size) {						\
		case 1:							\
			__get_user_asm("ub", __gu_val, __gu_addr,	\
				       __gu_err);			\
			break;						\
		case 2:							\
			__get_user_asm("uh", __gu_val, __gu_addr,	\
				       __gu_err);			\
			break;						\
		case 4:							\
			__get_user_asm("w", __gu_val, __gu_addr,	\
				       __gu_err);			\
			break;						\
		default:						\
			__gu_err = __get_user_bad();			\
			break;						\
		}							\
	} else {							\
		__gu_err = -EFAULT;					\
	}								\
	x = (__force typeof(*(ptr)))__gu_val;				\
	__gu_err;							\
})
/*
 * Emit one user-space load, "ld.<suffix>" (ub/uh/w selected by the size
 * switch in the callers).  The __ex_table entry maps a fault at label 1
 * to the fixup at label 3, which writes -EFAULT into the error operand
 * and jumps back past the load; __gu_val is then left at its caller-
 * initialized value.  The fixup lives in .subsection 1 to stay out of
 * the hot path.
 */
#define __get_user_asm(suffix, __gu_val, ptr, __gu_err)			\
	asm volatile(							\
		"1:	ld." suffix "	%1, %3			\n"	\
		"2:						\n"	\
		"	.subsection 1				\n"	\
		"3:	mov	%0, %4				\n"	\
		"	rjmp	2b				\n"	\
		"	.subsection 0				\n"	\
		"	.section __ex_table, \"a\"		\n"	\
		"	.long	1b, 3b				\n"	\
		"	.previous				\n"	\
		: "=r"(__gu_err), "=r"(__gu_val)			\
		: "0"(__gu_err), "m"(*(ptr)), "i"(-EFAULT))
/*
 * Store a 1-, 2-, 4- or 8-byte value to user space with no access_ok()
 * check.  Note stores support size 8 ("d") while loads do not.
 * Evaluates to 0 on success or -EFAULT on fault; unsupported sizes
 * become a link error via __put_user_bad().
 */
#define __put_user_nocheck(x, ptr, size)				\
({									\
	typeof(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__pu_val = (x);							\
	switch (size) {							\
	case 1: __put_user_asm("b", ptr, __pu_val, __pu_err); break;	\
	case 2: __put_user_asm("h", ptr, __pu_val, __pu_err); break;	\
	case 4: __put_user_asm("w", ptr, __pu_val, __pu_err); break;	\
	case 8: __put_user_asm("d", ptr, __pu_val, __pu_err); break;	\
	default: __pu_err = __put_user_bad(); break;			\
	}								\
	__pu_err;							\
})
/*
 * Like __put_user_nocheck() but verifies the address range first.
 * Note @x is evaluated (into __pu_val) before the access_ok() check,
 * so its side effects occur even when -EFAULT is returned.
 */
#define __put_user_check(x, ptr, size)					\
({									\
	typeof(*(ptr)) __pu_val;					\
	typeof(*(ptr)) __user *__pu_addr = (ptr);			\
	int __pu_err = 0;						\
									\
	__pu_val = (x);							\
	if (access_ok(VERIFY_WRITE, __pu_addr, size)) {			\
		switch (size) {						\
		case 1:							\
			__put_user_asm("b", __pu_addr, __pu_val,	\
				       __pu_err);			\
			break;						\
		case 2:							\
			__put_user_asm("h", __pu_addr, __pu_val,	\
				       __pu_err);			\
			break;						\
		case 4:							\
			__put_user_asm("w", __pu_addr, __pu_val,	\
				       __pu_err);			\
			break;						\
		case 8:							\
			__put_user_asm("d", __pu_addr, __pu_val,	\
				       __pu_err);			\
			break;						\
		default:						\
			__pu_err = __put_user_bad();			\
			break;						\
		}							\
	} else {							\
		__pu_err = -EFAULT;					\
	}								\
	__pu_err;							\
})
  281. #define __put_user_asm(suffix, ptr, __pu_val, __gu_err) \
  282. asm volatile( \
  283. "1: st." suffix " %1, %3 \n" \
  284. "2: \n" \
  285. " .subsection 1 \n" \
  286. "3: mov %0, %4 \n" \
  287. " rjmp 2b \n" \
  288. " .subsection 0 \n" \
  289. " .section __ex_table, \"a\" \n" \
  290. " .long 1b, 3b \n" \
  291. " .previous \n" \
  292. : "=r"(__gu_err), "=m"(*(ptr)) \
  293. : "0"(__gu_err), "r"(__pu_val), "i"(-EFAULT))
/* Zero @size bytes at user address @addr; checked and unchecked variants.
 * Both return the number of bytes NOT cleared. */
extern __kernel_size_t clear_user(void __user *addr, __kernel_size_t size);
extern __kernel_size_t __clear_user(void __user *addr, __kernel_size_t size);

/* String helpers; the double-underscore forms skip the access_ok() check. */
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern long __strncpy_from_user(char *dst, const char __user *src, long count);
extern long strnlen_user(const char __user *__s, long __n);
extern long __strnlen_user(const char __user *__s, long __n);

/* LONG_MAX bound: effectively unlimited strnlen. */
#define strlen_user(s) strnlen_user(s, ~0UL >> 1)

/*
 * One entry per faultable instruction emitted by the *_asm macros above:
 * the faulting instruction address and the address of its fixup code.
 */
struct exception_table_entry
{
	unsigned long insn, fixup;
};

#endif /* __ASM_AVR32_UACCESS_H */