usercopy_32.c
/*
 * User address space access functions.
 * The non-inlined parts of asm-i386/uaccess.h are here.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/mmx.h>
#include <asm/asm.h>
#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 */
struct movsl_mask movsl_mask __read_mostly;
#endif
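/*
 * movsl is used unconditionally for short copies; for copies of 64
 * bytes or more it is only considered a win when source and
 * destination agree in the low address bits selected by movsl_mask.
 */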
static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
		return 0;
#endif
	return 1;
}

#define movsl_is_ok(a1, a2, n) \
	__movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
/*
 * Zero Userspace
 */
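/*
 * Clears @size bytes at user address @addr: whole dwords with
 * rep stosl, then the 0-3 trailing bytes with rep stosb.  On a fault
 * in the dword pass, the fixup at 3: converts the counts back into a
 * byte total (remaining dwords * 4 plus the trailing bytes), so that
 * "size" ends up holding the number of bytes left uncleared.
 */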
#define __do_clear_user(addr,size) \
do { \
	int __d0; \
	might_fault(); \
	__asm__ __volatile__( \
		ASM_STAC "\n" \
		"0: rep; stosl\n" \
		" movl %2,%0\n" \
		"1: rep; stosb\n" \
		"2: " ASM_CLAC "\n" \
		".section .fixup,\"ax\"\n" \
		"3: lea 0(%2,%0,4),%0\n" \
		" jmp 2b\n" \
		".previous\n" \
		_ASM_EXTABLE(0b,3b) \
		_ASM_EXTABLE(1b,2b) \
		: "=&c"(size), "=&D" (__d0) \
		: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
} while (0)
/**
 * clear_user: - Zero a block of memory in user space.
 * @to: Destination address, in user space.
 * @n:  Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		__do_clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);
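/*
 * Usage sketch (hypothetical caller, not part of this file): zeroing a
 * user-supplied buffer 'ubuf' of 'len' bytes from an ioctl handler:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */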
/**
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to: Destination address, in user space.
 * @n:  Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
__clear_user(void __user *to, unsigned long n)
{
	__do_clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(__clear_user);

#ifdef CONFIG_X86_INTEL_USERCOPY
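/*
 * Unrolled copy loop for Intel CPUs: 64 bytes per iteration through
 * %eax/%edx, with the loads at 1: and 2: reading ahead into the next
 * cache lines.  The sub-64-byte tail is finished with rep movsl /
 * rep movsb.  Every labelled access has an exception-table entry, so
 * a fault lands at 100: (or 101: for the movsl tail, which recomputes
 * the byte count) and the function returns the number of bytes left
 * uncopied in "size".
 */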
static unsigned long
__copy_user_intel(void __user *to, const void *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		" .align 2,0x90\n"
		"1: movl 32(%4), %%eax\n"
		" cmpl $67, %0\n"
		" jbe 3f\n"
		"2: movl 64(%4), %%eax\n"
		" .align 2,0x90\n"
		"3: movl 0(%4), %%eax\n"
		"4: movl 4(%4), %%edx\n"
		"5: movl %%eax, 0(%3)\n"
		"6: movl %%edx, 4(%3)\n"
		"7: movl 8(%4), %%eax\n"
		"8: movl 12(%4),%%edx\n"
		"9: movl %%eax, 8(%3)\n"
		"10: movl %%edx, 12(%3)\n"
		"11: movl 16(%4), %%eax\n"
		"12: movl 20(%4), %%edx\n"
		"13: movl %%eax, 16(%3)\n"
		"14: movl %%edx, 20(%3)\n"
		"15: movl 24(%4), %%eax\n"
		"16: movl 28(%4), %%edx\n"
		"17: movl %%eax, 24(%3)\n"
		"18: movl %%edx, 28(%3)\n"
		"19: movl 32(%4), %%eax\n"
		"20: movl 36(%4), %%edx\n"
		"21: movl %%eax, 32(%3)\n"
		"22: movl %%edx, 36(%3)\n"
		"23: movl 40(%4), %%eax\n"
		"24: movl 44(%4), %%edx\n"
		"25: movl %%eax, 40(%3)\n"
		"26: movl %%edx, 44(%3)\n"
		"27: movl 48(%4), %%eax\n"
		"28: movl 52(%4), %%edx\n"
		"29: movl %%eax, 48(%3)\n"
		"30: movl %%edx, 52(%3)\n"
		"31: movl 56(%4), %%eax\n"
		"32: movl 60(%4), %%edx\n"
		"33: movl %%eax, 56(%3)\n"
		"34: movl %%edx, 60(%3)\n"
		" addl $-64, %0\n"
		" addl $64, %4\n"
		" addl $64, %3\n"
		" cmpl $63, %0\n"
		" ja 1b\n"
		"35: movl %0, %%eax\n"
		" shrl $2, %0\n"
		" andl $3, %%eax\n"
		" cld\n"
		"99: rep; movsl\n"
		"36: movl %%eax, %0\n"
		"37: rep; movsb\n"
		"100:\n"
		".section .fixup,\"ax\"\n"
		"101: lea 0(%%eax,%0,4),%0\n"
		" jmp 100b\n"
		".previous\n"
		_ASM_EXTABLE(1b,100b)
		_ASM_EXTABLE(2b,100b)
		_ASM_EXTABLE(3b,100b)
		_ASM_EXTABLE(4b,100b)
		_ASM_EXTABLE(5b,100b)
		_ASM_EXTABLE(6b,100b)
		_ASM_EXTABLE(7b,100b)
		_ASM_EXTABLE(8b,100b)
		_ASM_EXTABLE(9b,100b)
		_ASM_EXTABLE(10b,100b)
		_ASM_EXTABLE(11b,100b)
		_ASM_EXTABLE(12b,100b)
		_ASM_EXTABLE(13b,100b)
		_ASM_EXTABLE(14b,100b)
		_ASM_EXTABLE(15b,100b)
		_ASM_EXTABLE(16b,100b)
		_ASM_EXTABLE(17b,100b)
		_ASM_EXTABLE(18b,100b)
		_ASM_EXTABLE(19b,100b)
		_ASM_EXTABLE(20b,100b)
		_ASM_EXTABLE(21b,100b)
		_ASM_EXTABLE(22b,100b)
		_ASM_EXTABLE(23b,100b)
		_ASM_EXTABLE(24b,100b)
		_ASM_EXTABLE(25b,100b)
		_ASM_EXTABLE(26b,100b)
		_ASM_EXTABLE(27b,100b)
		_ASM_EXTABLE(28b,100b)
		_ASM_EXTABLE(29b,100b)
		_ASM_EXTABLE(30b,100b)
		_ASM_EXTABLE(31b,100b)
		_ASM_EXTABLE(32b,100b)
		_ASM_EXTABLE(33b,100b)
		_ASM_EXTABLE(34b,100b)
		_ASM_EXTABLE(35b,100b)
		_ASM_EXTABLE(36b,100b)
		_ASM_EXTABLE(37b,100b)
		_ASM_EXTABLE(99b,101b)
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}
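/*
 * Same unrolled loop for the user->kernel direction, with one extra
 * obligation: on a fault the fixup at 16: zero-fills the rest of the
 * kernel destination with rep stosb, so the caller never sees stale
 * kernel memory.
 */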
static unsigned long
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		" .align 2,0x90\n"
		"0: movl 32(%4), %%eax\n"
		" cmpl $67, %0\n"
		" jbe 2f\n"
		"1: movl 64(%4), %%eax\n"
		" .align 2,0x90\n"
		"2: movl 0(%4), %%eax\n"
		"21: movl 4(%4), %%edx\n"
		" movl %%eax, 0(%3)\n"
		" movl %%edx, 4(%3)\n"
		"3: movl 8(%4), %%eax\n"
		"31: movl 12(%4),%%edx\n"
		" movl %%eax, 8(%3)\n"
		" movl %%edx, 12(%3)\n"
		"4: movl 16(%4), %%eax\n"
		"41: movl 20(%4), %%edx\n"
		" movl %%eax, 16(%3)\n"
		" movl %%edx, 20(%3)\n"
		"10: movl 24(%4), %%eax\n"
		"51: movl 28(%4), %%edx\n"
		" movl %%eax, 24(%3)\n"
		" movl %%edx, 28(%3)\n"
		"11: movl 32(%4), %%eax\n"
		"61: movl 36(%4), %%edx\n"
		" movl %%eax, 32(%3)\n"
		" movl %%edx, 36(%3)\n"
		"12: movl 40(%4), %%eax\n"
		"71: movl 44(%4), %%edx\n"
		" movl %%eax, 40(%3)\n"
		" movl %%edx, 44(%3)\n"
		"13: movl 48(%4), %%eax\n"
		"81: movl 52(%4), %%edx\n"
		" movl %%eax, 48(%3)\n"
		" movl %%edx, 52(%3)\n"
		"14: movl 56(%4), %%eax\n"
		"91: movl 60(%4), %%edx\n"
		" movl %%eax, 56(%3)\n"
		" movl %%edx, 60(%3)\n"
		" addl $-64, %0\n"
		" addl $64, %4\n"
		" addl $64, %3\n"
		" cmpl $63, %0\n"
		" ja 0b\n"
		"5: movl %0, %%eax\n"
		" shrl $2, %0\n"
		" andl $3, %%eax\n"
		" cld\n"
		"6: rep; movsl\n"
		" movl %%eax,%0\n"
		"7: rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9: lea 0(%%eax,%0,4),%0\n"
		"16: pushl %0\n"
		" pushl %%eax\n"
		" xorl %%eax,%%eax\n"
		" rep; stosb\n"
		" popl %%eax\n"
		" popl %0\n"
		" jmp 8b\n"
		".previous\n"
		_ASM_EXTABLE(0b,16b)
		_ASM_EXTABLE(1b,16b)
		_ASM_EXTABLE(2b,16b)
		_ASM_EXTABLE(21b,16b)
		_ASM_EXTABLE(3b,16b)
		_ASM_EXTABLE(31b,16b)
		_ASM_EXTABLE(4b,16b)
		_ASM_EXTABLE(41b,16b)
		_ASM_EXTABLE(10b,16b)
		_ASM_EXTABLE(51b,16b)
		_ASM_EXTABLE(11b,16b)
		_ASM_EXTABLE(61b,16b)
		_ASM_EXTABLE(12b,16b)
		_ASM_EXTABLE(71b,16b)
		_ASM_EXTABLE(13b,16b)
		_ASM_EXTABLE(81b,16b)
		_ASM_EXTABLE(14b,16b)
		_ASM_EXTABLE(91b,16b)
		_ASM_EXTABLE(6b,9b)
		_ASM_EXTABLE(7b,16b)
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}
/*
 * Non-temporal-hint version of __copy_user_zeroing_intel.  It is cache
 * aware.
 * hyoshiok@miraclelinux.com
 */
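/*
 * The movnti stores below write to the destination without allocating
 * cache lines, so a large copy does not evict the current working set.
 * Non-temporal stores are weakly ordered, hence the sfence after the
 * main loop to make them globally visible before the tail copy and
 * the return.
 */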
static unsigned long __copy_user_zeroing_intel_nocache(void *to,
				const void __user *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		" .align 2,0x90\n"
		"0: movl 32(%4), %%eax\n"
		" cmpl $67, %0\n"
		" jbe 2f\n"
		"1: movl 64(%4), %%eax\n"
		" .align 2,0x90\n"
		"2: movl 0(%4), %%eax\n"
		"21: movl 4(%4), %%edx\n"
		" movnti %%eax, 0(%3)\n"
		" movnti %%edx, 4(%3)\n"
		"3: movl 8(%4), %%eax\n"
		"31: movl 12(%4),%%edx\n"
		" movnti %%eax, 8(%3)\n"
		" movnti %%edx, 12(%3)\n"
		"4: movl 16(%4), %%eax\n"
		"41: movl 20(%4), %%edx\n"
		" movnti %%eax, 16(%3)\n"
		" movnti %%edx, 20(%3)\n"
		"10: movl 24(%4), %%eax\n"
		"51: movl 28(%4), %%edx\n"
		" movnti %%eax, 24(%3)\n"
		" movnti %%edx, 28(%3)\n"
		"11: movl 32(%4), %%eax\n"
		"61: movl 36(%4), %%edx\n"
		" movnti %%eax, 32(%3)\n"
		" movnti %%edx, 36(%3)\n"
		"12: movl 40(%4), %%eax\n"
		"71: movl 44(%4), %%edx\n"
		" movnti %%eax, 40(%3)\n"
		" movnti %%edx, 44(%3)\n"
		"13: movl 48(%4), %%eax\n"
		"81: movl 52(%4), %%edx\n"
		" movnti %%eax, 48(%3)\n"
		" movnti %%edx, 52(%3)\n"
		"14: movl 56(%4), %%eax\n"
		"91: movl 60(%4), %%edx\n"
		" movnti %%eax, 56(%3)\n"
		" movnti %%edx, 60(%3)\n"
		" addl $-64, %0\n"
		" addl $64, %4\n"
		" addl $64, %3\n"
		" cmpl $63, %0\n"
		" ja 0b\n"
		" sfence \n"
		"5: movl %0, %%eax\n"
		" shrl $2, %0\n"
		" andl $3, %%eax\n"
		" cld\n"
		"6: rep; movsl\n"
		" movl %%eax,%0\n"
		"7: rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9: lea 0(%%eax,%0,4),%0\n"
		"16: pushl %0\n"
		" pushl %%eax\n"
		" xorl %%eax,%%eax\n"
		" rep; stosb\n"
		" popl %%eax\n"
		" popl %0\n"
		" jmp 8b\n"
		".previous\n"
		_ASM_EXTABLE(0b,16b)
		_ASM_EXTABLE(1b,16b)
		_ASM_EXTABLE(2b,16b)
		_ASM_EXTABLE(21b,16b)
		_ASM_EXTABLE(3b,16b)
		_ASM_EXTABLE(31b,16b)
		_ASM_EXTABLE(4b,16b)
		_ASM_EXTABLE(41b,16b)
		_ASM_EXTABLE(10b,16b)
		_ASM_EXTABLE(51b,16b)
		_ASM_EXTABLE(11b,16b)
		_ASM_EXTABLE(61b,16b)
		_ASM_EXTABLE(12b,16b)
		_ASM_EXTABLE(71b,16b)
		_ASM_EXTABLE(13b,16b)
		_ASM_EXTABLE(81b,16b)
		_ASM_EXTABLE(14b,16b)
		_ASM_EXTABLE(91b,16b)
		_ASM_EXTABLE(6b,9b)
		_ASM_EXTABLE(7b,16b)
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}
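/*
 * Non-zeroing variant of the nocache copy: the main loop is identical,
 * but on a fault the fixup only computes the remaining byte count and
 * returns, without zero-filling the destination.
 */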
static unsigned long __copy_user_intel_nocache(void *to,
				const void __user *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		" .align 2,0x90\n"
		"0: movl 32(%4), %%eax\n"
		" cmpl $67, %0\n"
		" jbe 2f\n"
		"1: movl 64(%4), %%eax\n"
		" .align 2,0x90\n"
		"2: movl 0(%4), %%eax\n"
		"21: movl 4(%4), %%edx\n"
		" movnti %%eax, 0(%3)\n"
		" movnti %%edx, 4(%3)\n"
		"3: movl 8(%4), %%eax\n"
		"31: movl 12(%4),%%edx\n"
		" movnti %%eax, 8(%3)\n"
		" movnti %%edx, 12(%3)\n"
		"4: movl 16(%4), %%eax\n"
		"41: movl 20(%4), %%edx\n"
		" movnti %%eax, 16(%3)\n"
		" movnti %%edx, 20(%3)\n"
		"10: movl 24(%4), %%eax\n"
		"51: movl 28(%4), %%edx\n"
		" movnti %%eax, 24(%3)\n"
		" movnti %%edx, 28(%3)\n"
		"11: movl 32(%4), %%eax\n"
		"61: movl 36(%4), %%edx\n"
		" movnti %%eax, 32(%3)\n"
		" movnti %%edx, 36(%3)\n"
		"12: movl 40(%4), %%eax\n"
		"71: movl 44(%4), %%edx\n"
		" movnti %%eax, 40(%3)\n"
		" movnti %%edx, 44(%3)\n"
		"13: movl 48(%4), %%eax\n"
		"81: movl 52(%4), %%edx\n"
		" movnti %%eax, 48(%3)\n"
		" movnti %%edx, 52(%3)\n"
		"14: movl 56(%4), %%eax\n"
		"91: movl 60(%4), %%edx\n"
		" movnti %%eax, 56(%3)\n"
		" movnti %%edx, 60(%3)\n"
		" addl $-64, %0\n"
		" addl $64, %4\n"
		" addl $64, %3\n"
		" cmpl $63, %0\n"
		" ja 0b\n"
		" sfence \n"
		"5: movl %0, %%eax\n"
		" shrl $2, %0\n"
		" andl $3, %%eax\n"
		" cld\n"
		"6: rep; movsl\n"
		" movl %%eax,%0\n"
		"7: rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9: lea 0(%%eax,%0,4),%0\n"
		"16: jmp 8b\n"
		".previous\n"
		_ASM_EXTABLE(0b,16b)
		_ASM_EXTABLE(1b,16b)
		_ASM_EXTABLE(2b,16b)
		_ASM_EXTABLE(21b,16b)
		_ASM_EXTABLE(3b,16b)
		_ASM_EXTABLE(31b,16b)
		_ASM_EXTABLE(4b,16b)
		_ASM_EXTABLE(41b,16b)
		_ASM_EXTABLE(10b,16b)
		_ASM_EXTABLE(51b,16b)
		_ASM_EXTABLE(11b,16b)
		_ASM_EXTABLE(61b,16b)
		_ASM_EXTABLE(12b,16b)
		_ASM_EXTABLE(71b,16b)
		_ASM_EXTABLE(13b,16b)
		_ASM_EXTABLE(81b,16b)
		_ASM_EXTABLE(14b,16b)
		_ASM_EXTABLE(91b,16b)
		_ASM_EXTABLE(6b,9b)
		_ASM_EXTABLE(7b,16b)
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}
#else
/*
 * Leave these declared but undefined.  There should not be any
 * references to them.
 */
unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
					unsigned long size);
unsigned long __copy_user_intel(void __user *to, const void *from,
					unsigned long size);
unsigned long __copy_user_zeroing_intel_nocache(void *to,
				const void __user *from, unsigned long size);
#endif /* CONFIG_X86_INTEL_USERCOPY */
/* Generic arbitrary sized copy. */
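/*
 * Strategy: for copies longer than 7 bytes, first rep movsb up to the
 * next 8-byte boundary of the destination, then rep movsl for the
 * bulk, then rep movsb for the 0-3 trailing bytes.  The fixup labels
 * fold the leftover loop counts back into a byte count so "size"
 * reports how much was not copied.
 */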
#define __copy_user(to, from, size) \
do { \
	int __d0, __d1, __d2; \
	__asm__ __volatile__( \
		" cmp $7,%0\n" \
		" jbe 1f\n" \
		" movl %1,%0\n" \
		" negl %0\n" \
		" andl $7,%0\n" \
		" subl %0,%3\n" \
		"4: rep; movsb\n" \
		" movl %3,%0\n" \
		" shrl $2,%0\n" \
		" andl $3,%3\n" \
		" .align 2,0x90\n" \
		"0: rep; movsl\n" \
		" movl %3,%0\n" \
		"1: rep; movsb\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"5: addl %3,%0\n" \
		" jmp 2b\n" \
		"3: lea 0(%3,%0,4),%0\n" \
		" jmp 2b\n" \
		".previous\n" \
		_ASM_EXTABLE(4b,5b) \
		_ASM_EXTABLE(0b,3b) \
		_ASM_EXTABLE(1b,2b) \
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
		: "3"(size), "0"(size), "1"(to), "2"(from) \
		: "memory"); \
} while (0)
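/*
 * Same layout as __copy_user plus the zero-fill fixup: if a fault
 * occurs, the remaining destination bytes are cleared with rep stosb
 * before returning, matching the copy_from_user() contract.
 */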
#define __copy_user_zeroing(to, from, size) \
do { \
	int __d0, __d1, __d2; \
	__asm__ __volatile__( \
		" cmp $7,%0\n" \
		" jbe 1f\n" \
		" movl %1,%0\n" \
		" negl %0\n" \
		" andl $7,%0\n" \
		" subl %0,%3\n" \
		"4: rep; movsb\n" \
		" movl %3,%0\n" \
		" shrl $2,%0\n" \
		" andl $3,%3\n" \
		" .align 2,0x90\n" \
		"0: rep; movsl\n" \
		" movl %3,%0\n" \
		"1: rep; movsb\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"5: addl %3,%0\n" \
		" jmp 6f\n" \
		"3: lea 0(%3,%0,4),%0\n" \
		"6: pushl %0\n" \
		" pushl %%eax\n" \
		" xorl %%eax,%%eax\n" \
		" rep; stosb\n" \
		" popl %%eax\n" \
		" popl %0\n" \
		" jmp 2b\n" \
		".previous\n" \
		_ASM_EXTABLE(4b,5b) \
		_ASM_EXTABLE(0b,3b) \
		_ASM_EXTABLE(1b,6b) \
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
		: "3"(size), "0"(size), "1"(to), "2"(from) \
		: "memory"); \
} while (0)
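/*
 * The *_ll entry points below choose between the generic movsl-based
 * copy and the Intel unrolled variants, based on movsl_is_ok() or CPU
 * feature checks.  __uaccess_begin_nospec() opens the user-access
 * window (STAC on SMAP-capable CPUs) and, in this kernel vintage, also
 * acts as a barrier against speculation past the access_ok() check;
 * __uaccess_end() closes the window again.
 */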
unsigned long __copy_to_user_ll(void __user *to, const void *from,
				unsigned long n)
{
	__uaccess_begin_nospec();
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel(to, from, n);
	__uaccess_end();
	return n;
}
EXPORT_SYMBOL(__copy_to_user_ll);

unsigned long __copy_from_user_ll(void *to, const void __user *from,
				unsigned long n)
{
	__uaccess_begin_nospec();
	if (movsl_is_ok(to, from, n))
		__copy_user_zeroing(to, from, n);
	else
		n = __copy_user_zeroing_intel(to, from, n);
	__uaccess_end();
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll);

unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
					unsigned long n)
{
	__uaccess_begin_nospec();
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel((void __user *)to,
				      (const void *)from, n);
	__uaccess_end();
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nozero);

unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
					unsigned long n)
{
	__uaccess_begin_nospec();
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n > 64 && cpu_has_xmm2)
		n = __copy_user_zeroing_intel_nocache(to, from, n);
	else
		__copy_user_zeroing(to, from, n);
#else
	__copy_user_zeroing(to, from, n);
#endif
	__uaccess_end();
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache);

unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
					unsigned long n)
{
	__uaccess_begin_nospec();
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n > 64 && cpu_has_xmm2)
		n = __copy_user_intel_nocache(to, from, n);
	else
		__copy_user(to, from, n);
#else
	__copy_user(to, from, n);
#endif
	__uaccess_end();
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}
EXPORT_SYMBOL(_copy_to_user);
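/*
 * Usage sketch (hypothetical read() handler, not part of this file):
 * copying 'len' bytes from a kernel buffer 'kbuf' out to the user
 * pointer 'buf':
 *
 *	if (copy_to_user(buf, kbuf, len))
 *		return -EFAULT;
 *	return len;
 */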
/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
{
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);
	return n;
}
EXPORT_SYMBOL(_copy_from_user);
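/*
 * Usage sketch (hypothetical write() handler, not part of this file):
 * pulling 'len' bytes from the user pointer 'buf' into a kernel buffer
 * 'kbuf'; note that on a partial fault the destination tail has
 * already been zero-filled:
 *
 *	if (copy_from_user(kbuf, buf, len))
 *		return -EFAULT;
 */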