/* copy_user.S */
/*
 * Copy to/from userspace with optional address space checking.
 *
 * Copyright 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm.h>

/*
 * __kernel_size_t
 * __copy_user(void *to, const void *from, __kernel_size_t n)
 *
 * Returns the number of bytes not copied. Might be off by
 * max 3 bytes if we get a fault in the main loop.
 *
 * The address-space checking functions simply fall through to
 * the non-checking version.
 */
  23. .text
  24. .align 1
  25. .global ___copy_from_user
  26. .type ___copy_from_user, @function
  27. ___copy_from_user:
  28. branch_if_kernel r8, __copy_user
  29. ret_if_privileged r8, r11, r10, r10
  30. rjmp __copy_user
  31. .size ___copy_from_user, . - ___copy_from_user
  32. .global copy_to_user
  33. .type copy_to_user, @function
  34. copy_to_user:
  35. branch_if_kernel r8, __copy_user
  36. ret_if_privileged r8, r12, r10, r10
  37. .size copy_to_user, . - copy_to_user
  38. .global __copy_user
  39. .type __copy_user, @function
  40. __copy_user:
  41. mov r9, r11
  42. andl r9, 3, COH
  43. brne 6f
  44. /* At this point, from is word-aligned */
  45. 1: sub r10, 4
  46. brlt 3f
  47. 2:
  48. 10: ld.w r8, r11++
  49. 11: st.w r12++, r8
  50. sub r10, 4
  51. brge 2b
  52. 3: sub r10, -4
  53. reteq 0
  54. /*
  55. * Handle unaligned count. Need to be careful with r10 here so
  56. * that we return the correct value even if we get a fault
  57. */
  58. 4:
  59. 20: ld.ub r8, r11++
  60. 21: st.b r12++, r8
  61. sub r10, 1
  62. reteq 0
  63. 22: ld.ub r8, r11++
  64. 23: st.b r12++, r8
  65. sub r10, 1
  66. reteq 0
  67. 24: ld.ub r8, r11++
  68. 25: st.b r12++, r8
  69. retal 0
  70. /* Handle unaligned from-pointer */
  71. 6: cp.w r10, 4
  72. brlt 4b
  73. rsub r9, r9, 4
  74. 30: ld.ub r8, r11++
  75. 31: st.b r12++, r8
  76. sub r10, 1
  77. sub r9, 1
  78. breq 1b
  79. 32: ld.ub r8, r11++
  80. 33: st.b r12++, r8
  81. sub r10, 1
  82. sub r9, 1
  83. breq 1b
  84. 34: ld.ub r8, r11++
  85. 35: st.b r12++, r8
  86. sub r10, 1
  87. rjmp 1b
  88. .size __copy_user, . - __copy_user
  89. .section .fixup,"ax"
  90. .align 1
  91. 19: sub r10, -4
  92. 29: retal r10
  93. .section __ex_table,"a"
  94. .align 2
  95. .long 10b, 19b
  96. .long 11b, 19b
  97. .long 20b, 29b
  98. .long 21b, 29b
  99. .long 22b, 29b
  100. .long 23b, 29b
  101. .long 24b, 29b
  102. .long 25b, 29b
  103. .long 30b, 29b
  104. .long 31b, 29b
  105. .long 32b, 29b
  106. .long 33b, 29b
  107. .long 34b, 29b
  108. .long 35b, 29b