/*
 * Normally compiler builtins are used, but sometimes the compiler calls out
 * of line code. Based on asm-i386/string.h.
 *
 * This assembly file is rewritten from the memmove_64.c file.
 *	- Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>

#undef memmove

/*
 * Implement memmove(). This can handle overlap between src and dst.
 *
 * Input:
 * rdi: dest
 * rsi: src
 * rdx: count
 *
 * Output:
 * rax: dest
 */
.weak memmove

ENTRY(memmove)
ENTRY(__memmove)

	/* Handle 32 bytes or more in the loop; smaller sizes go straight to the tail code at 1 */
	mov %rdi, %rax
	cmp $0x20, %rdx
	jb 1f

	/* Decide forward/backward copy mode */
	cmp %rdi, %rsi
	jge .Lmemmove_begin_forward
	mov %rsi, %r8
	add %rdx, %r8
	cmp %rdi, %r8
	jg 2f
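
	/*
	 * The direction choice above is roughly equivalent to this C sketch
	 * (illustrative only):
	 *
	 *	if (src >= dst || src + count <= dst)
	 *		a forward copy is safe;
	 *	else
	 *		copy backward, since dst lies inside [src, src + count).
	 */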

.Lmemmove_begin_forward:
	ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS
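
	/*
	 * On CPUs advertising X86_FEATURE_ERMS (enhanced REP MOVSB), the
	 * alternative above is patched in at boot, so the whole forward copy
	 * becomes a single "rep movsb" and returns early; otherwise it is a
	 * no-op and we fall through to the code below.
	 */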

	/*
	 * The movsq instruction has a high startup latency, so we handle
	 * small sizes with general-purpose registers instead.
	 */
	cmp $680, %rdx
	jb 3f

	/*
	 * movsq is only good when src and dst are mutually aligned.
	 */
	cmpb %dil, %sil
	je 4f
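
	/*
	 * The 680-byte cutoff comes from the original implementation
	 * (presumably tuned empirically). Comparing only the low bytes of
	 * %rdi and %rsi checks that dst and src share the same alignment
	 * offset, which is the case where rep movsq pays off.
	 */
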
3:
	sub $0x20, %rdx
	/*
	 * We gobble 32 bytes forward in each loop.
	 */
5:
	sub $0x20, %rdx
	movq 0*8(%rsi), %r11
	movq 1*8(%rsi), %r10
	movq 2*8(%rsi), %r9
	movq 3*8(%rsi), %r8
	leaq 4*8(%rsi), %rsi

	movq %r11, 0*8(%rdi)
	movq %r10, 1*8(%rdi)
	movq %r9, 2*8(%rdi)
	movq %r8, 3*8(%rdi)
	leaq 4*8(%rdi), %rdi
	jae 5b
	addq $0x20, %rdx
	jmp 1f
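
	/*
	 * Loop control above: movq and leaq do not touch the flags, so
	 * "jae 5b" tests the carry from the "sub $0x20, %rdx" at the top of
	 * the loop and keeps going while at least 32 bytes were still
	 * pending. The extra sub before the loop and the addq after it leave
	 * the 0-31 leftover bytes in %rdx for the tail code at 1.
	 */
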
	/*
	 * Handle data forward by movsq.
	 */
	.p2align 4
4:
	movq %rdx, %rcx
	movq -8(%rsi, %rdx), %r11
	lea -8(%rdi, %rdx), %r10
	shrq $3, %rcx
	rep movsq
	movq %r11, (%r10)
	jmp 13f
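
	/*
	 * rep movsq copies count/8 quadwords; the final (possibly
	 * misaligned) quadword at src + count - 8 is loaded into %r11 before
	 * the string move and stored afterwards, so any 1-7 trailing bytes
	 * are covered even if the string move has already written over that
	 * part of the source.
	 */
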
.Lmemmove_end_forward:

	/*
	 * Handle data backward by movsq.
	 */
	.p2align 4
7:
	movq %rdx, %rcx
	movq (%rsi), %r11
	movq %rdi, %r10
	leaq -8(%rsi, %rdx), %rsi
	leaq -8(%rdi, %rdx), %rdi
	shrq $3, %rcx
	std
	rep movsq
	cld
	movq %r11, (%r10)
	jmp 13f
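
	/*
	 * Backward variant of the same trick: std sets the direction flag so
	 * rep movsq walks down from the last quadword, and cld clears it
	 * again immediately (the kernel expects DF clear). The first
	 * quadword of src is saved in %r11 up front and written to the
	 * original dst at the end, covering the 1-7 leading bytes when count
	 * is not a multiple of 8.
	 */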

	/*
	 * Start to prepare for backward copy.
	 */
	.p2align 4
2:
	cmp $680, %rdx
	jb 6f
	cmp %dil, %sil
	je 7b
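
	/*
	 * Same policy as the forward path: only copies of at least 680 bytes
	 * with matching src/dst alignment go through the rep movsq handler
	 * at 7; everything else uses the register loop below.
	 */
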
6:
	/*
	 * Calculate copy position to tail.
	 */
	addq %rdx, %rsi
	addq %rdx, %rdi
	subq $0x20, %rdx
	/*
	 * We gobble 32 bytes backward in each loop.
	 */
8:
	subq $0x20, %rdx
	movq -1*8(%rsi), %r11
	movq -2*8(%rsi), %r10
	movq -3*8(%rsi), %r9
	movq -4*8(%rsi), %r8
	leaq -4*8(%rsi), %rsi

	movq %r11, -1*8(%rdi)
	movq %r10, -2*8(%rdi)
	movq %r9, -3*8(%rdi)
	movq %r8, -4*8(%rdi)
	leaq -4*8(%rdi), %rdi
	jae 8b
	/*
	 * Calculate copy position to head.
	 */
	addq $0x20, %rdx
	subq %rdx, %rsi
	subq %rdx, %rdi
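
	/*
	 * At this point %rdx again holds the 0-31 remaining bytes and
	 * %rsi/%rdi have been moved back to the start of the uncopied
	 * region, so the forward-oriented tail code at 1 below finishes the
	 * job for both copy directions.
	 */
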
1:
	cmpq $16, %rdx
	jb 9f
	/*
	 * Move data from 16 bytes to 31 bytes.
	 */
	movq 0*8(%rsi), %r11
	movq 1*8(%rsi), %r10
	movq -2*8(%rsi, %rdx), %r9
	movq -1*8(%rsi, %rdx), %r8
	movq %r11, 0*8(%rdi)
	movq %r10, 1*8(%rdi)
	movq %r9, -2*8(%rdi, %rdx)
	movq %r8, -1*8(%rdi, %rdx)
	jmp 13f
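
	/*
	 * The tail cases use the classic overlapping head/tail trick: the
	 * first and last chunks of the range are loaded into registers
	 * before anything is stored, so the two (possibly overlapping)
	 * stores are still correct for any count within the range. The
	 * 8-15, 4-7 and 2-3 byte cases below follow the same pattern with
	 * smaller operand sizes.
	 */
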
	.p2align 4
9:
	cmpq $8, %rdx
	jb 10f
	/*
	 * Move data from 8 bytes to 15 bytes.
	 */
	movq 0*8(%rsi), %r11
	movq -1*8(%rsi, %rdx), %r10
	movq %r11, 0*8(%rdi)
	movq %r10, -1*8(%rdi, %rdx)
	jmp 13f
10:
	cmpq $4, %rdx
	jb 11f
	/*
	 * Move data from 4 bytes to 7 bytes.
	 */
	movl (%rsi), %r11d
	movl -4(%rsi, %rdx), %r10d
	movl %r11d, (%rdi)
	movl %r10d, -4(%rdi, %rdx)
	jmp 13f
11:
	cmp $2, %rdx
	jb 12f
	/*
	 * Move data from 2 bytes to 3 bytes.
	 */
	movw (%rsi), %r11w
	movw -2(%rsi, %rdx), %r10w
	movw %r11w, (%rdi)
	movw %r10w, -2(%rdi, %rdx)
	jmp 13f
12:
	cmp $1, %rdx
	jb 13f
	/*
	 * Move data for 1 byte.
	 */
	movb (%rsi), %r11b
	movb %r11b, (%rdi)
13:
	retq
ENDPROC(__memmove)
ENDPROC(memmove)