/*
 * Memory copy functions for 32-bit PowerPC.
 *
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
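
/*
 * COPY_16_BYTES moves one 16-byte block.  Both pointers are kept
 * pre-decremented by 4 (r4 = src - 4, r6 = dst - 4), so the final
 * lwzu/stwu update the pointers for the next block while the plain
 * lwz/stw fill in the other three words.
 */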
#define COPY_16_BYTES		\
	lwz	r7,4(r4);	\
	lwz	r8,8(r4);	\
	lwz	r9,12(r4);	\
	lwzu	r10,16(r4);	\
	stw	r7,4(r6);	\
	stw	r8,8(r6);	\
	stw	r9,12(r6);	\
	stwu	r10,16(r6)
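
/*
 * The _WITHEX variant labels every load and store 8<n><0-7> so that
 * each instruction can get its own exception-table entry; n is the
 * index of the 16-byte block within the cache line.
 */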
#define COPY_16_BYTES_WITHEX(n)	\
8 ## n ## 0:			\
	lwz	r7,4(r4);	\
8 ## n ## 1:			\
	lwz	r8,8(r4);	\
8 ## n ## 2:			\
	lwz	r9,12(r4);	\
8 ## n ## 3:			\
	lwzu	r10,16(r4);	\
8 ## n ## 4:			\
	stw	r7,4(r6);	\
8 ## n ## 5:			\
	stw	r8,8(r6);	\
8 ## n ## 6:			\
	stw	r9,12(r6);	\
8 ## n ## 7:			\
	stwu	r10,16(r6)
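
/*
 * Fixup code for COPY_16_BYTES_WITHEX(n): a fault in the load half of
 * block n enters at 9<n>0, a fault in the store half at 9<n>1.  Both
 * subtract from r5 the 16 * n bytes already copied out of the current
 * cache line, then branch to the common read-fault (104f) or
 * write-fault (105f) handler.  The __ex_table entries map each
 * faulting instruction to its fixup.
 */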
#define COPY_16_BYTES_EXCODE(n)			\
9 ## n ## 0:					\
	addi	r5,r5,-(16 * n);		\
	b	104f;				\
9 ## n ## 1:					\
	addi	r5,r5,-(16 * n);		\
	b	105f;				\
	.section __ex_table,"a";		\
	.align	2;				\
	.long	8 ## n ## 0b,9 ## n ## 0b;	\
	.long	8 ## n ## 1b,9 ## n ## 0b;	\
	.long	8 ## n ## 2b,9 ## n ## 0b;	\
	.long	8 ## n ## 3b,9 ## n ## 0b;	\
	.long	8 ## n ## 4b,9 ## n ## 1b;	\
	.long	8 ## n ## 5b,9 ## n ## 1b;	\
	.long	8 ## n ## 6b,9 ## n ## 1b;	\
	.long	8 ## n ## 7b,9 ## n ## 1b;	\
	.text
	.text
	.stabs	"arch/powerpc/lib/",N_SO,0,0,0f
	.stabs	"copy_32.S",N_SO,0,0,0f
0:

CACHELINE_BYTES = L1_CACHE_BYTES
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
CACHELINE_MASK = (L1_CACHE_BYTES-1)

/*
 * Use dcbz on the complete cache lines in the destination
 * to set them to zero.  This requires that the destination
 * area is cacheable.  -- paulus
 *
 * During early init, the cache might not be active yet, so dcbz cannot
 * be used.  We therefore skip the optimised block that uses dcbz.  The
 * jump is replaced by a nop once the cache is active.  This is done in
 * machine_init().
 */
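/*
 * memset(r3 = dest, r4 = fill value, r5 = count).
 * The two rlwimi instructions below replicate the low byte of r4 into
 * all four bytes of the word, so full words can be stored at once.
 */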
_GLOBAL(memset)
	rlwimi	r4,r4,8,16,23
	rlwimi	r4,r4,16,0,15

	addi	r6,r3,-4
	cmplwi	0,r5,4
	blt	7f
	stwu	r4,4(r6)
	beqlr
	andi.	r0,r6,3
	add	r5,r0,r5
	subf	r6,r0,r6
	cmplwi	0,r4,0
	bne	2f	/* Use normal procedure if r4 is not zero */
_GLOBAL(memset_nocache_branch)
	b	2f	/* Skip optimised block until cache is enabled */

	clrlwi	r7,r6,32-LG_CACHELINE_BYTES
	add	r8,r7,r5
	srwi	r9,r8,LG_CACHELINE_BYTES
	addic.	r9,r9,-1	/* total number of complete cachelines */
	ble	2f
	xori	r0,r7,CACHELINE_MASK & ~3
	srwi.	r0,r0,2
	beq	3f
	mtctr	r0
4:	stwu	r4,4(r6)
	bdnz	4b
3:	mtctr	r9
	li	r7,4
10:	dcbz	r7,r6
	addi	r6,r6,CACHELINE_BYTES
	bdnz	10b
	clrlwi	r5,r8,32-LG_CACHELINE_BYTES
	addi	r5,r5,4

2:	srwi	r0,r5,2
	mtctr	r0
	bdz	6f
1:	stwu	r4,4(r6)
	bdnz	1b
6:	andi.	r5,r5,3
7:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r6,r6,3
8:	stbu	r4,1(r6)
	bdnz	8b
	blr

/*
 * This version uses dcbz on the complete cache lines in the
 * destination area to reduce memory traffic.  This requires that
 * the destination area is cacheable.
 * We only use this version if the source and dest don't overlap.
 * -- paulus.
 *
 * During early init, the cache might not be active yet, so dcbz cannot
 * be used.  We therefore jump to generic_memcpy, which doesn't use
 * dcbz.  The jump is replaced by a nop once the cache is active.
 * This is done in machine_init().
 */
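/*
 * memmove(r3 = dest, r4 = src, r5 = count) and memcpy share one body.
 * If dest is above src, memmove branches to backwards_memcpy so that
 * an overlapping move is safe; otherwise copying low-to-high works.
 */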
_GLOBAL(memmove)
	cmplw	0,r3,r4
	bgt	backwards_memcpy
	/* fall through */

_GLOBAL(memcpy)
	b	generic_memcpy
	add	r7,r3,r5	/* test if the src & dst overlap */
	add	r8,r4,r5
	cmplw	0,r4,r7
	cmplw	1,r3,r8
	crand	0,0,4		/* cr0.lt &= cr1.lt */
	blt	generic_memcpy	/* if regions overlap */
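	/*
	 * The overlap test above is, in C terms:
	 *	if (src < dest + n && dest < src + n)
	 *		goto generic_memcpy;	(regions overlap)
	 */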
	addi	r4,r4,-4
	addi	r6,r3,-4
	neg	r0,r3
	andi.	r0,r0,CACHELINE_MASK	/* # bytes to start of cache line */
	beq	58f

	cmplw	0,r5,r0			/* is this more than total to do? */
	blt	63f			/* if not much to do */

	andi.	r8,r0,3			/* get it word-aligned first */
	subf	r5,r0,r5
	mtctr	r8
	beq+	61f
70:	lbz	r9,4(r4)		/* do some bytes */
	addi	r4,r4,1
	addi	r6,r6,1
	stb	r9,3(r6)
	bdnz	70b
61:	srwi.	r0,r0,2
	mtctr	r0
	beq	58f
72:	lwzu	r9,4(r4)		/* do some words */
	stwu	r9,4(r6)
	bdnz	72b

58:	srwi.	r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
	clrlwi	r5,r5,32-LG_CACHELINE_BYTES
	li	r11,4
	mtctr	r0
	beq	63f
53:
	dcbz	r11,r6
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	53b

63:	srwi.	r0,r5,2
	mtctr	r0
	beq	64f
30:	lwzu	r0,4(r4)
	stwu	r0,4(r6)
	bdnz	30b

64:	andi.	r0,r5,3
	mtctr	r0
	beq+	65f
	addi	r4,r4,3
	addi	r6,r6,3
40:	lbzu	r0,1(r4)
	stbu	r0,1(r6)
	bdnz	40b
65:	blr
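
/*
 * generic_memcpy: plain copy loop, safe before the caches are up.
 * It word-aligns the destination, moves two words (8 bytes) per
 * iteration, then mops up the remaining 0-7 bytes.
 */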
_GLOBAL(generic_memcpy)
	srwi.	r7,r5,3
	addi	r6,r3,-4
	addi	r4,r4,-4
	beq	2f	/* if less than 8 bytes to do */
	andi.	r0,r6,3	/* get dest word aligned */
	mtctr	r7
	bne	5f
1:	lwz	r7,4(r4)
	lwzu	r8,8(r4)
	stw	r7,4(r6)
	stwu	r8,8(r6)
	bdnz	1b
	andi.	r5,r5,7
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,4(r4)
	addi	r5,r5,-4
	stwu	r0,4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r4,r4,3
	addi	r6,r6,3
4:	lbzu	r0,1(r4)
	stbu	r0,1(r6)
	bdnz	4b
	blr
5:	subfic	r0,r0,4
	mtctr	r0
6:	lbz	r7,4(r4)
	addi	r4,r4,1
	stb	r7,4(r6)
	addi	r6,r6,1
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31
	beq	2b
	mtctr	r7
	b	1b
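
/*
 * backwards_memcpy: same structure as generic_memcpy, but copies from
 * the high end down so overlapping moves with dest > src are safe.
 */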
_GLOBAL(backwards_memcpy)
	rlwinm.	r7,r5,32-3,3,31	/* r7 = r5 >> 3 */
	add	r6,r3,r5
	add	r4,r4,r5
	beq	2f
	andi.	r0,r6,3
	mtctr	r7
	bne	5f
1:	lwz	r7,-4(r4)
	lwzu	r8,-8(r4)
	stw	r7,-4(r6)
	stwu	r8,-8(r6)
	bdnz	1b
	andi.	r5,r5,7
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,-4(r4)
	subi	r5,r5,4
	stwu	r0,-4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
4:	lbzu	r0,-1(r4)
	stbu	r0,-1(r6)
	bdnz	4b
	blr
5:	mtctr	r0
6:	lbzu	r7,-1(r4)
	stbu	r7,-1(r6)
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31
	beq	2b
	mtctr	r7
	b	1b
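
/*
 * __copy_tofrom_user(r3 = dest, r4 = src, r5 = count): same cache-line
 * optimised copy as memcpy above, but every access that may touch user
 * memory is covered by an __ex_table entry.  Returns 0 on success, or
 * the number of bytes not copied if a fault occurs.
 */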
_GLOBAL(__copy_tofrom_user)
	addi	r4,r4,-4
	addi	r6,r3,-4
	neg	r0,r3
	andi.	r0,r0,CACHELINE_MASK	/* # bytes to start of cache line */
	beq	58f

	cmplw	0,r5,r0			/* is this more than total to do? */
	blt	63f			/* if not much to do */

	andi.	r8,r0,3			/* get it word-aligned first */
	mtctr	r8
	beq+	61f
70:	lbz	r9,4(r4)		/* do some bytes */
71:	stb	r9,4(r6)
	addi	r4,r4,1
	addi	r6,r6,1
	bdnz	70b
61:	subf	r5,r0,r5
	srwi.	r0,r0,2
	mtctr	r0
	beq	58f
72:	lwzu	r9,4(r4)		/* do some words */
73:	stwu	r9,4(r6)
	bdnz	72b

	.section __ex_table,"a"
	.align	2
	.long	70b,100f
	.long	71b,101f
	.long	72b,102f
	.long	73b,103f
	.text
58:	srwi.	r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
	clrlwi	r5,r5,32-LG_CACHELINE_BYTES
	li	r11,4
	beq	63f

	/* Here we decide how far ahead to prefetch the source */
	li	r3,4
	cmpwi	r0,1
	li	r7,0
	ble	114f
	li	r7,1
#if MAX_COPY_PREFETCH > 1
	/* Heuristically, for large transfers we prefetch
	   MAX_COPY_PREFETCH cachelines ahead.  For small transfers
	   we prefetch 1 cacheline ahead. */
	cmpwi	r0,MAX_COPY_PREFETCH
	ble	112f
	li	r7,MAX_COPY_PREFETCH
112:	mtctr	r7
111:	dcbt	r3,r4
	addi	r3,r3,CACHELINE_BYTES
	bdnz	111b
#else
	dcbt	r3,r4
	addi	r3,r3,CACHELINE_BYTES
#endif /* MAX_COPY_PREFETCH > 1 */

114:	subf	r8,r7,r0
	mr	r0,r7
	mtctr	r8

53:	dcbt	r3,r4
54:	dcbz	r11,r6
	.section __ex_table,"a"
	.align	2
	.long	54b,105f
	.text
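/*
 * dcbz faults if the destination page is not writable, so its fixup
 * is the write-fault handler at 105.
 */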
	/* the main body of the cacheline loop */
	COPY_16_BYTES_WITHEX(0)
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES_WITHEX(1)
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES_WITHEX(2)
	COPY_16_BYTES_WITHEX(3)
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES_WITHEX(4)
	COPY_16_BYTES_WITHEX(5)
	COPY_16_BYTES_WITHEX(6)
	COPY_16_BYTES_WITHEX(7)
#endif
#endif
#endif
	bdnz	53b
	cmpwi	r0,0
	li	r3,4
	li	r7,0
	bne	114b

63:	srwi.	r0,r5,2
	mtctr	r0
	beq	64f
30:	lwzu	r0,4(r4)
31:	stwu	r0,4(r6)
	bdnz	30b

64:	andi.	r0,r5,3
	mtctr	r0
	beq+	65f
40:	lbz	r0,4(r4)
41:	stb	r0,4(r6)
	addi	r4,r4,1
	addi	r6,r6,1
	bdnz	40b
65:	li	r3,0
	blr
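
/*
 * Fault fixups.  Each handler records whether the fault was on a read
 * (r9 = 0) or a write (r9 = 1) and arranges for the remaining byte
 * count to be expressible as r5 + (ctr << r3) before falling into the
 * common code at 99/106 below.
 */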
	/* read fault, initial single-byte copy */
100:	li	r9,0
	b	90f
	/* write fault, initial single-byte copy */
101:	li	r9,1
90:	subf	r5,r8,r5
	li	r3,0
	b	99f
	/* read fault, initial word copy */
102:	li	r9,0
	b	91f
	/* write fault, initial word copy */
103:	li	r9,1
91:	li	r3,2
	b	99f

/*
 * this stuff handles faults in the cacheline loop and branches to either
 * 104f (if in read part) or 105f (if in write part), after updating r5
 */
	COPY_16_BYTES_EXCODE(0)
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES_EXCODE(1)
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES_EXCODE(2)
	COPY_16_BYTES_EXCODE(3)
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES_EXCODE(4)
	COPY_16_BYTES_EXCODE(5)
	COPY_16_BYTES_EXCODE(6)
	COPY_16_BYTES_EXCODE(7)
#endif
#endif
#endif

	/* read fault in cacheline loop */
104:	li	r9,0
	b	92f
	/* fault on dcbz (effectively a write fault) */
	/* or write fault in cacheline loop */
105:	li	r9,1
92:	li	r3,LG_CACHELINE_BYTES
	mfctr	r8
	add	r0,r0,r8
	b	106f
	/* read fault in final word loop */
108:	li	r9,0
	b	93f
	/* write fault in final word loop */
109:	li	r9,1
93:	andi.	r5,r5,3
	li	r3,2
	b	99f
	/* read fault in final byte loop */
110:	li	r9,0
	b	94f
	/* write fault in final byte loop */
111:	li	r9,1
94:	li	r5,0
	li	r3,0
/*
 * At this stage the number of bytes not copied is
 * r5 + (ctr << r3), and r9 is 0 for read or 1 for write.
 */
99:	mfctr	r0
106:	slw	r3,r0,r3
	add.	r3,r3,r5
	beq	120f			/* shouldn't happen */
	cmpwi	0,r9,0
	bne	120f

	/* for a read fault, first try to continue the copy one byte at a time */
	mtctr	r3
130:	lbz	r0,4(r4)
131:	stb	r0,4(r6)
	addi	r4,r4,1
	addi	r6,r6,1
	bdnz	130b

	/* then clear out the destination: r3 bytes starting at 4(r6) */
132:	mfctr	r3
	srwi.	r0,r3,2
	li	r9,0
	mtctr	r0
	beq	113f
112:	stwu	r9,4(r6)
	bdnz	112b
113:	andi.	r0,r3,3
	mtctr	r0
	beq	120f
114:	stb	r9,4(r6)
	addi	r6,r6,1
	bdnz	114b
120:	blr
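
	/*
	 * Exception table for the final word/byte loops above and for
	 * the byte-at-a-time retry and clear loops in the fault path.
	 */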
	.section __ex_table,"a"
	.align	2
	.long	30b,108b
	.long	31b,109b
	.long	40b,110b
	.long	41b,111b
	.long	130b,132b
	.long	131b,120b
	.long	112b,120b
	.long	114b,120b
	.text