  1. /*
  2. * arch/alpha/lib/ev6-memset.S
  3. *
  4. * This is an efficient (and relatively small) implementation of the C library
  5. * "memset()" function for the 21264 implementation of Alpha.
  6. *
  7. * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
  8. *
  9. * Much of the information about 21264 scheduling/coding comes from:
  10. * Compiler Writer's Guide for the Alpha 21264
  11. * abbreviated as 'CWG' in other comments here
  12. * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
  13. * Scheduling notation:
  14. * E - either cluster
  15. * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
  16. * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
  17. * The algorithm for the leading and trailing quadwords remains the same,
  18. * however the loop has been unrolled to enable better memory throughput,
  19. * and the code has been replicated for each of the entry points: __memset
  20. * and __memsetw to permit better scheduling to eliminate the stalling
  21. * encountered during the mask replication.
  22. * A future enhancement might be to put in a byte store loop for really
  23. * small (say < 32 bytes) memset()s. Whether or not that change would be
  24. * a win in the kernel would depend upon the contextual usage.
  25. * WARNING: Maintaining this is going to be more work than the above version,
  26. * as fixes will need to be made in multiple places. The performance gain
  27. * is worth it.
  28. */
	.set noat
	.set noreorder
	.text
	.globl memset
	.globl __memset
	.globl ___memset
	.globl __memsetw
	.globl __constant_c_memset

/*
 * ___memset(void *dest, int c, size_t n)
 * In:   $16 = dest, $17 = fill byte (only the low 8 bits are used),
 *       $18 = byte count
 * Out:  $0  = dest (returned unchanged)
 * Uses: $1-$7 as scratch; no stack frame.
 */
	.ent ___memset
	.align 5
___memset:
	.frame $30,0,$26,0
	.prologue 0

	/*
	 * Serious stalling happens.  The only way to mitigate this is to
	 * undertake a major re-write to interleave the constant materialization
	 * with other parts of the fall-through code.  This is important, even
	 * though it makes maintenance tougher.
	 * Do this later.
	 */
	and	$17,255,$1	# E : 00000000000000ch
	insbl	$17,1,$2	# U : 000000000000ch00
	bis	$16,$16,$0	# E : return value
	ble	$18,end_b	# U : zero length requested?

	addq	$18,$16,$6	# E : max address to write to
	bis	$1,$2,$17	# E : 000000000000chch
	insbl	$1,2,$3		# U : 0000000000ch0000
	insbl	$1,3,$4		# U : 00000000ch000000

	or	$3,$4,$3	# E : 00000000chch0000
	inswl	$17,4,$5	# U : 0000chch00000000
	xor	$16,$6,$1	# E : will complete write be within one quadword?
	inswl	$17,6,$2	# U : chch000000000000

	or	$17,$3,$17	# E : 00000000chchchch
	or	$2,$5,$2	# E : chchchch00000000
	bic	$1,7,$1		# E : fit within a single quadword?
	and	$16,7,$3	# E : Target addr misalignment

	or	$17,$2,$17	# E : chchchchchchchch
	beq	$1,within_quad_b # U :
	nop			# E :
	beq	$3,aligned_b	# U : target is 0mod8

	/*
	 * Target address is misaligned, and won't fit within a quadword.
	 * Read-modify-write the first partial quad, then fall into the
	 * aligned path with $16/$18 adjusted.
	 */
	ldq_u	$4,0($16)	# L : Fetch first partial
	bis	$16,$16,$5	# E : Save the address
	insql	$17,$16,$2	# U : Insert new bytes
	subq	$3,8,$3		# E : Invert (for addressing uses)

	addq	$18,$3,$18	# E : $18 is new count ($3 is negative)
	mskql	$4,$16,$4	# U : clear relevant parts of the quad
	subq	$16,$3,$16	# E : $16 is new aligned destination
	bis	$2,$4,$1	# E : Final bytes

	nop
	stq_u	$1,0($5)	# L : Store result
	nop
	nop

	.align 4
aligned_b:
	/*
	 * We are now guaranteed to be quad aligned, with at least
	 * one partial quad to write.
	 */
	sra	$18,3,$3	# U : Number of remaining quads to write
	and	$18,7,$18	# E : Number of trailing bytes to write
	bis	$16,$16,$5	# E : Save dest address
	beq	$3,no_quad_b	# U : tail stuff only

	/*
	 * It's worth the effort to unroll this and use wh64 if possible.
	 * Lifted a bunch of code from clear_user.S
	 * At this point, entry values are:
	 *	$16	Current destination address
	 *	$5	A copy of $16
	 *	$6	The max quadword address to write to
	 *	$18	Number trailer bytes
	 *	$3	Number quads to write
	 */
	and	$16, 0x3f, $2	# E : Forward work (only useful for unrolled loop)
	subq	$3, 16, $4	# E : Only try to unroll if > 128 bytes
	subq	$2, 0x40, $1	# E : bias counter (aligning stuff 0mod64)
	blt	$4, loop_b	# U :

	/*
	 * We know we've got at least 16 quads, minimum of one trip
	 * through unrolled loop.  Do a quad at a time to get us 0mod64
	 * aligned.
	 */
	nop			# E :
	nop			# E :
	nop			# E :
	beq	$1, $bigalign_b	# U : already 0mod64 aligned

$alignmod64_b:
	stq	$17, 0($5)	# L :
	subq	$3, 1, $3	# E : For consistency later
	addq	$1, 8, $1	# E : Increment towards zero for alignment
	addq	$5, 8, $4	# E : Initial wh64 address (filler instruction)

	nop
	nop
	addq	$5, 8, $5	# E : Inc address
	blt	$1, $alignmod64_b # U :

$bigalign_b:
	/*
	 * $3 - number quads left to go
	 * $5 - target address (aligned 0mod64)
	 * $17 - mask of stuff to store
	 * Scratch registers available: $7, $2, $4, $1
	 * We know that we'll be taking a minimum of one trip through.
	 * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
	 * Assumes the wh64 needs to be for 2 trips through the loop in the future.
	 * The wh64 is issued on for the starting destination address for trip +2
	 * through the loop, and if there are less than two trips left, the target
	 * address will be for the current trip.
	 */
$do_wh64_b:
	wh64	($4)		# L1 : memory subsystem write hint
	subq	$3, 24, $2	# E : For determining future wh64 addresses
	stq	$17, 0($5)	# L :
	nop			# E :

	addq	$5, 128, $4	# E : speculative target of next wh64
	stq	$17, 8($5)	# L :
	stq	$17, 16($5)	# L :
	addq	$5, 64, $7	# E : Fallback address for wh64 (== next trip addr)

	stq	$17, 24($5)	# L :
	stq	$17, 32($5)	# L :
	cmovlt	$2, $7, $4	# E : Latency 2, extra mapping cycle
	nop

	stq	$17, 40($5)	# L :
	stq	$17, 48($5)	# L :
	subq	$3, 16, $2	# E : Repeat the loop at least once more?
	nop

	stq	$17, 56($5)	# L :
	addq	$5, 64, $5	# E :
	subq	$3, 8, $3	# E :
	bge	$2, $do_wh64_b	# U :

	nop
	nop
	nop
	beq	$3, no_quad_b	# U : Might have finished already

	.align 4
	/*
	 * Simple loop for trailing quadwords, or for small amounts
	 * of data (where we can't use an unrolled loop and wh64).
	 */
loop_b:
	stq	$17,0($5)	# L :
	subq	$3,1,$3		# E : Decrement number quads left
	addq	$5,8,$5		# E : Inc address
	bne	$3,loop_b	# U : more?

no_quad_b:
	/*
	 * Write 0..7 trailing bytes via a read-modify-write of the
	 * final quadword.
	 */
	nop			# E :
	beq	$18,end_b	# U : All done?
	ldq	$7,0($5)	# L :
	mskqh	$7,$6,$2	# U : Mask final quad

	insqh	$17,$6,$4	# U : New bits
	bis	$2,$4,$1	# E : Put it all together
	stq	$1,0($5)	# L : And back to memory
	ret	$31,($26),1	# L0 :

within_quad_b:
	/* Entire fill lies inside one quadword: single read-modify-write. */
	ldq_u	$1,0($16)	# L :
	insql	$17,$16,$2	# U : New bits
	mskql	$1,$16,$4	# U : Clear old
	bis	$2,$4,$2	# E : New result

	mskql	$2,$6,$4	# U :
	mskqh	$1,$6,$2	# U :
	bis	$2,$4,$1	# E :
	stq_u	$1,0($16)	# L :

end_b:
	nop
	nop
	nop
	ret	$31,($26),1	# L0 :
	.end ___memset
/*
 * This is the original body of code, prior to replication and
 * rescheduling.  Leave it here, as there may be calls to this
 * entry point.
 */
/*
 * __constant_c_memset(void *dest, unsigned long c, size_t n)
 * In:   $16 = dest
 *       $17 = fill pattern ALREADY replicated across all 8 bytes of the
 *             quadword -- note the code stores $17 verbatim with stq and
 *             never widens it, unlike ___memset above
 *       $18 = byte count
 * Out:  $0  = dest
 * Uses: $1-$7 as scratch; no stack frame.
 */
	.align 4
	.ent __constant_c_memset
__constant_c_memset:
	.frame $30,0,$26,0
	.prologue 0

	addq	$18,$16,$6	# E : max address to write to
	bis	$16,$16,$0	# E : return value
	xor	$16,$6,$1	# E : will complete write be within one quadword?
	ble	$18,end		# U : zero length requested?

	bic	$1,7,$1		# E : fit within a single quadword
	beq	$1,within_one_quad # U :
	and	$16,7,$3	# E : Target addr misalignment
	beq	$3,aligned	# U : target is 0mod8

	/*
	 * Target address is misaligned, and won't fit within a quadword.
	 * Read-modify-write the first partial quad, then fall into the
	 * aligned path with $16/$18 adjusted.
	 */
	ldq_u	$4,0($16)	# L : Fetch first partial
	bis	$16,$16,$5	# E : Save the address
	insql	$17,$16,$2	# U : Insert new bytes
	subq	$3,8,$3		# E : Invert (for addressing uses)

	addq	$18,$3,$18	# E : $18 is new count ($3 is negative)
	mskql	$4,$16,$4	# U : clear relevant parts of the quad
	subq	$16,$3,$16	# E : $16 is new aligned destination
	bis	$2,$4,$1	# E : Final bytes

	nop
	stq_u	$1,0($5)	# L : Store result
	nop
	nop

	.align 4
aligned:
	/*
	 * We are now guaranteed to be quad aligned, with at least
	 * one partial quad to write.
	 */
	sra	$18,3,$3	# U : Number of remaining quads to write
	and	$18,7,$18	# E : Number of trailing bytes to write
	bis	$16,$16,$5	# E : Save dest address
	beq	$3,no_quad	# U : tail stuff only

	/*
	 * It's worth the effort to unroll this and use wh64 if possible.
	 * Lifted a bunch of code from clear_user.S
	 * At this point, entry values are:
	 *	$16	Current destination address
	 *	$5	A copy of $16
	 *	$6	The max quadword address to write to
	 *	$18	Number trailer bytes
	 *	$3	Number quads to write
	 */
	and	$16, 0x3f, $2	# E : Forward work (only useful for unrolled loop)
	subq	$3, 16, $4	# E : Only try to unroll if > 128 bytes
	subq	$2, 0x40, $1	# E : bias counter (aligning stuff 0mod64)
	blt	$4, loop	# U :

	/*
	 * We know we've got at least 16 quads, minimum of one trip
	 * through unrolled loop.  Do a quad at a time to get us 0mod64
	 * aligned.
	 */
	nop			# E :
	nop			# E :
	nop			# E :
	beq	$1, $bigalign	# U : already 0mod64 aligned

$alignmod64:
	stq	$17, 0($5)	# L :
	subq	$3, 1, $3	# E : For consistency later
	addq	$1, 8, $1	# E : Increment towards zero for alignment
	addq	$5, 8, $4	# E : Initial wh64 address (filler instruction)

	nop
	nop
	addq	$5, 8, $5	# E : Inc address
	blt	$1, $alignmod64	# U :

$bigalign:
	/*
	 * $3 - number quads left to go
	 * $5 - target address (aligned 0mod64)
	 * $17 - mask of stuff to store
	 * Scratch registers available: $7, $2, $4, $1
	 * We know that we'll be taking a minimum of one trip through.
	 * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
	 * Assumes the wh64 needs to be for 2 trips through the loop in the future.
	 * The wh64 is issued on for the starting destination address for trip +2
	 * through the loop, and if there are less than two trips left, the target
	 * address will be for the current trip.
	 */
$do_wh64:
	wh64	($4)		# L1 : memory subsystem write hint
	subq	$3, 24, $2	# E : For determining future wh64 addresses
	stq	$17, 0($5)	# L :
	nop			# E :

	addq	$5, 128, $4	# E : speculative target of next wh64
	stq	$17, 8($5)	# L :
	stq	$17, 16($5)	# L :
	addq	$5, 64, $7	# E : Fallback address for wh64 (== next trip addr)

	stq	$17, 24($5)	# L :
	stq	$17, 32($5)	# L :
	cmovlt	$2, $7, $4	# E : Latency 2, extra mapping cycle
	nop

	stq	$17, 40($5)	# L :
	stq	$17, 48($5)	# L :
	subq	$3, 16, $2	# E : Repeat the loop at least once more?
	nop

	stq	$17, 56($5)	# L :
	addq	$5, 64, $5	# E :
	subq	$3, 8, $3	# E :
	bge	$2, $do_wh64	# U :

	nop
	nop
	nop
	beq	$3, no_quad	# U : Might have finished already

	.align 4
	/*
	 * Simple loop for trailing quadwords, or for small amounts
	 * of data (where we can't use an unrolled loop and wh64).
	 */
loop:
	stq	$17,0($5)	# L :
	subq	$3,1,$3		# E : Decrement number quads left
	addq	$5,8,$5		# E : Inc address
	bne	$3,loop		# U : more?

no_quad:
	/*
	 * Write 0..7 trailing bytes via a read-modify-write of the
	 * final quadword.
	 */
	nop			# E :
	beq	$18,end		# U : All done?
	ldq	$7,0($5)	# L :
	mskqh	$7,$6,$2	# U : Mask final quad

	insqh	$17,$6,$4	# U : New bits
	bis	$2,$4,$1	# E : Put it all together
	stq	$1,0($5)	# L : And back to memory
	ret	$31,($26),1	# L0 :

within_one_quad:
	/* Entire fill lies inside one quadword: single read-modify-write. */
	ldq_u	$1,0($16)	# L :
	insql	$17,$16,$2	# U : New bits
	mskql	$1,$16,$4	# U : Clear old
	bis	$2,$4,$2	# E : New result

	mskql	$2,$6,$4	# U :
	mskqh	$1,$6,$2	# U :
	bis	$2,$4,$1	# E :
	stq_u	$1,0($16)	# L :

end:
	nop
	nop
	nop
	ret	$31,($26),1	# L0 :
	.end __constant_c_memset
/*
 * This is a replicant of the __constant_c_memset code, rescheduled
 * to mask stalls.  Note that entry point names also had to change.
 */
/*
 * __memsetw(void *dest, unsigned short c, size_t n)
 * In:   $16 = dest, $17 = 16-bit fill word in the low bits (replicated
 *       across the quadword below), $18 = byte count
 * Out:  $0  = dest
 * Uses: $1-$7 as scratch; no stack frame.
 */
	.align 5
	.ent __memsetw
__memsetw:
	.frame $30,0,$26,0
	.prologue 0

	inswl	$17,0,$5	# U : 000000000000c1c2
	inswl	$17,2,$2	# U : 00000000c1c20000
	bis	$16,$16,$0	# E : return value
	addq	$18,$16,$6	# E : max address to write to

	ble	$18, end_w	# U : zero length requested?
	inswl	$17,4,$3	# U : 0000c1c200000000
	inswl	$17,6,$4	# U : c1c2000000000000
	xor	$16,$6,$1	# E : will complete write be within one quadword?

	or	$2,$5,$2	# E : 00000000c1c2c1c2
	or	$3,$4,$17	# E : c1c2c1c200000000
	bic	$1,7,$1		# E : fit within a single quadword
	and	$16,7,$3	# E : Target addr misalignment

	or	$17,$2,$17	# E : c1c2c1c2c1c2c1c2
	beq	$1,within_quad_w # U :
	nop
	beq	$3,aligned_w	# U : target is 0mod8

	/*
	 * Target address is misaligned, and won't fit within a quadword.
	 * Read-modify-write the first partial quad, then fall into the
	 * aligned path with $16/$18 adjusted.
	 */
	ldq_u	$4,0($16)	# L : Fetch first partial
	bis	$16,$16,$5	# E : Save the address
	insql	$17,$16,$2	# U : Insert new bytes
	subq	$3,8,$3		# E : Invert (for addressing uses)

	addq	$18,$3,$18	# E : $18 is new count ($3 is negative)
	mskql	$4,$16,$4	# U : clear relevant parts of the quad
	subq	$16,$3,$16	# E : $16 is new aligned destination
	bis	$2,$4,$1	# E : Final bytes

	nop
	stq_u	$1,0($5)	# L : Store result
	nop
	nop

	.align 4
aligned_w:
	/*
	 * We are now guaranteed to be quad aligned, with at least
	 * one partial quad to write.
	 */
	sra	$18,3,$3	# U : Number of remaining quads to write
	and	$18,7,$18	# E : Number of trailing bytes to write
	bis	$16,$16,$5	# E : Save dest address
	beq	$3,no_quad_w	# U : tail stuff only

	/*
	 * It's worth the effort to unroll this and use wh64 if possible.
	 * Lifted a bunch of code from clear_user.S
	 * At this point, entry values are:
	 *	$16	Current destination address
	 *	$5	A copy of $16
	 *	$6	The max quadword address to write to
	 *	$18	Number trailer bytes
	 *	$3	Number quads to write
	 */
	and	$16, 0x3f, $2	# E : Forward work (only useful for unrolled loop)
	subq	$3, 16, $4	# E : Only try to unroll if > 128 bytes
	subq	$2, 0x40, $1	# E : bias counter (aligning stuff 0mod64)
	blt	$4, loop_w	# U :

	/*
	 * We know we've got at least 16 quads, minimum of one trip
	 * through unrolled loop.  Do a quad at a time to get us 0mod64
	 * aligned.
	 */
	nop			# E :
	nop			# E :
	nop			# E :
	beq	$1, $bigalign_w	# U : already 0mod64 aligned

$alignmod64_w:
	stq	$17, 0($5)	# L :
	subq	$3, 1, $3	# E : For consistency later
	addq	$1, 8, $1	# E : Increment towards zero for alignment
	addq	$5, 8, $4	# E : Initial wh64 address (filler instruction)

	nop
	nop
	addq	$5, 8, $5	# E : Inc address
	blt	$1, $alignmod64_w # U :

$bigalign_w:
	/*
	 * $3 - number quads left to go
	 * $5 - target address (aligned 0mod64)
	 * $17 - mask of stuff to store
	 * Scratch registers available: $7, $2, $4, $1
	 * We know that we'll be taking a minimum of one trip through.
	 * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
	 * Assumes the wh64 needs to be for 2 trips through the loop in the future.
	 * The wh64 is issued on for the starting destination address for trip +2
	 * through the loop, and if there are less than two trips left, the target
	 * address will be for the current trip.
	 */
$do_wh64_w:
	wh64	($4)		# L1 : memory subsystem write hint
	subq	$3, 24, $2	# E : For determining future wh64 addresses
	stq	$17, 0($5)	# L :
	nop			# E :

	addq	$5, 128, $4	# E : speculative target of next wh64
	stq	$17, 8($5)	# L :
	stq	$17, 16($5)	# L :
	addq	$5, 64, $7	# E : Fallback address for wh64 (== next trip addr)

	stq	$17, 24($5)	# L :
	stq	$17, 32($5)	# L :
	cmovlt	$2, $7, $4	# E : Latency 2, extra mapping cycle
	nop

	stq	$17, 40($5)	# L :
	stq	$17, 48($5)	# L :
	subq	$3, 16, $2	# E : Repeat the loop at least once more?
	nop

	stq	$17, 56($5)	# L :
	addq	$5, 64, $5	# E :
	subq	$3, 8, $3	# E :
	bge	$2, $do_wh64_w	# U :

	nop
	nop
	nop
	beq	$3, no_quad_w	# U : Might have finished already

	.align 4
	/*
	 * Simple loop for trailing quadwords, or for small amounts
	 * of data (where we can't use an unrolled loop and wh64).
	 */
loop_w:
	stq	$17,0($5)	# L :
	subq	$3,1,$3		# E : Decrement number quads left
	addq	$5,8,$5		# E : Inc address
	bne	$3,loop_w	# U : more?

no_quad_w:
	/*
	 * Write 0..7 trailing bytes via a read-modify-write of the
	 * final quadword.
	 */
	nop			# E :
	beq	$18,end_w	# U : All done?
	ldq	$7,0($5)	# L :
	mskqh	$7,$6,$2	# U : Mask final quad

	insqh	$17,$6,$4	# U : New bits
	bis	$2,$4,$1	# E : Put it all together
	stq	$1,0($5)	# L : And back to memory
	ret	$31,($26),1	# L0 :

within_quad_w:
	/* Entire fill lies inside one quadword: single read-modify-write. */
	ldq_u	$1,0($16)	# L :
	insql	$17,$16,$2	# U : New bits
	mskql	$1,$16,$4	# U : Clear old
	bis	$2,$4,$2	# E : New result

	mskql	$2,$6,$4	# U :
	mskqh	$1,$6,$2	# U :
	bis	$2,$4,$1	# E :
	stq_u	$1,0($16)	# L :

end_w:
	nop
	nop
	nop
	ret	$31,($26),1	# L0 :
	.end __memsetw
/* Exported aliases: memset() and __memset() both resolve to ___memset. */
memset = ___memset
__memset = ___memset