/* twofish-x86_64-asm_64.S - x86-64 assembly implementation of the Twofish cipher */
/***************************************************************************
*   Copyright (C) 2006 by Joachim Fritschi, <jfritschi@freenet.de>        *
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
*   This program is distributed in the hope that it will be useful,       *
*   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
*   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
*   GNU General Public License for more details.                          *
*                                                                         *
*   You should have received a copy of the GNU General Public License     *
*   along with this program; if not, write to the                         *
*   Free Software Foundation, Inc.,                                       *
*   59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.             *
***************************************************************************/
  19. .file "twofish-x86_64-asm.S"
  20. .text
  21. #include <linux/linkage.h>
  22. #include <asm/asm-offsets.h>
  23. #define a_offset 0
  24. #define b_offset 4
  25. #define c_offset 8
  26. #define d_offset 12
  27. /* Structure of the crypto context struct*/
  28. #define s0 0 /* S0 Array 256 Words each */
  29. #define s1 1024 /* S1 Array */
  30. #define s2 2048 /* S2 Array */
  31. #define s3 3072 /* S3 Array */
  32. #define w 4096 /* 8 whitening keys (word) */
  33. #define k 4128 /* key 1-32 ( word ) */
  34. /* define a few register aliases to allow macro substitution */
  35. #define R0 %rax
  36. #define R0D %eax
  37. #define R0B %al
  38. #define R0H %ah
  39. #define R1 %rbx
  40. #define R1D %ebx
  41. #define R1B %bl
  42. #define R1H %bh
  43. #define R2 %rcx
  44. #define R2D %ecx
  45. #define R2B %cl
  46. #define R2H %ch
  47. #define R3 %rdx
  48. #define R3D %edx
  49. #define R3B %dl
  50. #define R3H %dh
  51. /* performs input whitening */
  52. #define input_whitening(src,context,offset)\
  53. xor w+offset(context), src;
  54. /* performs input whitening */
  55. #define output_whitening(src,context,offset)\
  56. xor w+16+offset(context), src;
  57. /*
  58. * a input register containing a (rotated 16)
  59. * b input register containing b
  60. * c input register containing c
  61. * d input register containing d (already rol $1)
  62. * operations on a and b are interleaved to increase performance
  63. */
  64. #define encrypt_round(a,b,c,d,round)\
  65. movzx b ## B, %edi;\
  66. mov s1(%r11,%rdi,4),%r8d;\
  67. movzx a ## B, %edi;\
  68. mov s2(%r11,%rdi,4),%r9d;\
  69. movzx b ## H, %edi;\
  70. ror $16, b ## D;\
  71. xor s2(%r11,%rdi,4),%r8d;\
  72. movzx a ## H, %edi;\
  73. ror $16, a ## D;\
  74. xor s3(%r11,%rdi,4),%r9d;\
  75. movzx b ## B, %edi;\
  76. xor s3(%r11,%rdi,4),%r8d;\
  77. movzx a ## B, %edi;\
  78. xor (%r11,%rdi,4), %r9d;\
  79. movzx b ## H, %edi;\
  80. ror $15, b ## D;\
  81. xor (%r11,%rdi,4), %r8d;\
  82. movzx a ## H, %edi;\
  83. xor s1(%r11,%rdi,4),%r9d;\
  84. add %r8d, %r9d;\
  85. add %r9d, %r8d;\
  86. add k+round(%r11), %r9d;\
  87. xor %r9d, c ## D;\
  88. rol $15, c ## D;\
  89. add k+4+round(%r11),%r8d;\
  90. xor %r8d, d ## D;
  91. /*
  92. * a input register containing a(rotated 16)
  93. * b input register containing b
  94. * c input register containing c
  95. * d input register containing d (already rol $1)
  96. * operations on a and b are interleaved to increase performance
  97. * during the round a and b are prepared for the output whitening
  98. */
  99. #define encrypt_last_round(a,b,c,d,round)\
  100. mov b ## D, %r10d;\
  101. shl $32, %r10;\
  102. movzx b ## B, %edi;\
  103. mov s1(%r11,%rdi,4),%r8d;\
  104. movzx a ## B, %edi;\
  105. mov s2(%r11,%rdi,4),%r9d;\
  106. movzx b ## H, %edi;\
  107. ror $16, b ## D;\
  108. xor s2(%r11,%rdi,4),%r8d;\
  109. movzx a ## H, %edi;\
  110. ror $16, a ## D;\
  111. xor s3(%r11,%rdi,4),%r9d;\
  112. movzx b ## B, %edi;\
  113. xor s3(%r11,%rdi,4),%r8d;\
  114. movzx a ## B, %edi;\
  115. xor (%r11,%rdi,4), %r9d;\
  116. xor a, %r10;\
  117. movzx b ## H, %edi;\
  118. xor (%r11,%rdi,4), %r8d;\
  119. movzx a ## H, %edi;\
  120. xor s1(%r11,%rdi,4),%r9d;\
  121. add %r8d, %r9d;\
  122. add %r9d, %r8d;\
  123. add k+round(%r11), %r9d;\
  124. xor %r9d, c ## D;\
  125. ror $1, c ## D;\
  126. add k+4+round(%r11),%r8d;\
  127. xor %r8d, d ## D
  128. /*
  129. * a input register containing a
  130. * b input register containing b (rotated 16)
  131. * c input register containing c (already rol $1)
  132. * d input register containing d
  133. * operations on a and b are interleaved to increase performance
  134. */
  135. #define decrypt_round(a,b,c,d,round)\
  136. movzx a ## B, %edi;\
  137. mov (%r11,%rdi,4), %r9d;\
  138. movzx b ## B, %edi;\
  139. mov s3(%r11,%rdi,4),%r8d;\
  140. movzx a ## H, %edi;\
  141. ror $16, a ## D;\
  142. xor s1(%r11,%rdi,4),%r9d;\
  143. movzx b ## H, %edi;\
  144. ror $16, b ## D;\
  145. xor (%r11,%rdi,4), %r8d;\
  146. movzx a ## B, %edi;\
  147. xor s2(%r11,%rdi,4),%r9d;\
  148. movzx b ## B, %edi;\
  149. xor s1(%r11,%rdi,4),%r8d;\
  150. movzx a ## H, %edi;\
  151. ror $15, a ## D;\
  152. xor s3(%r11,%rdi,4),%r9d;\
  153. movzx b ## H, %edi;\
  154. xor s2(%r11,%rdi,4),%r8d;\
  155. add %r8d, %r9d;\
  156. add %r9d, %r8d;\
  157. add k+round(%r11), %r9d;\
  158. xor %r9d, c ## D;\
  159. add k+4+round(%r11),%r8d;\
  160. xor %r8d, d ## D;\
  161. rol $15, d ## D;
  162. /*
  163. * a input register containing a
  164. * b input register containing b
  165. * c input register containing c (already rol $1)
  166. * d input register containing d
  167. * operations on a and b are interleaved to increase performance
  168. * during the round a and b are prepared for the output whitening
  169. */
  170. #define decrypt_last_round(a,b,c,d,round)\
  171. movzx a ## B, %edi;\
  172. mov (%r11,%rdi,4), %r9d;\
  173. movzx b ## B, %edi;\
  174. mov s3(%r11,%rdi,4),%r8d;\
  175. movzx b ## H, %edi;\
  176. ror $16, b ## D;\
  177. xor (%r11,%rdi,4), %r8d;\
  178. movzx a ## H, %edi;\
  179. mov b ## D, %r10d;\
  180. shl $32, %r10;\
  181. xor a, %r10;\
  182. ror $16, a ## D;\
  183. xor s1(%r11,%rdi,4),%r9d;\
  184. movzx b ## B, %edi;\
  185. xor s1(%r11,%rdi,4),%r8d;\
  186. movzx a ## B, %edi;\
  187. xor s2(%r11,%rdi,4),%r9d;\
  188. movzx b ## H, %edi;\
  189. xor s2(%r11,%rdi,4),%r8d;\
  190. movzx a ## H, %edi;\
  191. xor s3(%r11,%rdi,4),%r9d;\
  192. add %r8d, %r9d;\
  193. add %r9d, %r8d;\
  194. add k+round(%r11), %r9d;\
  195. xor %r9d, c ## D;\
  196. add k+4+round(%r11),%r8d;\
  197. xor %r8d, d ## D;\
  198. ror $1, d ## D;
  199. ENTRY(twofish_enc_blk)
  200. pushq R1
  201. /* %rdi contains the ctx address */
  202. /* %rsi contains the output address */
  203. /* %rdx contains the input address */
  204. /* ctx address is moved to free one non-rex register
  205. as target for the 8bit high operations */
  206. mov %rdi, %r11
  207. movq (R3), R1
  208. movq 8(R3), R3
  209. input_whitening(R1,%r11,a_offset)
  210. input_whitening(R3,%r11,c_offset)
  211. mov R1D, R0D
  212. rol $16, R0D
  213. shr $32, R1
  214. mov R3D, R2D
  215. shr $32, R3
  216. rol $1, R3D
  217. encrypt_round(R0,R1,R2,R3,0);
  218. encrypt_round(R2,R3,R0,R1,8);
  219. encrypt_round(R0,R1,R2,R3,2*8);
  220. encrypt_round(R2,R3,R0,R1,3*8);
  221. encrypt_round(R0,R1,R2,R3,4*8);
  222. encrypt_round(R2,R3,R0,R1,5*8);
  223. encrypt_round(R0,R1,R2,R3,6*8);
  224. encrypt_round(R2,R3,R0,R1,7*8);
  225. encrypt_round(R0,R1,R2,R3,8*8);
  226. encrypt_round(R2,R3,R0,R1,9*8);
  227. encrypt_round(R0,R1,R2,R3,10*8);
  228. encrypt_round(R2,R3,R0,R1,11*8);
  229. encrypt_round(R0,R1,R2,R3,12*8);
  230. encrypt_round(R2,R3,R0,R1,13*8);
  231. encrypt_round(R0,R1,R2,R3,14*8);
  232. encrypt_last_round(R2,R3,R0,R1,15*8);
  233. output_whitening(%r10,%r11,a_offset)
  234. movq %r10, (%rsi)
  235. shl $32, R1
  236. xor R0, R1
  237. output_whitening(R1,%r11,c_offset)
  238. movq R1, 8(%rsi)
  239. popq R1
  240. movl $1,%eax
  241. ret
  242. ENDPROC(twofish_enc_blk)
  243. ENTRY(twofish_dec_blk)
  244. pushq R1
  245. /* %rdi contains the ctx address */
  246. /* %rsi contains the output address */
  247. /* %rdx contains the input address */
  248. /* ctx address is moved to free one non-rex register
  249. as target for the 8bit high operations */
  250. mov %rdi, %r11
  251. movq (R3), R1
  252. movq 8(R3), R3
  253. output_whitening(R1,%r11,a_offset)
  254. output_whitening(R3,%r11,c_offset)
  255. mov R1D, R0D
  256. shr $32, R1
  257. rol $16, R1D
  258. mov R3D, R2D
  259. shr $32, R3
  260. rol $1, R2D
  261. decrypt_round(R0,R1,R2,R3,15*8);
  262. decrypt_round(R2,R3,R0,R1,14*8);
  263. decrypt_round(R0,R1,R2,R3,13*8);
  264. decrypt_round(R2,R3,R0,R1,12*8);
  265. decrypt_round(R0,R1,R2,R3,11*8);
  266. decrypt_round(R2,R3,R0,R1,10*8);
  267. decrypt_round(R0,R1,R2,R3,9*8);
  268. decrypt_round(R2,R3,R0,R1,8*8);
  269. decrypt_round(R0,R1,R2,R3,7*8);
  270. decrypt_round(R2,R3,R0,R1,6*8);
  271. decrypt_round(R0,R1,R2,R3,5*8);
  272. decrypt_round(R2,R3,R0,R1,4*8);
  273. decrypt_round(R0,R1,R2,R3,3*8);
  274. decrypt_round(R2,R3,R0,R1,2*8);
  275. decrypt_round(R0,R1,R2,R3,1*8);
  276. decrypt_last_round(R2,R3,R0,R1,0);
  277. input_whitening(%r10,%r11,a_offset)
  278. movq %r10, (%rsi)
  279. shl $32, R1
  280. xor R0, R1
  281. input_whitening(R1,%r11,c_offset)
  282. movq R1, 8(%rsi)
  283. popq R1
  284. movl $1,%eax
  285. ret
  286. ENDPROC(twofish_dec_blk)