
/*
 * linux/arch/arm64/crypto/aes-neon.S - AES cipher for ARMv8 NEON
 *
 * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

#define AES_ENTRY(func)		ENTRY(neon_ ## func)
#define AES_ENDPROC(func)	ENDPROC(neon_ ## func)

	/* multiply by polynomial 'x' in GF(2^8) */
	.macro		mul_by_x, out, in, temp, const
	sshr		\temp, \in, #7
	add		\out, \in, \in
	and		\temp, \temp, \const
	eor		\out, \out, \temp
	.endm

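	/*
	 * mul_by_x is the usual AES xtime() step: 'add' doubles each byte
	 * (a left shift by one), while 'sshr #7' replicates each byte's
	 * top bit into a 0x00/0xff mask. ANDing the mask with \const
	 * (0x1b, kept in v14) and XORing it in reduces the result modulo
	 * the AES field polynomial x^8 + x^4 + x^3 + x + 1; per byte,
	 * this is (x << 1) ^ ((x & 0x80) ? 0x1b : 0).
	 */
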
	/* preload the entire Sbox */
	.macro		prepare, sbox, shiftrows, temp
	adr		\temp, \sbox
	movi		v12.16b, #0x40
	ldr		q13, \shiftrows
	movi		v14.16b, #0x1b
	ld1		{v16.16b-v19.16b}, [\temp], #64
	ld1		{v20.16b-v23.16b}, [\temp], #64
	ld1		{v24.16b-v27.16b}, [\temp], #64
	ld1		{v28.16b-v31.16b}, [\temp]
	.endm

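	/*
	 * Register conventions established here and relied on throughout:
	 * v16-v31 hold the full 256-byte Sbox, v12 is the 0x40 index
	 * rebase constant for the chunked tbl/tbx lookups, v13 holds the
	 * ShiftRows permutation, and v14 the 0x1b reduction constant for
	 * mul_by_x. v8-v11 (plus v12/v13 in the 2x/4x paths) are scratch.
	 */
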
	/* do preload for encryption */
	.macro		enc_prepare, ignore0, ignore1, temp
	prepare		.LForward_Sbox, .LForward_ShiftRows, \temp
	.endm

	.macro		enc_switch_key, ignore0, ignore1, temp
	/* do nothing */
	.endm

	/* do preload for decryption */
	.macro		dec_prepare, ignore0, ignore1, temp
	prepare		.LReverse_Sbox, .LReverse_ShiftRows, \temp
	.endm

	/* apply SubBytes transformation using the preloaded Sbox */
	.macro		sub_bytes, in
	sub		v9.16b, \in\().16b, v12.16b
	tbl		\in\().16b, {v16.16b-v19.16b}, \in\().16b
	sub		v10.16b, v9.16b, v12.16b
	tbx		\in\().16b, {v20.16b-v23.16b}, v9.16b
	sub		v11.16b, v10.16b, v12.16b
	tbx		\in\().16b, {v24.16b-v27.16b}, v10.16b
	tbx		\in\().16b, {v28.16b-v31.16b}, v11.16b
	.endm

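	/*
	 * tbl zeroes output bytes whose index is out of range (here >= 64)
	 * and tbx leaves them unchanged, so the 256-byte Sbox can be
	 * consulted in four 64-byte quarters: each 'sub ..., v12' rebases
	 * the indices by 0x40 so the bytes belonging to the next quarter
	 * fall back into range for the following lookup.
	 */
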
	/* apply MixColumns transformation */
	.macro		mix_columns, in
	mul_by_x	v10.16b, \in\().16b, v9.16b, v14.16b
	rev32		v8.8h, \in\().8h
	eor		\in\().16b, v10.16b, \in\().16b
	shl		v9.4s, v8.4s, #24
	shl		v11.4s, \in\().4s, #24
	sri		v9.4s, v8.4s, #8
	sri		v11.4s, \in\().4s, #8
	eor		v9.16b, v9.16b, v8.16b
	eor		v10.16b, v10.16b, v9.16b
	eor		\in\().16b, v10.16b, v11.16b
	.endm

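	/*
	 * Computes, per 32-bit column and with byte-granular rotates:
	 *	out = 2*x ^ 3*ror8(x) ^ ror16(x) ^ ror24(x)
	 * which is the MixColumns matrix { 2, 3, 1, 1 } applied to each
	 * column. rev32 supplies the 16-bit rotate; the shl/sri pairs
	 * build the 8-bit rotates.
	 */
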
	/* Inverse MixColumns: pre-multiply by { 5, 0, 4, 0 } */
	.macro		inv_mix_columns, in
	mul_by_x	v11.16b, \in\().16b, v10.16b, v14.16b
	mul_by_x	v11.16b, v11.16b, v10.16b, v14.16b
	eor		\in\().16b, \in\().16b, v11.16b
	rev32		v11.8h, v11.8h
	eor		\in\().16b, \in\().16b, v11.16b
	mix_columns	\in
	.endm

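	/*
	 * This exploits the circulant factorisation
	 *	{ e, b, d, 9 } = { 2, 3, 1, 1 } x { 5, 0, 4, 0 }
	 * over GF(2^8): the two mul_by_x steps give 4*x, so after
	 * 'in = 5*x ^ 4*ror16(x)' a regular MixColumns pass completes
	 * the InvMixColumns transformation.
	 */
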
	.macro		do_block, enc, in, rounds, rk, rkp, i
	ld1		{v15.4s}, [\rk]
	add		\rkp, \rk, #16
	mov		\i, \rounds
1111:	eor		\in\().16b, \in\().16b, v15.16b		/* ^round key */
	tbl		\in\().16b, {\in\().16b}, v13.16b	/* ShiftRows */
	sub_bytes	\in
	ld1		{v15.4s}, [\rkp], #16
	subs		\i, \i, #1
	beq		2222f
	.if		\enc == 1
	mix_columns	\in
	.else
	inv_mix_columns	\in
	.endif
	b		1111b
2222:	eor		\in\().16b, \in\().16b, v15.16b		/* ^round key */
	.endm

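	/*
	 * One block: \rounds iterations of AddRoundKey/ShiftRows/SubBytes,
	 * with (Inv)MixColumns applied on all but the last iteration,
	 * followed by the final AddRoundKey at 2222 - the standard
	 * FIPS-197 round structure. ShiftRows is just a byte permutation,
	 * performed by tbl with the v13 index vector.
	 */
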
	.macro		encrypt_block, in, rounds, rk, rkp, i
	do_block	1, \in, \rounds, \rk, \rkp, \i
	.endm

	.macro		decrypt_block, in, rounds, rk, rkp, i
	do_block	0, \in, \rounds, \rk, \rkp, \i
	.endm

	/*
	 * Interleaved versions: functionally equivalent to the
	 * ones above, but applied to 2 or 4 AES states in parallel.
	 */

	.macro		sub_bytes_2x, in0, in1
	sub		v8.16b, \in0\().16b, v12.16b
	sub		v9.16b, \in1\().16b, v12.16b
	tbl		\in0\().16b, {v16.16b-v19.16b}, \in0\().16b
	tbl		\in1\().16b, {v16.16b-v19.16b}, \in1\().16b
	sub		v10.16b, v8.16b, v12.16b
	sub		v11.16b, v9.16b, v12.16b
	tbx		\in0\().16b, {v20.16b-v23.16b}, v8.16b
	tbx		\in1\().16b, {v20.16b-v23.16b}, v9.16b
	sub		v8.16b, v10.16b, v12.16b
	sub		v9.16b, v11.16b, v12.16b
	tbx		\in0\().16b, {v24.16b-v27.16b}, v10.16b
	tbx		\in1\().16b, {v24.16b-v27.16b}, v11.16b
	tbx		\in0\().16b, {v28.16b-v31.16b}, v8.16b
	tbx		\in1\().16b, {v28.16b-v31.16b}, v9.16b
	.endm

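	/*
	 * Same four-quarter lookup as sub_bytes, with the index rebasing
	 * of one state interleaved with the table lookups of the other;
	 * the intent is to keep independent instructions between each
	 * tbl/tbx and the use of its result.
	 */
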
	.macro		sub_bytes_4x, in0, in1, in2, in3
	sub		v8.16b, \in0\().16b, v12.16b
	tbl		\in0\().16b, {v16.16b-v19.16b}, \in0\().16b
	sub		v9.16b, \in1\().16b, v12.16b
	tbl		\in1\().16b, {v16.16b-v19.16b}, \in1\().16b
	sub		v10.16b, \in2\().16b, v12.16b
	tbl		\in2\().16b, {v16.16b-v19.16b}, \in2\().16b
	sub		v11.16b, \in3\().16b, v12.16b
	tbl		\in3\().16b, {v16.16b-v19.16b}, \in3\().16b
	tbx		\in0\().16b, {v20.16b-v23.16b}, v8.16b
	tbx		\in1\().16b, {v20.16b-v23.16b}, v9.16b
	sub		v8.16b, v8.16b, v12.16b
	tbx		\in2\().16b, {v20.16b-v23.16b}, v10.16b
	sub		v9.16b, v9.16b, v12.16b
	tbx		\in3\().16b, {v20.16b-v23.16b}, v11.16b
	sub		v10.16b, v10.16b, v12.16b
	tbx		\in0\().16b, {v24.16b-v27.16b}, v8.16b
	sub		v11.16b, v11.16b, v12.16b
	tbx		\in1\().16b, {v24.16b-v27.16b}, v9.16b
	sub		v8.16b, v8.16b, v12.16b
	tbx		\in2\().16b, {v24.16b-v27.16b}, v10.16b
	sub		v9.16b, v9.16b, v12.16b
	tbx		\in3\().16b, {v24.16b-v27.16b}, v11.16b
	sub		v10.16b, v10.16b, v12.16b
	tbx		\in0\().16b, {v28.16b-v31.16b}, v8.16b
	sub		v11.16b, v11.16b, v12.16b
	tbx		\in1\().16b, {v28.16b-v31.16b}, v9.16b
	tbx		\in2\().16b, {v28.16b-v31.16b}, v10.16b
	tbx		\in3\().16b, {v28.16b-v31.16b}, v11.16b
	.endm

	.macro		mul_by_x_2x, out0, out1, in0, in1, tmp0, tmp1, const
	sshr		\tmp0\().16b, \in0\().16b, #7
	add		\out0\().16b, \in0\().16b, \in0\().16b
	sshr		\tmp1\().16b, \in1\().16b, #7
	and		\tmp0\().16b, \tmp0\().16b, \const\().16b
	add		\out1\().16b, \in1\().16b, \in1\().16b
	and		\tmp1\().16b, \tmp1\().16b, \const\().16b
	eor		\out0\().16b, \out0\().16b, \tmp0\().16b
	eor		\out1\().16b, \out1\().16b, \tmp1\().16b
	.endm

	.macro		mix_columns_2x, in0, in1
	mul_by_x_2x	v8, v9, \in0, \in1, v10, v11, v14
	rev32		v10.8h, \in0\().8h
	rev32		v11.8h, \in1\().8h
	eor		\in0\().16b, v8.16b, \in0\().16b
	eor		\in1\().16b, v9.16b, \in1\().16b
	shl		v12.4s, v10.4s, #24
	shl		v13.4s, v11.4s, #24
	eor		v8.16b, v8.16b, v10.16b
	sri		v12.4s, v10.4s, #8
	shl		v10.4s, \in0\().4s, #24
	eor		v9.16b, v9.16b, v11.16b
	sri		v13.4s, v11.4s, #8
	shl		v11.4s, \in1\().4s, #24
	sri		v10.4s, \in0\().4s, #8
	eor		\in0\().16b, v8.16b, v12.16b
	sri		v11.4s, \in1\().4s, #8
	eor		\in1\().16b, v9.16b, v13.16b
	eor		\in0\().16b, v10.16b, \in0\().16b
	eor		\in1\().16b, v11.16b, \in1\().16b
	.endm

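	/*
	 * Two-state version of mix_columns. Note that it needs six
	 * temporaries and so clobbers v12 and v13 (the 0x40 and ShiftRows
	 * constants), which is why do_block_2x/_4x re-materialise those
	 * registers at the bottom of their round loops.
	 */
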
	.macro		inv_mix_cols_2x, in0, in1
	mul_by_x_2x	v8, v9, \in0, \in1, v10, v11, v14
	mul_by_x_2x	v8, v9, v8, v9, v10, v11, v14
	eor		\in0\().16b, \in0\().16b, v8.16b
	eor		\in1\().16b, \in1\().16b, v9.16b
	rev32		v8.8h, v8.8h
	rev32		v9.8h, v9.8h
	eor		\in0\().16b, \in0\().16b, v8.16b
	eor		\in1\().16b, \in1\().16b, v9.16b
	mix_columns_2x	\in0, \in1
	.endm

	.macro		inv_mix_cols_4x, in0, in1, in2, in3
	mul_by_x_2x	v8, v9, \in0, \in1, v10, v11, v14
	mul_by_x_2x	v10, v11, \in2, \in3, v12, v13, v14
	mul_by_x_2x	v8, v9, v8, v9, v12, v13, v14
	mul_by_x_2x	v10, v11, v10, v11, v12, v13, v14
	eor		\in0\().16b, \in0\().16b, v8.16b
	eor		\in1\().16b, \in1\().16b, v9.16b
	eor		\in2\().16b, \in2\().16b, v10.16b
	eor		\in3\().16b, \in3\().16b, v11.16b
	rev32		v8.8h, v8.8h
	rev32		v9.8h, v9.8h
	rev32		v10.8h, v10.8h
	rev32		v11.8h, v11.8h
	eor		\in0\().16b, \in0\().16b, v8.16b
	eor		\in1\().16b, \in1\().16b, v9.16b
	eor		\in2\().16b, \in2\().16b, v10.16b
	eor		\in3\().16b, \in3\().16b, v11.16b
	mix_columns_2x	\in0, \in1
	mix_columns_2x	\in2, \in3
	.endm

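	/*
	 * The 4x variant runs the { 5, 0, 4, 0 } pre-multiply across all
	 * four states before the two mix_columns_2x passes, keeping more
	 * independent work in flight between dependent steps; the
	 * mix_columns_2x calls then clobber v12/v13, which the round
	 * loops below reload.
	 */
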
	.macro		do_block_2x, enc, in0, in1, rounds, rk, rkp, i
	ld1		{v15.4s}, [\rk]
	add		\rkp, \rk, #16
	mov		\i, \rounds
1111:	eor		\in0\().16b, \in0\().16b, v15.16b	/* ^round key */
	eor		\in1\().16b, \in1\().16b, v15.16b	/* ^round key */
	sub_bytes_2x	\in0, \in1
	tbl		\in0\().16b, {\in0\().16b}, v13.16b	/* ShiftRows */
	tbl		\in1\().16b, {\in1\().16b}, v13.16b	/* ShiftRows */
	ld1		{v15.4s}, [\rkp], #16
	subs		\i, \i, #1
	beq		2222f
	.if		\enc == 1
	mix_columns_2x	\in0, \in1
	ldr		q13, .LForward_ShiftRows
	.else
	inv_mix_cols_2x	\in0, \in1
	ldr		q13, .LReverse_ShiftRows
	.endif
	movi		v12.16b, #0x40
	b		1111b
2222:	eor		\in0\().16b, \in0\().16b, v15.16b	/* ^round key */
	eor		\in1\().16b, \in1\().16b, v15.16b	/* ^round key */
	.endm

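	/*
	 * Identical round structure to do_block, but the interleaved
	 * MixColumns helpers use v12/v13 as scratch, so the ShiftRows
	 * vector and the 0x40 constant have to be reloaded on every
	 * trip around the loop.
	 */
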
	.macro		do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i
	ld1		{v15.4s}, [\rk]
	add		\rkp, \rk, #16
	mov		\i, \rounds
1111:	eor		\in0\().16b, \in0\().16b, v15.16b	/* ^round key */
	eor		\in1\().16b, \in1\().16b, v15.16b	/* ^round key */
	eor		\in2\().16b, \in2\().16b, v15.16b	/* ^round key */
	eor		\in3\().16b, \in3\().16b, v15.16b	/* ^round key */
	sub_bytes_4x	\in0, \in1, \in2, \in3
	tbl		\in0\().16b, {\in0\().16b}, v13.16b	/* ShiftRows */
	tbl		\in1\().16b, {\in1\().16b}, v13.16b	/* ShiftRows */
	tbl		\in2\().16b, {\in2\().16b}, v13.16b	/* ShiftRows */
	tbl		\in3\().16b, {\in3\().16b}, v13.16b	/* ShiftRows */
	ld1		{v15.4s}, [\rkp], #16
	subs		\i, \i, #1
	beq		2222f
	.if		\enc == 1
	mix_columns_2x	\in0, \in1
	mix_columns_2x	\in2, \in3
	ldr		q13, .LForward_ShiftRows
	.else
	inv_mix_cols_4x	\in0, \in1, \in2, \in3
	ldr		q13, .LReverse_ShiftRows
	.endif
	movi		v12.16b, #0x40
	b		1111b
2222:	eor		\in0\().16b, \in0\().16b, v15.16b	/* ^round key */
	eor		\in1\().16b, \in1\().16b, v15.16b	/* ^round key */
	eor		\in2\().16b, \in2\().16b, v15.16b	/* ^round key */
	eor		\in3\().16b, \in3\().16b, v15.16b	/* ^round key */
	.endm

	.macro		encrypt_block2x, in0, in1, rounds, rk, rkp, i
	do_block_2x	1, \in0, \in1, \rounds, \rk, \rkp, \i
	.endm

	.macro		decrypt_block2x, in0, in1, rounds, rk, rkp, i
	do_block_2x	0, \in0, \in1, \rounds, \rk, \rkp, \i
	.endm

	.macro		encrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i
	do_block_4x	1, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i
	.endm

	.macro		decrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i
	do_block_4x	0, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i
	.endm

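	/*
	 * aes-modes.S supplies the actual cipher-mode entry points built
	 * on the macros above; the AES_ENTRY/AES_ENDPROC defines at the
	 * top give them a neon_ prefix, so the same modes code can also
	 * be assembled against the Crypto Extensions implementation.
	 */
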
#include "aes-modes.S"

	.text
	.align		4
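
	/*
	 * tbl index vectors implementing ShiftRows and its inverse. The
	 * CPU_LE/CPU_BE variants are byte-reversed copies of each other,
	 * so the permutation comes out right for either endianness.
	 */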
.LForward_ShiftRows:
CPU_LE(	.byte		0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3	)
CPU_LE(	.byte		0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb	)
CPU_BE(	.byte		0xb, 0x6, 0x1, 0xc, 0x7, 0x2, 0xd, 0x8	)
CPU_BE(	.byte		0x3, 0xe, 0x9, 0x4, 0xf, 0xa, 0x5, 0x0	)

.LReverse_ShiftRows:
CPU_LE(	.byte		0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb	)
CPU_LE(	.byte		0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3	)
CPU_BE(	.byte		0x3, 0x6, 0x9, 0xc, 0xf, 0x2, 0x5, 0x8	)
CPU_BE(	.byte		0xb, 0xe, 0x1, 0x4, 0x7, 0xa, 0xd, 0x0	)
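
	/*
	 * The standard FIPS-197 forward and inverse S-boxes, loaded into
	 * v16-v31 by the prepare macro.
	 */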
.LForward_Sbox:
	.byte		0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
	.byte		0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
	.byte		0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
	.byte		0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
	.byte		0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
	.byte		0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
	.byte		0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
	.byte		0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
	.byte		0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
	.byte		0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
	.byte		0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
	.byte		0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
	.byte		0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
	.byte		0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
	.byte		0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
	.byte		0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
	.byte		0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
	.byte		0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
	.byte		0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
	.byte		0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
	.byte		0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
	.byte		0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
	.byte		0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
	.byte		0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
	.byte		0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
	.byte		0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
	.byte		0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
	.byte		0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
	.byte		0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
	.byte		0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
	.byte		0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
	.byte		0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
.LReverse_Sbox:
	.byte		0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
	.byte		0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
	.byte		0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
	.byte		0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
	.byte		0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
	.byte		0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
	.byte		0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
	.byte		0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
	.byte		0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
	.byte		0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
	.byte		0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
	.byte		0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
	.byte		0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
	.byte		0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
	.byte		0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
	.byte		0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
	.byte		0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
	.byte		0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
	.byte		0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
	.byte		0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
	.byte		0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
	.byte		0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
	.byte		0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
	.byte		0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
	.byte		0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
	.byte		0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
	.byte		0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
	.byte		0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
	.byte		0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
	.byte		0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
	.byte		0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
	.byte		0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d