/*
 * Poly1305 authenticator algorithm, RFC7539, x64 AVX2 functions
 *
 * Copyright (C) 2015 Martin Willi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/linkage.h>

.data
.align 32

ANMASK:	.octa 0x0000000003ffffff0000000003ffffff
	.octa 0x0000000003ffffff0000000003ffffff
ORMASK:	.octa 0x00000000010000000000000001000000
	.octa 0x00000000010000000000000001000000
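
# Each 128-bit .octa above is two 64-bit lanes whose low dword carries a
# value (the high dword stays zero for vpmuludq below). ANMASK is the
# base-2^26 limb mask 0x3ffffff in every such lane; ORMASK sets bit 24,
# which on the top limb is the per-block 2^128 pad bit required by
# RFC 7539 (4 * 26 + 24 = 128).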

.text

#define h0 0x00(%rdi)
#define h1 0x04(%rdi)
#define h2 0x08(%rdi)
#define h3 0x0c(%rdi)
#define h4 0x10(%rdi)
#define r0 0x00(%rdx)
#define r1 0x04(%rdx)
#define r2 0x08(%rdx)
#define r3 0x0c(%rdx)
#define r4 0x10(%rdx)
#define u0 0x00(%r8)
#define u1 0x04(%r8)
#define u2 0x08(%r8)
#define u3 0x0c(%r8)
#define u4 0x10(%r8)
#define w0 0x14(%r8)
#define w1 0x18(%r8)
#define w2 0x1c(%r8)
#define w3 0x20(%r8)
#define w4 0x24(%r8)
#define y0 0x28(%r8)
#define y1 0x2c(%r8)
#define y2 0x30(%r8)
#define y3 0x34(%r8)
#define y4 0x38(%r8)
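
# The derived key limbs form one contiguous array at %r8: u[5] (r^2) at
# offset 0x00, w[5] (r^3) at 0x14 and y[5] (r^4) at 0x28, five u32 limbs
# each, mirroring the r[5] layout at %rdx.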
#define m %rsi
#define hc0 %ymm0
#define hc1 %ymm1
#define hc2 %ymm2
#define hc3 %ymm3
#define hc4 %ymm4
#define hc0x %xmm0
#define hc1x %xmm1
#define hc2x %xmm2
#define hc3x %xmm3
#define hc4x %xmm4
#define t1 %ymm5
#define t2 %ymm6
#define t1x %xmm5
#define t2x %xmm6
#define ruwy0 %ymm7
#define ruwy1 %ymm8
#define ruwy2 %ymm9
#define ruwy3 %ymm10
#define ruwy4 %ymm11
#define ruwy0x %xmm7
#define ruwy1x %xmm8
#define ruwy2x %xmm9
#define ruwy3x %xmm10
#define ruwy4x %xmm11
#define svxz1 %ymm12
#define svxz2 %ymm13
#define svxz3 %ymm14
#define svxz4 %ymm15
#define d0 %r9
#define d1 %r10
#define d2 %r11
#define d3 %r12
#define d4 %r13
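
# d0..d4 collect the five 64-bit column sums of the limb products before
# carry propagation; four 26x26-bit lane products summed per column exceed
# 32 bits, hence full general-purpose registers.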

ENTRY(poly1305_4block_avx2)
	# %rdi: Accumulator h[5]
	# %rsi: 64 byte input block m
	# %rdx: Poly1305 key r[5]
	# %rcx: Quadblock count
	# %r8:  Poly1305 derived key r^2 u[5], r^3 w[5], r^4 y[5]

	# This four-block variant uses loop-unrolled block processing. It
	# requires 4 Poly1305 keys: r, r^2, r^3 and r^4:
	# h = (h + m) * r  =>  h = (h + m1) * r^4 + m2 * r^3 + m3 * r^2 + m4 * r
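
	# The same schedule as a scalar C sketch (hypothetical helpers
	# add() and mul_rN(), not part of this file), to show why four key
	# powers let four blocks be folded per iteration:
	#
	#	/* serial:   h = ((((h+m1)*r + m2)*r + m3)*r + m4)*r */
	#	/* parallel: h = (h+m1)*r^4 + m2*r^3 + m3*r^2 + m4*r  */
	#	for (i = 0; i < blocks; i += 4)
	#		h = add(add(mul_r4(add(h, m[i])), mul_r3(m[i+1])),
	#			add(mul_r2(m[i+2]), mul_r1(m[i+3])));
	#
	# The four independent products map onto the four 64-bit lanes of
	# one ymm register below.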

	vzeroupper
	push		%rbx
	push		%r12
	push		%r13

	# combine r0,u0,w0,y0
	vmovd		y0,ruwy0x
	vmovd		w0,t1x
	vpunpcklqdq	t1,ruwy0,ruwy0
	vmovd		u0,t1x
	vmovd		r0,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy0,ruwy0
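
	# Gather pattern used throughout: two vmovd/vpunpcklqdq pairs build
	# the xmm halves [y0,w0] and [u0,r0], and vperm2i128 $0x20 glues
	# their low 128 bits together, so the ymm qword lanes 0..3 hold
	# y0,w0,u0,r0, each in the low dword of its lane as vpmuludq wants.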

	# combine r1,u1,w1,y1 and s1=r1*5,v1=u1*5,x1=w1*5,z1=y1*5
	vmovd		y1,ruwy1x
	vmovd		w1,t1x
	vpunpcklqdq	t1,ruwy1,ruwy1
	vmovd		u1,t1x
	vmovd		r1,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy1,ruwy1
	vpslld		$2,ruwy1,svxz1
	vpaddd		ruwy1,svxz1,svxz1

	# combine r2,u2,w2,y2 and s2=r2*5,v2=u2*5,x2=w2*5,z2=y2*5
	vmovd		y2,ruwy2x
	vmovd		w2,t1x
	vpunpcklqdq	t1,ruwy2,ruwy2
	vmovd		u2,t1x
	vmovd		r2,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy2,ruwy2
	vpslld		$2,ruwy2,svxz2
	vpaddd		ruwy2,svxz2,svxz2

	# combine r3,u3,w3,y3 and s3=r3*5,v3=u3*5,x3=w3*5,z3=y3*5
	vmovd		y3,ruwy3x
	vmovd		w3,t1x
	vpunpcklqdq	t1,ruwy3,ruwy3
	vmovd		u3,t1x
	vmovd		r3,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy3,ruwy3
	vpslld		$2,ruwy3,svxz3
	vpaddd		ruwy3,svxz3,svxz3

	# combine r4,u4,w4,y4 and s4=r4*5,v4=u4*5,x4=w4*5,z4=y4*5
	vmovd		y4,ruwy4x
	vmovd		w4,t1x
	vpunpcklqdq	t1,ruwy4,ruwy4
	vmovd		u4,t1x
	vmovd		r4,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy4,ruwy4
	vpslld		$2,ruwy4,svxz4
	vpaddd		ruwy4,svxz4,svxz4
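
	# The s/v/x/z vectors hold 5 * (key limb): since 2^130 == 5 modulo
	# 2^130 - 5, a partial product that lands one modulus width too high
	# is folded back in by using 5 * limb instead, e.g.
	#
	#	hc1 * r4 * 2^(26+104) == hc1 * (5 * r4)  (mod 2^130 - 5)
	#
	# vpslld $2 plus vpaddd computes 4 * x + x = 5 * x per lane.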

.Ldoblock4:
	# hc0 = [m[48-51] & 0x3ffffff, m[32-35] & 0x3ffffff,
	#	 m[16-19] & 0x3ffffff, m[ 0- 3] & 0x3ffffff + h0]
	vmovd		0x00(m),hc0x
	vmovd		0x10(m),t1x
	vpunpcklqdq	t1,hc0,hc0
	vmovd		0x20(m),t1x
	vmovd		0x30(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc0,hc0
	vpand		ANMASK(%rip),hc0,hc0
	vmovd		h0,t1x
	vpaddd		t1,hc0,hc0
	# hc1 = [(m[51-54] >> 2) & 0x3ffffff, (m[35-38] >> 2) & 0x3ffffff,
	#	 (m[19-22] >> 2) & 0x3ffffff, (m[ 3- 6] >> 2) & 0x3ffffff + h1]
	vmovd		0x03(m),hc1x
	vmovd		0x13(m),t1x
	vpunpcklqdq	t1,hc1,hc1
	vmovd		0x23(m),t1x
	vmovd		0x33(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc1,hc1
	vpsrld		$2,hc1,hc1
	vpand		ANMASK(%rip),hc1,hc1
	vmovd		h1,t1x
	vpaddd		t1,hc1,hc1
	# hc2 = [(m[54-57] >> 4) & 0x3ffffff, (m[38-41] >> 4) & 0x3ffffff,
	#	 (m[22-25] >> 4) & 0x3ffffff, (m[ 6- 9] >> 4) & 0x3ffffff + h2]
	vmovd		0x06(m),hc2x
	vmovd		0x16(m),t1x
	vpunpcklqdq	t1,hc2,hc2
	vmovd		0x26(m),t1x
	vmovd		0x36(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc2,hc2
	vpsrld		$4,hc2,hc2
	vpand		ANMASK(%rip),hc2,hc2
	vmovd		h2,t1x
	vpaddd		t1,hc2,hc2
	# hc3 = [(m[57-60] >> 6) & 0x3ffffff, (m[41-44] >> 6) & 0x3ffffff,
	#	 (m[25-28] >> 6) & 0x3ffffff, (m[ 9-12] >> 6) & 0x3ffffff + h3]
	vmovd		0x09(m),hc3x
	vmovd		0x19(m),t1x
	vpunpcklqdq	t1,hc3,hc3
	vmovd		0x29(m),t1x
	vmovd		0x39(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc3,hc3
	vpsrld		$6,hc3,hc3
	vpand		ANMASK(%rip),hc3,hc3
	vmovd		h3,t1x
	vpaddd		t1,hc3,hc3
	# hc4 = [(m[60-63] >> 8) | (1<<24), (m[44-47] >> 8) | (1<<24),
	#	 (m[28-31] >> 8) | (1<<24), (m[12-15] >> 8) | (1<<24) + h4]
	vmovd		0x0c(m),hc4x
	vmovd		0x1c(m),t1x
	vpunpcklqdq	t1,hc4,hc4
	vmovd		0x2c(m),t1x
	vmovd		0x3c(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc4,hc4
	vpsrld		$8,hc4,hc4
	vpor		ORMASK(%rip),hc4,hc4
	vmovd		h4,t1x
	vpaddd		t1,hc4,hc4
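
	# The extraction above, as scalar C for one 16-byte block (sketch
	# only; le32() stands for an unaligned little-endian 32-bit load):
	#
	#	hc0 =  le32(m +  0)       & 0x3ffffff;
	#	hc1 = (le32(m +  3) >> 2) & 0x3ffffff;
	#	hc2 = (le32(m +  6) >> 4) & 0x3ffffff;
	#	hc3 = (le32(m +  9) >> 6) & 0x3ffffff;
	#	hc4 = (le32(m + 12) >> 8) | (1 << 24);
	#
	# The vector code does this for four blocks at once, one block per
	# 64-bit lane, and folds the accumulator h into the first block.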

	# t1 = [ hc0[3] * r0, hc0[2] * u0, hc0[1] * w0, hc0[0] * y0 ]
	vpmuludq	hc0,ruwy0,t1
	# t1 += [ hc1[3] * s4, hc1[2] * v4, hc1[1] * x4, hc1[0] * z4 ]
	vpmuludq	hc1,svxz4,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc2[3] * s3, hc2[2] * v3, hc2[1] * x3, hc2[0] * z3 ]
	vpmuludq	hc2,svxz3,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc3[3] * s2, hc3[2] * v2, hc3[1] * x2, hc3[0] * z2 ]
	vpmuludq	hc3,svxz2,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc4[3] * s1, hc4[2] * v1, hc4[1] * x1, hc4[0] * z1 ]
	vpmuludq	hc4,svxz1,t2
	vpaddq		t2,t1,t1
	# d0 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq		$0xee,t1,t2
	vpaddq		t2,t1,t1
	vpsrldq		$8,t1,t2
	vpaddq		t2,t1,t1
	vmovq		t1x,d0
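
	# Horizontal sum pattern: vpermq $0xee copies the upper two qword
	# lanes over the lower two, vpsrldq $8 then shifts the remaining
	# partner qword down, and the two vpaddq leave lane 0 holding
	# t1[0]+t1[1]+t1[2]+t1[3], which vmovq extracts. The same sequence
	# closes each of the five columns.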

	# t1 = [ hc0[3] * r1, hc0[2] * u1, hc0[1] * w1, hc0[0] * y1 ]
	vpmuludq	hc0,ruwy1,t1
	# t1 += [ hc1[3] * r0, hc1[2] * u0, hc1[1] * w0, hc1[0] * y0 ]
	vpmuludq	hc1,ruwy0,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc2[3] * s4, hc2[2] * v4, hc2[1] * x4, hc2[0] * z4 ]
	vpmuludq	hc2,svxz4,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc3[3] * s3, hc3[2] * v3, hc3[1] * x3, hc3[0] * z3 ]
	vpmuludq	hc3,svxz3,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc4[3] * s2, hc4[2] * v2, hc4[1] * x2, hc4[0] * z2 ]
	vpmuludq	hc4,svxz2,t2
	vpaddq		t2,t1,t1
	# d1 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq		$0xee,t1,t2
	vpaddq		t2,t1,t1
	vpsrldq		$8,t1,t2
	vpaddq		t2,t1,t1
	vmovq		t1x,d1

	# t1 = [ hc0[3] * r2, hc0[2] * u2, hc0[1] * w2, hc0[0] * y2 ]
	vpmuludq	hc0,ruwy2,t1
	# t1 += [ hc1[3] * r1, hc1[2] * u1, hc1[1] * w1, hc1[0] * y1 ]
	vpmuludq	hc1,ruwy1,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc2[3] * r0, hc2[2] * u0, hc2[1] * w0, hc2[0] * y0 ]
	vpmuludq	hc2,ruwy0,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc3[3] * s4, hc3[2] * v4, hc3[1] * x4, hc3[0] * z4 ]
	vpmuludq	hc3,svxz4,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc4[3] * s3, hc4[2] * v3, hc4[1] * x3, hc4[0] * z3 ]
	vpmuludq	hc4,svxz3,t2
	vpaddq		t2,t1,t1
	# d2 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq		$0xee,t1,t2
	vpaddq		t2,t1,t1
	vpsrldq		$8,t1,t2
	vpaddq		t2,t1,t1
	vmovq		t1x,d2

	# t1 = [ hc0[3] * r3, hc0[2] * u3, hc0[1] * w3, hc0[0] * y3 ]
	vpmuludq	hc0,ruwy3,t1
	# t1 += [ hc1[3] * r2, hc1[2] * u2, hc1[1] * w2, hc1[0] * y2 ]
	vpmuludq	hc1,ruwy2,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc2[3] * r1, hc2[2] * u1, hc2[1] * w1, hc2[0] * y1 ]
	vpmuludq	hc2,ruwy1,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc3[3] * r0, hc3[2] * u0, hc3[1] * w0, hc3[0] * y0 ]
	vpmuludq	hc3,ruwy0,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc4[3] * s4, hc4[2] * v4, hc4[1] * x4, hc4[0] * z4 ]
	vpmuludq	hc4,svxz4,t2
	vpaddq		t2,t1,t1
	# d3 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq		$0xee,t1,t2
	vpaddq		t2,t1,t1
	vpsrldq		$8,t1,t2
	vpaddq		t2,t1,t1
	vmovq		t1x,d3

	# t1 = [ hc0[3] * r4, hc0[2] * u4, hc0[1] * w4, hc0[0] * y4 ]
	vpmuludq	hc0,ruwy4,t1
	# t1 += [ hc1[3] * r3, hc1[2] * u3, hc1[1] * w3, hc1[0] * y3 ]
	vpmuludq	hc1,ruwy3,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc2[3] * r2, hc2[2] * u2, hc2[1] * w2, hc2[0] * y2 ]
	vpmuludq	hc2,ruwy2,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc3[3] * r1, hc3[2] * u1, hc3[1] * w1, hc3[0] * y1 ]
	vpmuludq	hc3,ruwy1,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc4[3] * r0, hc4[2] * u0, hc4[1] * w0, hc4[0] * y0 ]
	vpmuludq	hc4,ruwy0,t2
	vpaddq		t2,t1,t1
	# d4 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq		$0xee,t1,t2
	vpaddq		t2,t1,t1
	vpsrldq		$8,t1,t2
	vpaddq		t2,t1,t1
	vmovq		t1x,d4

	# Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 ->
	# h2 -> h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and
	# h1 < 2^26 + a small amount. Careful: we must not assume the carry
	# bits 'd0 >> 26', 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and
	# '(d4 >> 26) * 5' fit in 32-bit integers. It's true in a
	# single-block implementation, but not here.
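
	# The same chain as C, with every intermediate kept 64-bit (sketch;
	# d0..d4 are the u64 column sums computed above):
	#
	#	d1 += d0 >> 26;		h0 = d0 & 0x3ffffff;
	#	d2 += d1 >> 26;		h1 = d1 & 0x3ffffff;
	#	d3 += d2 >> 26;		h2 = d2 & 0x3ffffff;
	#	d4 += d3 >> 26;		h3 = d3 & 0x3ffffff;
	#	h0 += (d4 >> 26) * 5;	h4 = d4 & 0x3ffffff;
	#	h1 += h0 >> 26;		h0 &= 0x3ffffff;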

	# d1 += d0 >> 26
	mov		d0,%rax
	shr		$26,%rax
	add		%rax,d1
	# h0 = d0 & 0x3ffffff
	mov		d0,%rbx
	and		$0x3ffffff,%ebx

	# d2 += d1 >> 26
	mov		d1,%rax
	shr		$26,%rax
	add		%rax,d2
	# h1 = d1 & 0x3ffffff
	mov		d1,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h1

	# d3 += d2 >> 26
	mov		d2,%rax
	shr		$26,%rax
	add		%rax,d3
	# h2 = d2 & 0x3ffffff
	mov		d2,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h2

	# d4 += d3 >> 26
	mov		d3,%rax
	shr		$26,%rax
	add		%rax,d4
	# h3 = d3 & 0x3ffffff
	mov		d3,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h3

	# h0 += (d4 >> 26) * 5
	mov		d4,%rax
	shr		$26,%rax
	lea		(%rax,%rax,4),%rax
	add		%rax,%rbx
	# h4 = d4 & 0x3ffffff
	mov		d4,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h4

	# h1 += h0 >> 26
	mov		%rbx,%rax
	shr		$26,%rax
	add		%eax,h1
	# h0 = h0 & 0x3ffffff
	andl		$0x3ffffff,%ebx
	mov		%ebx,h0

	add		$0x40,m
	dec		%rcx
	jnz		.Ldoblock4

	vzeroupper
	pop		%r13
	pop		%r12
	pop		%rbx
	ret
ENDPROC(poly1305_4block_avx2)
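
# Matching the SysV argument registers documented at the top of the
# function, a plausible C-side declaration (reference sketch only; the
# authoritative prototype lives in the C glue code, not in this file):
#
#	asmlinkage void poly1305_4block_avx2(u32 *h, const u8 *m,
#					     const u32 *r,
#					     unsigned int blocks,
#					     const u32 *u);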