glue_helper-asm-avx.S

/*
 * Shared glue code for 128-bit block ciphers, AVX assembler macros
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
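
/* Load eight consecutive 16-byte blocks from src into x0..x7. */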
#define load_8way(src, x0, x1, x2, x3, x4, x5, x6, x7) \
	vmovdqu (0*16)(src), x0; \
	vmovdqu (1*16)(src), x1; \
	vmovdqu (2*16)(src), x2; \
	vmovdqu (3*16)(src), x3; \
	vmovdqu (4*16)(src), x4; \
	vmovdqu (5*16)(src), x5; \
	vmovdqu (6*16)(src), x6; \
	vmovdqu (7*16)(src), x7;
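
/* Store the eight blocks x0..x7 to consecutive 16-byte slots at dst. */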
#define store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \
	vmovdqu x0, (0*16)(dst); \
	vmovdqu x1, (1*16)(dst); \
	vmovdqu x2, (2*16)(dst); \
	vmovdqu x3, (3*16)(dst); \
	vmovdqu x4, (4*16)(dst); \
	vmovdqu x5, (5*16)(dst); \
	vmovdqu x6, (6*16)(dst); \
	vmovdqu x7, (7*16)(dst);
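
/*
 * CBC decryption: XOR each decrypted block x1..x7 with the preceding
 * ciphertext block from src, then store all eight blocks to dst.  The
 * chaining XOR for the first block is left to the caller.
 */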
#define store_cbc_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \
	vpxor (0*16)(src), x1, x1; \
	vpxor (1*16)(src), x2, x2; \
	vpxor (2*16)(src), x3, x3; \
	vpxor (3*16)(src), x4, x4; \
	vpxor (4*16)(src), x5, x5; \
	vpxor (5*16)(src), x6, x6; \
	vpxor (6*16)(src), x7, x7; \
	store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
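
/*
 * Increment the 128-bit little-endian value in x.  minus_one must hold
 * {low: -1, high: 0}; vpcmpeqq detects a low-qword wraparound, and the
 * shifted compare result propagates the carry into the high qword.
 */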
#define inc_le128(x, minus_one, tmp) \
	vpcmpeqq minus_one, x, tmp; \
	vpsubq minus_one, x, x; \
	vpslldq $8, tmp, tmp; \
	vpsubq tmp, x, x;
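
/*
 * Build eight CTR-mode counter blocks in x0..x7.  The counter at (iv)
 * is kept little-endian for cheap increments; each value is converted
 * to big-endian through the bswap shuffle mask, and the counter
 * advanced past all eight blocks is written back to (iv).
 */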
#define load_ctr_8way(iv, bswap, x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2) \
	vpcmpeqd t0, t0, t0; \
	vpsrldq $8, t0, t0; /* low: -1, high: 0 */ \
	vmovdqa bswap, t1; \
	\
	/* load IV and byteswap */ \
	vmovdqu (iv), x7; \
	vpshufb t1, x7, x0; \
	\
	/* construct IVs */ \
	inc_le128(x7, t0, t2); \
	vpshufb t1, x7, x1; \
	inc_le128(x7, t0, t2); \
	vpshufb t1, x7, x2; \
	inc_le128(x7, t0, t2); \
	vpshufb t1, x7, x3; \
	inc_le128(x7, t0, t2); \
	vpshufb t1, x7, x4; \
	inc_le128(x7, t0, t2); \
	vpshufb t1, x7, x5; \
	inc_le128(x7, t0, t2); \
	vpshufb t1, x7, x6; \
	inc_le128(x7, t0, t2); \
	vmovdqa x7, t2; \
	vpshufb t1, x7, x7; \
	inc_le128(t2, t0, t1); \
	vmovdqu t2, (iv);
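
/* XOR the keystream blocks x0..x7 with the src blocks and store to dst. */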
#define store_ctr_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \
	vpxor (0*16)(src), x0, x0; \
	vpxor (1*16)(src), x1, x1; \
	vpxor (2*16)(src), x2, x2; \
	vpxor (3*16)(src), x3, x3; \
	vpxor (4*16)(src), x4, x4; \
	vpxor (5*16)(src), x5, x5; \
	vpxor (6*16)(src), x6, x6; \
	vpxor (7*16)(src), x7, x7; \
	store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
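
/*
 * Multiply the 128-bit tweak in iv by x in GF(2^128) ("ble" = XTS
 * little-endian block order): vpaddq doubles each qword, and the
 * shuffled sign mask ANDed with mask supplies both the cross-qword
 * carry bit and the 0x87 reduction constant when the top bit was set.
 */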
#define gf128mul_x_ble(iv, mask, tmp) \
	vpsrad $31, iv, tmp; \
	vpaddq iv, iv, iv; \
	vpshufd $0x13, tmp, tmp; \
	vpand mask, tmp, tmp; \
	vpxor tmp, iv, iv;
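
/*
 * XTS: derive eight consecutive tweaks from the one at (iv), XOR each
 * with its source block into x0..x7, and stash the tweaks at dst so
 * store_xts_8way can XOR them back in after the cipher pass.  The
 * ninth tweak is written back to (iv).
 */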
#define load_xts_8way(iv, src, dst, x0, x1, x2, x3, x4, x5, x6, x7, tiv, t0, \
		      t1, xts_gf128mul_and_shl1_mask) \
	vmovdqa xts_gf128mul_and_shl1_mask, t0; \
	\
	/* load IV */ \
	vmovdqu (iv), tiv; \
	vpxor (0*16)(src), tiv, x0; \
	vmovdqu tiv, (0*16)(dst); \
	\
	/* construct and store IVs, also xor with source */ \
	gf128mul_x_ble(tiv, t0, t1); \
	vpxor (1*16)(src), tiv, x1; \
	vmovdqu tiv, (1*16)(dst); \
	\
	gf128mul_x_ble(tiv, t0, t1); \
	vpxor (2*16)(src), tiv, x2; \
	vmovdqu tiv, (2*16)(dst); \
	\
	gf128mul_x_ble(tiv, t0, t1); \
	vpxor (3*16)(src), tiv, x3; \
	vmovdqu tiv, (3*16)(dst); \
	\
	gf128mul_x_ble(tiv, t0, t1); \
	vpxor (4*16)(src), tiv, x4; \
	vmovdqu tiv, (4*16)(dst); \
	\
	gf128mul_x_ble(tiv, t0, t1); \
	vpxor (5*16)(src), tiv, x5; \
	vmovdqu tiv, (5*16)(dst); \
	\
	gf128mul_x_ble(tiv, t0, t1); \
	vpxor (6*16)(src), tiv, x6; \
	vmovdqu tiv, (6*16)(dst); \
	\
	gf128mul_x_ble(tiv, t0, t1); \
	vpxor (7*16)(src), tiv, x7; \
	vmovdqu tiv, (7*16)(dst); \
	\
	gf128mul_x_ble(tiv, t0, t1); \
	vmovdqu tiv, (iv);
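
/*
 * XTS output: XOR x0..x7 with the tweaks that load_xts_8way stashed at
 * dst, then overwrite dst with the final blocks.
 */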
#define store_xts_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \
	vpxor (0*16)(dst), x0, x0; \
	vpxor (1*16)(dst), x1, x1; \
	vpxor (2*16)(dst), x2, x2; \
	vpxor (3*16)(dst), x3, x3; \
	vpxor (4*16)(dst), x4, x4; \
	vpxor (5*16)(dst), x5, x5; \
	vpxor (6*16)(dst), x6, x6; \
	vpxor (7*16)(dst), x7, x7; \
	store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);