  1. /*
  2. * Glue Code for SSE2 assembler versions of Serpent Cipher
  3. *
  4. * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
  5. *
  6. * Glue code based on aesni-intel_glue.c by:
  7. * Copyright (C) 2008, Intel Corp.
  8. * Author: Huang Ying <ying.huang@intel.com>
  9. *
  10. * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
  11. * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
  12. * CTR part based on code (crypto/ctr.c) by:
  13. * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
  14. *
  15. * This program is free software; you can redistribute it and/or modify
  16. * it under the terms of the GNU General Public License as published by
  17. * the Free Software Foundation; either version 2 of the License, or
  18. * (at your option) any later version.
  19. *
  20. * This program is distributed in the hope that it will be useful,
  21. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  22. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  23. * GNU General Public License for more details.
  24. *
  25. * You should have received a copy of the GNU General Public License
  26. * along with this program; if not, write to the Free Software
  27. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  28. * USA
  29. *
  30. */
  31. #include <linux/module.h>
  32. #include <linux/hardirq.h>
  33. #include <linux/types.h>
  34. #include <linux/crypto.h>
  35. #include <linux/err.h>
  36. #include <crypto/ablk_helper.h>
  37. #include <crypto/algapi.h>
  38. #include <crypto/serpent.h>
  39. #include <crypto/cryptd.h>
  40. #include <crypto/b128ops.h>
  41. #include <crypto/ctr.h>
  42. #include <crypto/lrw.h>
  43. #include <crypto/xts.h>
  44. #include <asm/crypto/serpent-sse2.h>
  45. #include <asm/crypto/glue_helper.h>
  46. static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
  47. {
  48. u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
  49. unsigned int j;
  50. for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
  51. ivs[j] = src[j];
  52. serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);
  53. for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
  54. u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
  55. }
  56. static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
  57. {
  58. be128 ctrblk;
  59. le128_to_be128(&ctrblk, iv);
  60. le128_inc(iv);
  61. __serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
  62. u128_xor(dst, src, (u128 *)&ctrblk);
  63. }
  64. static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src,
  65. le128 *iv)
  66. {
  67. be128 ctrblks[SERPENT_PARALLEL_BLOCKS];
  68. unsigned int i;
  69. for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
  70. if (dst != src)
  71. dst[i] = src[i];
  72. le128_to_be128(&ctrblks[i], iv);
  73. le128_inc(iv);
  74. }
  75. serpent_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks);
  76. }
/*
 * ECB encryption dispatch table for the common glue layer: use the 8-way
 * SSE2 routine while a full batch of blocks is available, fall back to
 * the scalar C implementation for the tail.  The FPU is only enabled for
 * at least fpu_blocks_limit blocks of work.
 */
static const struct common_glue_ctx serpent_enc = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
	} }
};
/*
 * CTR dispatch table: 8-way keystream generation when possible, the
 * single-block helper for the remainder.
 */
static const struct common_glue_ctx serpent_ctr = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) }
	} }
};
/*
 * ECB decryption dispatch table: 8-way SSE2 decrypt for full batches,
 * scalar decrypt for the tail.
 */
static const struct common_glue_ctx serpent_dec = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
	} }
};
/*
 * CBC decryption dispatch table: the 8-way helper handles the chaining
 * xor itself; the scalar entry decrypts one block (glue code applies the
 * chaining for it).
 */
static const struct common_glue_ctx serpent_dec_cbc = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
	} }
};
  121. static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
  122. struct scatterlist *src, unsigned int nbytes)
  123. {
  124. return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes);
  125. }
  126. static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
  127. struct scatterlist *src, unsigned int nbytes)
  128. {
  129. return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes);
  130. }
  131. static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
  132. struct scatterlist *src, unsigned int nbytes)
  133. {
  134. return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc,
  135. dst, src, nbytes);
  136. }
  137. static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
  138. struct scatterlist *src, unsigned int nbytes)
  139. {
  140. return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src,
  141. nbytes);
  142. }
  143. static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
  144. struct scatterlist *src, unsigned int nbytes)
  145. {
  146. return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes);
  147. }
/*
 * Enable the FPU/SSE section if @nbytes covers at least a full 8-block
 * batch (per glue_fpu_begin's limit logic).  Returns the new fpu_enabled
 * state, which must eventually be handed to serpent_fpu_end().
 */
static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
	return glue_fpu_begin(SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS,
			      NULL, fpu_enabled, nbytes);
}
/* Close an FPU section opened by serpent_fpu_begin(), if one is active. */
static inline void serpent_fpu_end(bool fpu_enabled)
{
	glue_fpu_end(fpu_enabled);
}
/*
 * Per-request state handed to the LRW/XTS chunk callbacks: the expanded
 * Serpent key plus whether an FPU section is currently open.
 */
struct crypt_priv {
	struct serpent_ctx *ctx;	/* expanded cipher key */
	bool fpu_enabled;		/* updated by serpent_fpu_begin() */
};
  161. static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
  162. {
  163. const unsigned int bsize = SERPENT_BLOCK_SIZE;
  164. struct crypt_priv *ctx = priv;
  165. int i;
  166. ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
  167. if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
  168. serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
  169. return;
  170. }
  171. for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
  172. __serpent_encrypt(ctx->ctx, srcdst, srcdst);
  173. }
  174. static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
  175. {
  176. const unsigned int bsize = SERPENT_BLOCK_SIZE;
  177. struct crypt_priv *ctx = priv;
  178. int i;
  179. ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
  180. if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
  181. serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
  182. return;
  183. }
  184. for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
  185. __serpent_decrypt(ctx->ctx, srcdst, srcdst);
  186. }
/* Transform context for lrw(serpent): LRW tweak table + cipher key. */
struct serpent_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	struct serpent_ctx serpent_ctx;
};
/*
 * lrw(serpent) setkey: the last SERPENT_BLOCK_SIZE bytes of @key are the
 * LRW tweak key, the rest is the Serpent cipher key.
 *
 * NOTE(review): keylen - SERPENT_BLOCK_SIZE assumes keylen is at least
 * min_keysize (SERPENT_MIN_KEY_SIZE + SERPENT_BLOCK_SIZE), which the
 * crypto core enforces via the alg definition below.
 */
static int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
			       SERPENT_BLOCK_SIZE);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen -
			      SERPENT_BLOCK_SIZE);
}
/*
 * blkcipher ->encrypt handler for lrw(serpent).  lrw_crypt() walks the
 * scatterlists and feeds chunks to encrypt_callback(), which tracks the
 * FPU section state in crypt_ctx.fpu_enabled; close it when done.
 */
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];	/* tweak scratch buffer */
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->serpent_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	/* Sleeping is not allowed while the FPU section may be open. */
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}
/*
 * blkcipher ->decrypt handler for lrw(serpent); identical setup to
 * lrw_encrypt() but dispatching decrypt_callback().
 */
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];	/* tweak scratch buffer */
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->serpent_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	/* Sleeping is not allowed while the FPU section may be open. */
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}
/* tfm destructor for lrw(serpent): release the LRW multiplication table. */
static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}
/* Transform context for xts(serpent): separate tweak and data keys. */
struct serpent_xts_ctx {
	struct serpent_ctx tweak_ctx;	/* key for encrypting the tweak */
	struct serpent_ctx crypt_ctx;	/* key for the data blocks */
};
/*
 * xts(serpent) setkey: an XTS key is two equal-length Serpent keys
 * concatenated — data key first, tweak key second.
 */
static int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
}
/*
 * blkcipher ->encrypt handler for xts(serpent).  xts_crypt() computes
 * the tweak with the scalar cipher and hands data chunks to
 * encrypt_callback(), which manages the FPU section state.
 */
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];	/* tweak scratch buffer */
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	/* Sleeping is not allowed while the FPU section may be open. */
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}
/*
 * blkcipher ->decrypt handler for xts(serpent); same structure as
 * xts_encrypt() — note the tweak is always *encrypted*, only the data
 * callback switches to decryption.
 */
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];	/* tweak scratch buffer */
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	/* Sleeping is not allowed while the FPU section may be open. */
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}
/*
 * Algorithm registrations.  The first five "__*" entries are internal
 * synchronous blkciphers that may only run with the FPU usable; the last
 * five are the user-visible async ablkcipher wrappers (via ablk_helper /
 * cryptd) that defer to the internal ones.
 */
static struct crypto_alg serpent_algs[10] = { {
	/* internal ECB blkcipher */
	.cra_name		= "__ecb-serpent-sse2",
	.cra_driver_name	= "__driver-ecb-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.setkey		= serpent_setkey,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	/* internal CBC blkcipher */
	.cra_name		= "__cbc-serpent-sse2",
	.cra_driver_name	= "__driver-cbc-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.setkey		= serpent_setkey,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}, {
	/* internal CTR blkcipher (stream mode: blocksize 1, needs IV) */
	.cra_name		= "__ctr-serpent-sse2",
	.cra_driver_name	= "__driver-ctr-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= serpent_setkey,
			/* CTR is its own inverse */
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
}, {
	/* internal LRW blkcipher (key carries an extra block of tweak key) */
	.cra_name		= "__lrw-serpent-sse2",
	.cra_driver_name	= "__driver-lrw-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_exit		= lrw_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= lrw_serpent_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
}, {
	/* internal XTS blkcipher (double-length key: data + tweak keys) */
	.cra_name		= "__xts-serpent-sse2",
	.cra_driver_name	= "__driver-xts-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_xts_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize	= SERPENT_MAX_KEY_SIZE * 2,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= xts_serpent_setkey,
			.encrypt	= xts_encrypt,
			.decrypt	= xts_decrypt,
		},
	},
}, {
	/* public async wrapper around __ecb-serpent-sse2 */
	.cra_name		= "ecb(serpent)",
	.cra_driver_name	= "ecb-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	/* public async wrapper around __cbc-serpent-sse2 */
	.cra_name		= "cbc(serpent)",
	.cra_driver_name	= "cbc-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			/* encrypt path stays synchronous (__ablk_encrypt) */
			.encrypt	= __ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	/* public async wrapper around __ctr-serpent-sse2 */
	.cra_name		= "ctr(serpent)",
	.cra_driver_name	= "ctr-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			/* CTR decryption == encryption */
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_encrypt,
			.geniv		= "chainiv",
		},
	},
}, {
	/* public async wrapper around __lrw-serpent-sse2 */
	.cra_name		= "lrw(serpent)",
	.cra_driver_name	= "lrw-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	/* public async wrapper around __xts-serpent-sse2 */
	.cra_name		= "xts(serpent)",
	.cra_driver_name	= "xts-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize	= SERPENT_MAX_KEY_SIZE * 2,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
} };
  541. static int __init serpent_sse2_init(void)
  542. {
  543. if (!cpu_has_xmm2) {
  544. printk(KERN_INFO "SSE2 instructions are not detected.\n");
  545. return -ENODEV;
  546. }
  547. return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
  548. }
/* Module exit: unregister everything registered at init. */
static void __exit serpent_sse2_exit(void)
{
	crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}
module_init(serpent_sse2_init);
module_exit(serpent_sse2_exit);

MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
MODULE_LICENSE("GPL");
/* Allow auto-loading when "serpent" is requested via the crypto API. */
MODULE_ALIAS_CRYPTO("serpent");