eseqiv.c

/*
 * eseqiv: Encrypted Sequence Number IV Generator
 *
 * This generator generates an IV based on a sequence number by xoring it
 * with a salt and then encrypting it with the same key as used to encrypt
 * the plain text. This algorithm requires that the block size be equal
 * to the IV size. It is mainly useful for CBC.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
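/*
 * In effect, with a CBC mode cipher E_k (keyed with the data key) and a
 * per-transform random salt, the IV generated for sequence number seq is
 *
 *	IV = E_k(salt XOR pad(be64(seq)))
 *
 * where pad() zero-extends the 64-bit sequence number to the IV size.
 * The code below achieves this by prepending the padded sequence number
 * to the plaintext and using the salt as the IV of the underlying
 * encryption, so the first ciphertext block becomes the generated IV.
 */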
#include <crypto/internal/skcipher.h>
#include <crypto/rng.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct eseqiv_request_ctx {
	struct scatterlist src[2];
	struct scatterlist dst[2];
	char tail[];
};

struct eseqiv_ctx {
	spinlock_t lock;
	unsigned int reqoff;
	char salt[];
};
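
/*
 * Copy the generated IV out of the aligned scratch area at the start of
 * reqctx->tail back into the caller's req->giv.  Only needed when the IV
 * could not be written into req->giv in place.
 */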
static void eseqiv_complete2(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);

	memcpy(req->giv, PTR_ALIGN((u8 *)reqctx->tail,
			 crypto_ablkcipher_alignmask(geniv) + 1),
	       crypto_ablkcipher_ivsize(geniv));
}

static void eseqiv_complete(struct crypto_async_request *base, int err)
{
	struct skcipher_givcrypt_request *req = base->data;

	if (err)
		goto out;

	eseqiv_complete2(req);

out:
	skcipher_givcrypt_complete(req, err);
}
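
/*
 * Generate the IV for one request: issue a sub-request on the underlying
 * cipher that encrypts an extra leading block holding the sequence
 * number, chained in front of the original plaintext, with ctx->salt as
 * the IV of that sub-request.
 */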
static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
	struct ablkcipher_request *subreq;
	crypto_completion_t compl;
	void *data;
	struct scatterlist *osrc, *odst;
	struct scatterlist *dst;
	struct page *srcp;
	struct page *dstp;
	u8 *giv;
	u8 *vsrc;
	u8 *vdst;
	__be64 seq;
	unsigned int ivsize;
	unsigned int len;
	int err;

	subreq = (void *)(reqctx->tail + ctx->reqoff);
	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));

	giv = req->giv;
	compl = req->creq.base.complete;
	data = req->creq.base.data;

	osrc = req->creq.src;
	odst = req->creq.dst;
	srcp = sg_page(osrc);
	dstp = sg_page(odst);
	vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + osrc->offset;
	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + odst->offset;

	ivsize = crypto_ablkcipher_ivsize(geniv);
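
	/*
	 * If req->giv immediately precedes the data in either the source or
	 * the destination buffer, the cipher can write the generated IV into
	 * it directly.  Otherwise use the aligned scratch buffer in
	 * reqctx->tail and copy the IV back on completion.
	 */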
	if (vsrc != giv + ivsize && vdst != giv + ivsize) {
		giv = PTR_ALIGN((u8 *)reqctx->tail,
				crypto_ablkcipher_alignmask(geniv) + 1);
		compl = eseqiv_complete;
		data = req;
	}

	ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
					data);
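
	/*
	 * Chain the IV buffer in front of the original source (and, if
	 * different, destination) scatterlist so it is processed as the
	 * first block of the sub-request.
	 */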
	sg_init_table(reqctx->src, 2);
	sg_set_buf(reqctx->src, giv, ivsize);
	scatterwalk_crypto_chain(reqctx->src, osrc, vsrc == giv + ivsize, 2);

	dst = reqctx->src;
	if (osrc != odst) {
		sg_init_table(reqctx->dst, 2);
		sg_set_buf(reqctx->dst, giv, ivsize);
		scatterwalk_crypto_chain(reqctx->dst, odst,
					 vdst == giv + ivsize, 2);
		dst = reqctx->dst;
	}

	ablkcipher_request_set_crypt(subreq, reqctx->src, dst,
				     req->creq.nbytes + ivsize,
				     req->creq.info);
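
	/*
	 * The salt becomes the IV of the sub-request; req->giv is loaded
	 * with the big-endian sequence number, zero-padded to the IV size.
	 */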
	memcpy(req->creq.info, ctx->salt, ivsize);

	len = ivsize;
	if (ivsize > sizeof(u64)) {
		memset(req->giv, 0, ivsize - sizeof(u64));
		len = sizeof(u64);
	}

	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + ivsize - len, &seq, len);

	err = crypto_ablkcipher_encrypt(subreq);
	if (err)
		goto out;

	if (giv != req->giv)
		eseqiv_complete2(req);

out:
	return err;
}
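
/*
 * Per-transform setup: reserve room in the request context for an IV
 * scratch buffer aligned to the cipher's alignmask, followed by the
 * ablkcipher sub-request, and fill the salt with bytes from the default
 * RNG.  givencrypt is only installed if the RNG is available.
 */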
static int eseqiv_init(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	unsigned long alignmask;
	unsigned int reqsize;
	int err;

	spin_lock_init(&ctx->lock);

	alignmask = crypto_tfm_ctx_alignment() - 1;
	reqsize = sizeof(struct eseqiv_request_ctx);

	if (alignmask & reqsize) {
		alignmask &= reqsize;
		alignmask--;
	}

	alignmask = ~alignmask;
	alignmask &= crypto_ablkcipher_alignmask(geniv);

	reqsize += alignmask;
	reqsize += crypto_ablkcipher_ivsize(geniv);
	reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());

	ctx->reqoff = reqsize - sizeof(struct eseqiv_request_ctx);

	tfm->crt_ablkcipher.reqsize = reqsize +
				      sizeof(struct ablkcipher_request);

	err = 0;
	if (!crypto_get_default_rng()) {
		crypto_ablkcipher_crt(geniv)->givencrypt = eseqiv_givencrypt;
		err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
					   crypto_ablkcipher_ivsize(geniv));
		crypto_put_default_rng();
	}

	return err ?: skcipher_geniv_init(tfm);
}

static struct crypto_template eseqiv_tmpl;
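
/*
 * Build an "eseqiv" instance around the underlying cipher.  The cipher's
 * IV size must equal its block size, and the instance context is extended
 * by one IV worth of bytes to hold the salt.
 */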
static struct crypto_instance *eseqiv_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	int err;

	inst = skcipher_geniv_alloc(&eseqiv_tmpl, tb, 0, 0);
	if (IS_ERR(inst))
		goto out;

	err = -EINVAL;
	if (inst->alg.cra_ablkcipher.ivsize != inst->alg.cra_blocksize)
		goto free_inst;

	inst->alg.cra_init = eseqiv_init;
	inst->alg.cra_exit = skcipher_geniv_exit;

	inst->alg.cra_ctxsize = sizeof(struct eseqiv_ctx);
	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;

out:
	return inst;

free_inst:
	skcipher_geniv_free(inst);
	inst = ERR_PTR(err);
	goto out;
}

static struct crypto_template eseqiv_tmpl = {
	.name = "eseqiv",
	.alloc = eseqiv_alloc,
	.free = skcipher_geniv_free,
	.module = THIS_MODULE,
};

static int __init eseqiv_module_init(void)
{
	return crypto_register_template(&eseqiv_tmpl);
}

static void __exit eseqiv_module_exit(void)
{
	crypto_unregister_template(&eseqiv_tmpl);
}

module_init(eseqiv_module_init);
module_exit(eseqiv_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator");
MODULE_ALIAS_CRYPTO("eseqiv");