ablkcipher.c

/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/des.h>

#include "cipher.h"

static LIST_HEAD(ablkcipher_algs);
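
/*
 * DMA completion callback, run when the DMA engine signals that the
 * transfer for a cipher request has finished: terminate the channels,
 * tear down the DMA mappings and the destination scatterlist table,
 * read back the engine status, and report the result to the core queue.
 */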
static void qce_ablkcipher_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	u32 status;
	int error;
	bool diff_dst;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
			error);

	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	sg_free_table(&rctx->dst_tbl);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);

	qce->async_req_done(tmpl->qce, error);
}
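/*
 * Request handler invoked by the qce core for each dequeued cipher
 * request. It builds a destination scatterlist table with one extra
 * entry for the device result buffer, DMA-maps source and destination
 * (a single bidirectional mapping when they alias), then kicks off the
 * DMA transfer and the crypto engine via qce_start().
 */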
static int
qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int ret;

	rctx->iv = req->info;
	rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	rctx->cryptlen = req->nbytes;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	/* sg_nents_for_len() returns a negative errno on a too-short list */
	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (rctx->src_nents < 0)
		return rctx->src_nents;

	if (diff_dst) {
		rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (rctx->dst_nents < 0)
			return rctx->dst_nents;
	} else {
		rctx->dst_nents = rctx->src_nents;
	}

	/* one extra entry for the result buffer appended below */
	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	/* dma_map_sg() returns 0 on failure, never a negative errno */
	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (!ret) {
		ret = -EIO;
		goto error_free;
	}

	if (diff_dst) {
		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
		if (!ret) {
			ret = -EIO;
			goto error_unmap_dst;
		}
		rctx->src_sg = req->src;
	} else {
		rctx->src_sg = rctx->dst_sg;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
			       rctx->dst_sg, rctx->dst_nents,
			       qce_ablkcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}
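/*
 * setkey: AES keys of 128 and 256 bits are handled in hardware; any
 * other AES length (e.g. 192-bit) is delegated to the software fallback
 * tfm. For DES, des_ekey() returns 0 for a weak key, which is rejected
 * when the user requested CRYPTO_TFM_REQ_WEAK_KEY checking.
 */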
static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
				 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	unsigned long flags = to_cipher_tmpl(tfm)->alg_flags;
	int ret;

	if (!key || !keylen)
		return -EINVAL;

	if (IS_AES(flags)) {
		switch (keylen) {
		case AES_KEYSIZE_128:
		case AES_KEYSIZE_256:
			break;
		default:
			goto fallback;
		}
	} else if (IS_DES(flags)) {
		u32 tmp[DES_EXPKEY_WORDS];

		ret = des_ekey(tmp, key);
		if (!ret && (crypto_ablkcipher_get_flags(ablk) &
			     CRYPTO_TFM_REQ_WEAK_KEY))
			goto weakkey;
	}

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;

fallback:
	ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
	if (!ret)
		ctx->enc_keylen = keylen;
	return ret;

weakkey:
	crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY);
	return -EINVAL;
}
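/*
 * Common encrypt/decrypt path. Unsupported AES key sizes are redirected
 * to the fallback ablkcipher; everything else is queued to the qce core,
 * which later calls qce_ablkcipher_async_req_handle() asynchronously.
 */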
static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	int ret;

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;

	if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
	    ctx->enc_keylen != AES_KEYSIZE_256) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		ret = encrypt ? crypto_ablkcipher_encrypt(req) :
				crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return ret;
	}

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}
static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	return qce_ablkcipher_crypt(req, 1);
}

static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	return qce_ablkcipher_crypt(req, 0);
}
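/*
 * tfm init/exit: allocate (and later free) the software fallback that
 * backs the AES key sizes the engine cannot handle.
 */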
static int qce_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);

	ctx->fallback = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm),
						CRYPTO_ALG_TYPE_ABLKCIPHER,
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	return 0;
}
static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(ctx->fallback);
}
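/*
 * Static description of every algorithm this driver exposes; one
 * crypto_alg is instantiated per entry at probe time.
 */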
struct qce_ablkcipher_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int ivsize;
	unsigned int min_keysize;
	unsigned int max_keysize;
};
static const struct qce_ablkcipher_def ablkcipher_def[] = {
	{
		.flags = QCE_ALG_AES | QCE_MODE_ECB,
		.name = "ecb(aes)",
		.drv_name = "ecb-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_CBC,
		.name = "cbc(aes)",
		.drv_name = "cbc-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_CTR,
		.name = "ctr(aes)",
		.drv_name = "ctr-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_XTS,
		.name = "xts(aes)",
		.drv_name = "xts-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_DES | QCE_MODE_ECB,
		.name = "ecb(des)",
		.drv_name = "ecb-des-qce",
		.blocksize = DES_BLOCK_SIZE,
		.ivsize = 0,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_DES | QCE_MODE_CBC,
		.name = "cbc(des)",
		.drv_name = "cbc-des-qce",
		.blocksize = DES_BLOCK_SIZE,
		.ivsize = DES_BLOCK_SIZE,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_3DES | QCE_MODE_ECB,
		.name = "ecb(des3_ede)",
		.drv_name = "ecb-3des-qce",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.ivsize = 0,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_3DES | QCE_MODE_CBC,
		.name = "cbc(des3_ede)",
		.drv_name = "cbc-3des-qce",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
	},
};
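/*
 * Build and register one crypto_alg from its table entry. Registered
 * templates are kept on ablkcipher_algs so they can be unregistered
 * and freed on driver removal.
 */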
static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
				       struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct crypto_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.crypto;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	alg->cra_blocksize = def->blocksize;
	alg->cra_ablkcipher.ivsize = def->ivsize;
	alg->cra_ablkcipher.min_keysize = def->min_keysize;
	alg->cra_ablkcipher.max_keysize = def->max_keysize;
	alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
	alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
	alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;

	alg->cra_priority = 300;
	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
			 CRYPTO_ALG_NEED_FALLBACK;
	alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
	alg->cra_alignmask = 0;
	alg->cra_type = &crypto_ablkcipher_type;
	alg->cra_module = THIS_MODULE;
	alg->cra_init = qce_ablkcipher_init;
	alg->cra_exit = qce_ablkcipher_exit;
	INIT_LIST_HEAD(&alg->cra_list);

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_alg(alg);
	if (ret) {
		kfree(tmpl);
		dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
		return ret;
	}

	list_add_tail(&tmpl->entry, &ablkcipher_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
	return 0;
}
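/*
 * Unregister and free everything qce_ablkcipher_register() managed to
 * register; also used as the error path when registration of one of
 * the ablkcipher_def[] entries fails midway.
 */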
static void qce_ablkcipher_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
		crypto_unregister_alg(&tmpl->alg.crypto);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}
static int qce_ablkcipher_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
		ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_ablkcipher_unregister(qce);
	return ret;
}
const struct qce_algo_ops ablkcipher_ops = {
	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	.register_algs = qce_ablkcipher_register,
	.unregister_algs = qce_ablkcipher_unregister,
	.async_req_handle = qce_ablkcipher_async_req_handle,
};
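
/*
 * Illustrative only, not part of this driver: a minimal sketch of how a
 * kernel client of the legacy ablkcipher API would exercise one of the
 * algorithms registered above. It assumes a sleepable context, elides
 * waiting for the async completion, and abbreviates error handling;
 * my_complete_cb, key, src_sg, dst_sg, nbytes and iv are hypothetical
 * caller-side names.
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *	int err;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *					my_complete_cb, NULL);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *
 *	err = crypto_ablkcipher_encrypt(req);
 *	(-EINPROGRESS here means the request was queued; completion is
 *	 reported through my_complete_cb, as with qce's async path)
 *
 *	ablkcipher_request_free(req);
 *	crypto_free_ablkcipher(tfm);
 */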