/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode, and for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi/README
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include "sun4i-ss.h"
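
/*
 * Optimized polling path: it can only be used when every scatterlist entry
 * of both src and dst has a length that is a multiple of 4, so the data can
 * be pushed to the RX FIFO and pulled from the TX FIFO as whole 32-bit
 * words without intermediate buffering.
 */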
static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;
        unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
        struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
        u32 mode = ctx->mode;
        /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
        u32 rx_cnt = SS_RX_DEFAULT;
        u32 tx_cnt = 0;
        u32 spaces;
        u32 v;
        int i, err = 0;
        unsigned int ileft = areq->nbytes;
        unsigned int oleft = areq->nbytes;
        unsigned int todo;
        struct sg_mapping_iter mi, mo;
        unsigned int oi, oo; /* offset for in and out */
        unsigned long flags;

        if (areq->nbytes == 0)
                return 0;

        if (!areq->info) {
                dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
                return -EINVAL;
        }

        if (!areq->src || !areq->dst) {
                dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
                return -EINVAL;
        }

        spin_lock_irqsave(&ss->slock, flags);

        for (i = 0; i < op->keylen; i += 4)
                writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

        if (areq->info) {
                for (i = 0; i < 4 && i < ivsize / 4; i++) {
                        v = *(u32 *)(areq->info + i * 4);
                        writel(v, ss->base + SS_IV0 + i * 4);
                }
        }
        writel(mode, ss->base + SS_CTL);

        sg_miter_start(&mi, areq->src, sg_nents(areq->src),
                       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
        sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
                       SG_MITER_TO_SG | SG_MITER_ATOMIC);
        sg_miter_next(&mi);
        sg_miter_next(&mo);
        if (!mi.addr || !mo.addr) {
                dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
                err = -EINVAL;
                goto release_ss;
        }

        ileft = areq->nbytes / 4;
        oleft = areq->nbytes / 4;
        oi = 0;
        oo = 0;
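
        /*
         * Poll the FIFO status register: write as many input words as the
         * RX FIFO can accept, then read back the words the TX FIFO already
         * holds, until the whole request has been processed.
         */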
        do {
                todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
                if (todo > 0) {
                        ileft -= todo;
                        writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
                        oi += todo * 4;
                }
                if (oi == mi.length) {
                        sg_miter_next(&mi);
                        oi = 0;
                }

                spaces = readl(ss->base + SS_FCSR);
                rx_cnt = SS_RXFIFO_SPACES(spaces);
                tx_cnt = SS_TXFIFO_SPACES(spaces);

                todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
                if (todo > 0) {
                        oleft -= todo;
                        readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
                        oo += todo * 4;
                }
                if (oo == mo.length) {
                        sg_miter_next(&mo);
                        oo = 0;
                }
        } while (oleft > 0);
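
        /*
         * Write the updated IV from the IV registers back into the request,
         * so that a chained (CBC) request can continue from it.
         */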
        if (areq->info) {
                for (i = 0; i < 4 && i < ivsize / 4; i++) {
                        v = readl(ss->base + SS_IV0 + i * 4);
                        *(u32 *)(areq->info + i * 4) = v;
                }
        }

release_ss:
        sg_miter_stop(&mi);
        sg_miter_stop(&mo);
        writel(0, ss->base + SS_CTL);
        spin_unlock_irqrestore(&ss->slock, flags);

        return err;
}

/* Generic function that supports SGs with a size that is not a multiple of 4 */
static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;
        int no_chunk = 1;
        struct scatterlist *in_sg = areq->src;
        struct scatterlist *out_sg = areq->dst;
        unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
        struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
        u32 mode = ctx->mode;
        /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
        u32 rx_cnt = SS_RX_DEFAULT;
        u32 tx_cnt = 0;
        u32 v;
        u32 spaces;
        int i, err = 0;
        unsigned int ileft = areq->nbytes;
        unsigned int oleft = areq->nbytes;
        unsigned int todo;
        struct sg_mapping_iter mi, mo;
        unsigned int oi, oo; /* offset for in and out */
        char buf[4 * SS_RX_MAX];  /* buffer for linearizing the source SG */
        char bufo[4 * SS_TX_MAX]; /* buffer for linearizing the destination SG */
        unsigned int ob = 0;  /* offset in buf */
        unsigned int obo = 0; /* offset in bufo */
        unsigned int obl = 0; /* length of data in bufo */
        unsigned long flags;

        if (areq->nbytes == 0)
                return 0;

        if (!areq->info) {
                dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
                return -EINVAL;
        }

        if (!areq->src || !areq->dst) {
                dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
                return -EINVAL;
        }

        /*
         * If we only have SGs with a size that is a multiple of 4,
         * we can use the SS optimized function.
         */
        while (in_sg && no_chunk == 1) {
                if ((in_sg->length % 4) != 0)
                        no_chunk = 0;
                in_sg = sg_next(in_sg);
        }
        while (out_sg && no_chunk == 1) {
                if ((out_sg->length % 4) != 0)
                        no_chunk = 0;
                out_sg = sg_next(out_sg);
        }

        if (no_chunk == 1)
                return sun4i_ss_opti_poll(areq);
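
        /*
         * Unaligned case: program the key, IV and mode exactly as in the
         * optimized path, but use the buf/bufo bounce buffers for the parts
         * that cannot be transferred as whole 32-bit words.
         */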
        spin_lock_irqsave(&ss->slock, flags);

        for (i = 0; i < op->keylen; i += 4)
                writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

        if (areq->info) {
                for (i = 0; i < 4 && i < ivsize / 4; i++) {
                        v = *(u32 *)(areq->info + i * 4);
                        writel(v, ss->base + SS_IV0 + i * 4);
                }
        }
        writel(mode, ss->base + SS_CTL);

        sg_miter_start(&mi, areq->src, sg_nents(areq->src),
                       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
        sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
                       SG_MITER_TO_SG | SG_MITER_ATOMIC);
        sg_miter_next(&mi);
        sg_miter_next(&mo);
        if (!mi.addr || !mo.addr) {
                dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
                err = -EINVAL;
                goto release_ss;
        }

        ileft = areq->nbytes;
        oleft = areq->nbytes;
        oi = 0;
        oo = 0;
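
        /*
         * Main loop: feed the RX FIFO from the source SG (directly when a
         * whole word is available, through buf otherwise) and drain the TX
         * FIFO into the destination SG (through bufo when less than a whole
         * word can be stored directly).
         */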
        while (oleft > 0) {
                if (ileft > 0) {
                        /*
                         * todo is the number of consecutive 4-byte words that
                         * we can read from the current SG.
                         */
                        todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
                        if (todo > 0 && ob == 0) {
                                writesl(ss->base + SS_RXFIFO, mi.addr + oi,
                                        todo);
                                ileft -= todo * 4;
                                oi += todo * 4;
                        } else {
                                /*
                                 * Not enough consecutive bytes, so linearize
                                 * them in buf; here todo is in bytes. After
                                 * the copy, if we hold a multiple of 4 bytes
                                 * we must be able to write all of buf in one
                                 * pass, which is why we min() with rx_cnt.
                                 */
                                todo = min3(rx_cnt * 4 - ob, ileft,
                                            mi.length - oi);
                                memcpy(buf + ob, mi.addr + oi, todo);
                                ileft -= todo;
                                oi += todo;
                                ob += todo;
                                if (ob % 4 == 0) {
                                        writesl(ss->base + SS_RXFIFO, buf,
                                                ob / 4);
                                        ob = 0;
                                }
                        }
                        if (oi == mi.length) {
                                sg_miter_next(&mi);
                                oi = 0;
                        }
                }

                spaces = readl(ss->base + SS_FCSR);
                rx_cnt = SS_RXFIFO_SPACES(spaces);
                tx_cnt = SS_TXFIFO_SPACES(spaces);
                dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u %u\n",
                        mode,
                        oi, mi.length, ileft, areq->nbytes, rx_cnt,
                        oo, mo.length, oleft, areq->nbytes, tx_cnt,
                        todo, ob);
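                /* no output word is available yet, poll the FIFO status again */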
                if (tx_cnt == 0)
                        continue;

                /* todo is in 4-byte words */
                todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
                if (todo > 0) {
                        readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
                        oleft -= todo * 4;
                        oo += todo * 4;
                        if (oo == mo.length) {
                                sg_miter_next(&mo);
                                oo = 0;
                        }
                } else {
                        /*
                         * Read tx_cnt words into bufo: we read as much as
                         * possible in order to empty the device FIFO.
                         */
                        readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
                        obl = tx_cnt * 4;
                        obo = 0;
                        do {
                                /*
                                 * How many bytes can we copy? No more than
                                 * what remains in the current SG and no more
                                 * than what remains in bufo; there is no need
                                 * to test against oleft.
                                 */
                                todo = min(mo.length - oo, obl - obo);
                                memcpy(mo.addr + oo, bufo + obo, todo);
                                oleft -= todo;
                                obo += todo;
                                oo += todo;
                                if (oo == mo.length) {
                                        sg_miter_next(&mo);
                                        oo = 0;
                                }
                        } while (obo < obl);
                        /* bufo must be fully used here */
                }
        }

        if (areq->info) {
                for (i = 0; i < 4 && i < ivsize / 4; i++) {
                        v = readl(ss->base + SS_IV0 + i * 4);
                        *(u32 *)(areq->info + i * 4) = v;
                }
        }

release_ss:
        sg_miter_stop(&mi);
        sg_miter_stop(&mo);
        writel(0, ss->base + SS_CTL);
        spin_unlock_irqrestore(&ss->slock, flags);

        return err;
}
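
/*
 * The helpers below only select the algorithm, the block mode and the
 * direction in the request context; the actual transfer is done by
 * sun4i_ss_cipher_poll().
 */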

/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}
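
/*
 * Set up the transform context: clear it, record which SS device (taken
 * from the algorithm template) will handle the requests, and reserve room
 * for the per-request context.
 */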
int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
        struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
        struct crypto_alg *alg = tfm->__crt_alg;
        struct sun4i_ss_alg_template *algt;

        memset(op, 0, sizeof(struct sun4i_tfm_ctx));

        algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
        op->ss = algt->ss;

        tfm->crt_ablkcipher.reqsize = sizeof(struct sun4i_cipher_req_ctx);

        return 0;
}

/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                        unsigned int keylen)
{
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;

        switch (keylen) {
        case 128 / 8:
                op->keymode = SS_AES_128BITS;
                break;
        case 192 / 8:
                op->keymode = SS_AES_192BITS;
                break;
        case 256 / 8:
                op->keymode = SS_AES_256BITS;
                break;
        default:
                dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        op->keylen = keylen;
        memcpy(op->key, key, keylen);

        return 0;
}

/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                        unsigned int keylen)
{
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;
        u32 flags;
        u32 tmp[DES_EXPKEY_WORDS];
        int ret;

        if (unlikely(keylen != DES_KEY_SIZE)) {
                dev_err(ss->dev, "Invalid keylen %u\n", keylen);
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        flags = crypto_ablkcipher_get_flags(tfm);

        ret = des_ekey(tmp, key);
        if (unlikely(ret == 0) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
                dev_dbg(ss->dev, "Weak key %u\n", keylen);
                return -EINVAL;
        }

        op->keylen = keylen;
        memcpy(op->key, key, keylen);

        return 0;
}

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                         unsigned int keylen)
{
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;

        if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
                dev_err(ss->dev, "Invalid keylen %u\n", keylen);
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        op->keylen = keylen;
        memcpy(op->key, key, keylen);

        return 0;
}