algif_skcipher.c

/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
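
/*
 * Illustrative user-space usage (a minimal sketch, not part of this file;
 * the algorithm name "cbc(aes)" and the 16-byte key size are assumptions):
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "skcipher",
 *		.salg_name   = "cbc(aes)",
 *	};
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *	int opfd = accept(tfmfd, NULL, 0);
 *
 * The direction (ALG_OP_ENCRYPT/ALG_OP_DECRYPT) and the IV are then passed
 * as ancillary data with sendmsg(2) on opfd, the input data is written to
 * opfd, and the result is collected with read(2)/recvmsg(2).
 */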

#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
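
/*
 * A skcipher_sg_list holds one chunk of the data queued by sendmsg() or
 * sendpage(); the chunks are chained together on ctx->tsgl.  skcipher_ctx
 * is the per-request-socket state: the queued tx data, the IV, bookkeeping
 * for partially filled pages (merge), whether more data is expected before
 * the operation runs (more), and the direction of the operation (enc).
 */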
struct skcipher_sg_list {
	struct list_head list;

	int cur;

	struct scatterlist sg[0];
};

struct skcipher_tfm {
	struct crypto_skcipher *skcipher;
	bool has_key;
};

struct skcipher_ctx {
	struct list_head tsgl;
	struct af_alg_sgl rsgl;

	void *iv;

	struct af_alg_completion completion;

	atomic_t inflight;
	unsigned used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	struct skcipher_request req;
};

struct skcipher_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};

struct skcipher_async_req {
	struct kiocb *iocb;
	struct skcipher_async_rsgl first_sgl;
	struct list_head list;
	struct scatterlist *tsg;
	atomic_t *inflight;
	struct skcipher_request req;
};
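
/*
 * Size each skcipher_sg_list so that the header plus the scatterlist
 * entries fill exactly one 4096-byte page; one entry is reserved for
 * chaining to the next list.
 */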
#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
		      sizeof(struct scatterlist) - 1)
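
/* Free the rx sgls of an async request and drop the tx page references. */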
static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
{
	struct skcipher_async_rsgl *rsgl, *tmp;
	struct scatterlist *sgl;
	struct scatterlist *sg;
	int i, n;

	list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &sreq->first_sgl)
			kfree(rsgl);
	}
	sgl = sreq->tsg;
	n = sg_nents(sgl);
	for_each_sg(sgl, sg, n, i) {
		struct page *page = sg_page(sg);

		/* some SGs may not have a page mapped */
		if (page && atomic_read(&page->_count))
			put_page(page);
	}

	kfree(sreq->tsg);
}
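
/* Async completion callback: release the request and complete the iocb. */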
static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
	struct skcipher_async_req *sreq = req->data;
	struct kiocb *iocb = sreq->iocb;

	atomic_dec(sreq->inflight);
	skcipher_free_async_sgls(sreq);
	kzfree(sreq);
	iocb->ki_complete(iocb, err, err);
}
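
/*
 * Remaining send buffer space: the socket's sndbuf (at least one page)
 * minus what is already queued.  The socket counts as writable once a
 * full page of space is available.
 */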
static inline int skcipher_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool skcipher_writable(struct sock *sk)
{
	return PAGE_SIZE <= skcipher_sndbuf(sk);
}
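
/*
 * Ensure the tail of ctx->tsgl has room for at least one more entry,
 * allocating and chaining a new page-sized list if necessary.
 */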
static int skcipher_alloc_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg = NULL;

	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
	if (!list_empty(&ctx->tsgl))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		sgl = sock_kmalloc(sk, sizeof(*sgl) +
				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		if (sg) {
			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
			sg_unmark_end(sg + (MAX_SGL_ENTS - 1));
		}

		list_add_tail(&sgl->list, &ctx->tsgl);
	}

	return 0;
}
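
/*
 * Consume `used` bytes from the front of the tx queue, dropping page
 * references when `put` is set and freeing fully drained lists.
 */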
static void skcipher_pull_sgl(struct sock *sk, int used, int put)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int i;

	while (!list_empty(&ctx->tsgl)) {
		sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			int plen = min_t(int, used, sg[i].length);

			if (!sg_page(sg + i))
				continue;

			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			if (sg[i].length)
				return;
			if (put)
				put_page(sg_page(sg + i));
			sg_assign_page(sg + i, NULL);
		}

		list_del(&sgl->list);
		sock_kfree_s(sk, sgl,
			     sizeof(*sgl) + sizeof(sgl->sg[0]) *
					    (MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
}

static void skcipher_free_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	skcipher_pull_sgl(sk, ctx->used, 1);
}

static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	return err;
}

static void skcipher_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!skcipher_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, ctx->used)) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}

static void skcipher_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
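
/*
 * Queue data from user space.  The first sendmsg() of a request may carry
 * ALG_SET_OP (encrypt/decrypt) and ALG_SET_IV as ancillary data; the
 * payload is copied into the tx scatterlists, merging into a partially
 * filled page when possible.  MSG_MORE indicates that further data will
 * follow before the operation is performed.
 */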
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	unsigned ivsize = crypto_skcipher_ivsize(tfm);
	struct skcipher_sg_list *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = false;
	bool init = false;
	int err;
	int i;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = true;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = true;
			break;
		case ALG_OP_DECRYPT:
			enc = false;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	err = -EINVAL;
	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);
	}

	while (size) {
		struct scatterlist *sg;
		unsigned long len = size;
		int plen;

		if (ctx->merge) {
			sgl = list_entry(ctx->tsgl.prev,
					 struct skcipher_sg_list, list);
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);

			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!skcipher_writable(sk)) {
			err = skcipher_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		len = min_t(unsigned long, len, skcipher_sndbuf(sk));

		err = skcipher_alloc_sgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
		sg = sgl->sg;
		if (sgl->cur)
			sg_unmark_end(sg + sgl->cur - 1);
		do {
			i = sgl->cur;
			plen = min_t(int, len, PAGE_SIZE);

			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg + i))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg + i)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg + i));
				sg_assign_page(sg + i, NULL);
				goto unlock;
			}

			sg[i].length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			sgl->cur++;
		} while (len && sgl->cur < MAX_SGL_ENTS);

		if (!size)
			sg_mark_end(sg + sgl->cur - 1);

		ctx->merge = plen & (PAGE_SIZE - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}
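
/* Zero-copy variant of sendmsg(): take a reference on the caller's page. */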
static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
				 int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!skcipher_writable(sk)) {
		err = skcipher_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = skcipher_alloc_sgl(sk);
	if (err)
		goto unlock;

	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

	if (sgl->cur)
		sg_unmark_end(sgl->sg + sgl->cur - 1);

	sg_mark_end(sgl->sg + sgl->cur);
	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

done:
	ctx->more = flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
{
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int nents = 0;

	list_for_each_entry(sgl, &ctx->tsgl, list) {
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		nents += sg_nents(sg);
	}
	return nents;
}
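
/*
 * Asynchronous read path: ownership of the queued tx scatterlists is
 * transferred to a skcipher_async_req, the cipher request is submitted,
 * and -EIOCBQUEUED is returned while the operation is in flight; the
 * iocb is completed from skcipher_async_cb().
 */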
static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
				  int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	struct skcipher_async_req *sreq;
	struct skcipher_request *req;
	struct skcipher_async_rsgl *last_rsgl = NULL;
	unsigned int txbufs = 0, len = 0, tx_nents;
	unsigned int reqsize = crypto_skcipher_reqsize(tfm);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	int err = -ENOMEM;
	bool mark = false;
	char *iv;

	sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
	if (unlikely(!sreq))
		goto out;

	req = &sreq->req;
	iv = (char *)(req + 1) + reqsize;
	sreq->iocb = msg->msg_iocb;
	INIT_LIST_HEAD(&sreq->list);
	sreq->inflight = &ctx->inflight;

	lock_sock(sk);
	tx_nents = skcipher_all_sg_nents(ctx);
	sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
	if (unlikely(!sreq->tsg))
		goto unlock;
	sg_init_table(sreq->tsg, tx_nents);
	memcpy(iv, ctx->iv, ivsize);
	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      skcipher_async_cb, sreq);

	while (iov_iter_count(&msg->msg_iter)) {
		struct skcipher_async_rsgl *rsgl;
		int used;

		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto free;
		}
		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		used = min_t(unsigned long, ctx->used,
			     iov_iter_count(&msg->msg_iter));
		used = min_t(unsigned long, used, sg->length);

		if (txbufs == tx_nents) {
			struct scatterlist *tmp;
			int x;

			/* Ran out of tx slots in async request
			 * need to expand */
			tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
				      GFP_KERNEL);
			if (!tmp) {
				/* err may still hold a positive byte count
				 * from a previous iteration here */
				err = -ENOMEM;
				goto free;
			}

			sg_init_table(tmp, tx_nents * 2);
			for (x = 0; x < tx_nents; x++)
				sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
					    sreq->tsg[x].length,
					    sreq->tsg[x].offset);
			kfree(sreq->tsg);
			sreq->tsg = tmp;
			tx_nents *= 2;
			mark = true;
		}
		/* Need to take over the tx sgl from ctx
		 * to the asynch req - these sgls will be freed later */
		sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
			    sg->offset);

		if (list_empty(&sreq->list)) {
			rsgl = &sreq->first_sgl;
			list_add_tail(&rsgl->list, &sreq->list);
		} else {
			rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
			if (!rsgl) {
				err = -ENOMEM;
				goto free;
			}
			list_add_tail(&rsgl->list, &sreq->list);
		}

		used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
		err = used;
		if (used < 0)
			goto free;
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;
		len += used;
		skcipher_pull_sgl(sk, used, 0);
		iov_iter_advance(&msg->msg_iter, used);
	}

	if (mark)
		sg_mark_end(sreq->tsg + txbufs - 1);

	skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
				   len, iv);
	err = ctx->enc ? crypto_skcipher_encrypt(req) :
			 crypto_skcipher_decrypt(req);
	if (err == -EINPROGRESS) {
		atomic_inc(&ctx->inflight);
		err = -EIOCBQUEUED;
		sreq = NULL;
		goto unlock;
	}
free:
	skcipher_free_async_sgls(sreq);
unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);
	kzfree(sreq);
out:
	return err;
}
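
/*
 * Synchronous read path: encrypt/decrypt directly from the queued tx
 * scatterlists into the caller's buffer, blocking until the operation
 * completes.  Unless this is the final chunk, only whole cipher blocks
 * are processed.
 */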
static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
				 int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	unsigned bs = crypto_skcipher_blocksize(tfm);
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int err = -EAGAIN;
	int used;
	long copied = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto unlock;
		}

		used = min_t(unsigned long, ctx->used, msg_data_left(msg));

		used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
		err = used;
		if (err < 0)
			goto unlock;

		if (ctx->more || used < ctx->used)
			used -= used % bs;

		err = -EINVAL;
		if (!used)
			goto free;

		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
					   ctx->iv);

		err = af_alg_wait_for_completion(
				ctx->enc ?
					crypto_skcipher_encrypt(&ctx->req) :
					crypto_skcipher_decrypt(&ctx->req),
				&ctx->completion);

free:
		af_alg_free_sg(&ctx->rsgl);

		if (err)
			goto unlock;

		copied += used;
		skcipher_pull_sgl(sk, used, 1);
		iov_iter_advance(&msg->msg_iter, used);
	}

	err = 0;

unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}

static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t ignored, int flags)
{
	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
		skcipher_recvmsg_async(sock, msg, flags) :
		skcipher_recvmsg_sync(sock, msg, flags);
}

static unsigned int skcipher_poll(struct file *file, struct socket *sock,
				  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (ctx->used)
		mask |= POLLIN | POLLRDNORM;

	if (skcipher_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_skcipher_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg,
	.sendpage	=	skcipher_sendpage,
	.recvmsg	=	skcipher_recvmsg,
	.poll		=	skcipher_poll,
};
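
/*
 * The _nokey entry points below are installed while no key has been set.
 * Each one first verifies (and records, via the socket refcounts) that
 * the parent tfm now has a key before forwarding to the real handler.
 */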
static int skcipher_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct skcipher_tfm *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	if (ask->refcnt)
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (!tfm->has_key)
		goto unlock;

	if (!pask->refcnt++)
		sock_hold(psk);

	ask->refcnt = 1;
	sock_put(psk);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}

static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t size)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_sendmsg(sock, msg, size);
}

static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
				       int offset, size_t size, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_sendpage(sock, page, offset, size, flags);
}

static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t ignored, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_recvmsg(sock, msg, ignored, flags);
}

static struct proto_ops algif_skcipher_ops_nokey = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg_nokey,
	.sendpage	=	skcipher_sendpage_nokey,
	.recvmsg	=	skcipher_recvmsg_nokey,
	.poll		=	skcipher_poll,
};

static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
	struct skcipher_tfm *tfm;
	struct crypto_skcipher *skcipher;

	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
	if (!tfm)
		return ERR_PTR(-ENOMEM);

	skcipher = crypto_alloc_skcipher(name, type, mask);
	if (IS_ERR(skcipher)) {
		kfree(tfm);
		return ERR_CAST(skcipher);
	}

	tfm->skcipher = skcipher;

	return tfm;
}

static void skcipher_release(void *private)
{
	struct skcipher_tfm *tfm = private;

	crypto_free_skcipher(tfm->skcipher);
	kfree(tfm);
}

static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
	struct skcipher_tfm *tfm = private;
	int err;

	err = crypto_skcipher_setkey(tfm->skcipher, key, keylen);
	tfm->has_key = !err;

	return err;
}
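
/* Wait (bounded to roughly ten seconds) for in-flight async requests. */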
static void skcipher_wait(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	int ctr = 0;

	while (atomic_read(&ctx->inflight) && ctr++ < 100)
		msleep(100);
}

static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);

	if (atomic_read(&ctx->inflight))
		skcipher_wait(sk);

	skcipher_free_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
	struct skcipher_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_tfm *tfm = private;
	struct crypto_skcipher *skcipher = tfm->skcipher;
	unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
			       GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}

	memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));

	INIT_LIST_HEAD(&ctx->tsgl);
	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	atomic_set(&ctx->inflight, 0);
	af_alg_init_completion(&ctx->completion);

	ask->private = ctx;

	skcipher_request_set_tfm(&ctx->req, skcipher);
	skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
						 CRYPTO_TFM_REQ_MAY_BACKLOG,
				      af_alg_complete, &ctx->completion);

	sk->sk_destruct = skcipher_sock_destruct;

	return 0;
}

static int skcipher_accept_parent(void *private, struct sock *sk)
{
	struct skcipher_tfm *tfm = private;

	if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher))
		return -ENOKEY;

	return skcipher_accept_parent_nokey(private, sk);
}

static const struct af_alg_type algif_type_skcipher = {
	.bind		=	skcipher_bind,
	.release	=	skcipher_release,
	.setkey		=	skcipher_setkey,
	.accept		=	skcipher_accept_parent,
	.accept_nokey	=	skcipher_accept_parent_nokey,
	.ops		=	&algif_skcipher_ops,
	.ops_nokey	=	&algif_skcipher_ops_nokey,
	.name		=	"skcipher",
	.owner		=	THIS_MODULE
};

static int __init algif_skcipher_init(void)
{
	return af_alg_register_type(&algif_type_skcipher);
}

static void __exit algif_skcipher_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_skcipher);
	BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_LICENSE("GPL");