/*
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *
 *  Copyright (c) 2000-2008 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson   <andros@umich.edu>
 *  Bruce Fields   <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government.  It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission.  FundsXpress makes no representations about the suitability of
 * this software for any purpose.  It is provided "as is" without express
 * or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif
u32
krb5_encrypt(
	struct crypto_blkcipher *tfm,
	void *iv,
	void *in,
	void *out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };

	if (length % crypto_blkcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
			crypto_blkcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
out:
	dprintk("RPC: krb5_encrypt returns %d\n", ret);
	return ret;
}

u32
krb5_decrypt(
	struct crypto_blkcipher *tfm,
	void *iv,
	void *in,
	void *out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };

	if (length % crypto_blkcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
			crypto_blkcipher_ivsize(tfm));
		goto out;
	}
	if (iv)
		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
out:
	dprintk("RPC: gss_k5decrypt returns %d\n", ret);
	return ret;
}
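/*
 * Usage sketch (illustrative only, not part of the original file): both
 * helpers operate on contiguous buffers whose length is a multiple of
 * the cipher block size; "iv" may be NULL, in which case an all-zero IV
 * is used.  For example, with an already-allocated CBC tfm and two
 * blocks of data:
 *
 *	u8 cipherbuf[32];
 *	u32 err = krb5_encrypt(tfm, NULL, plainbuf, cipherbuf, 32);
 */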
static int
checksummer(struct scatterlist *sg, void *data)
{
	struct hash_desc *desc = data;

	return crypto_hash_update(desc, sg, sg->length);
}

static int
arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
{
	unsigned int ms_usage;

	switch (usage) {
	case KG_USAGE_SIGN:
		ms_usage = 15;
		break;
	case KG_USAGE_SEAL:
		ms_usage = 13;
		break;
	default:
		return -EINVAL;
	}
	salt[0] = (ms_usage >> 0) & 0xff;
	salt[1] = (ms_usage >> 8) & 0xff;
	salt[2] = (ms_usage >> 16) & 0xff;
	salt[3] = (ms_usage >> 24) & 0xff;

	return 0;
}
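/*
 * Worked example: usage == KG_USAGE_SEAL maps to ms_usage 13, which is
 * stored least-significant byte first, so the caller gets
 *
 *	salt[] = { 0x0d, 0x00, 0x00, 0x00 }
 *
 * (KG_USAGE_SIGN likewise yields { 0x0f, 0x00, 0x00, 0x00 }.)
 */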
static u32
make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
		       struct xdr_buf *body, int body_offset, u8 *cksumkey,
		       unsigned int usage, struct xdr_netobj *cksumout)
{
	struct hash_desc desc;
	struct scatterlist sg[1];
	int err;
	u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	u8 rc4salt[4];
	struct crypto_hash *md5;
	struct crypto_hash *hmac_md5;

	if (cksumkey == NULL)
		return GSS_S_FAILURE;

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
		dprintk("%s: invalid usage value %u\n", __func__, usage);
		return GSS_S_FAILURE;
	}

	md5 = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(md5))
		return GSS_S_FAILURE;

	hmac_md5 = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
				     CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmac_md5)) {
		crypto_free_hash(md5);
		return GSS_S_FAILURE;
	}

	desc.tfm = md5;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	sg_init_one(sg, rc4salt, 4);
	err = crypto_hash_update(&desc, sg, 4);
	if (err)
		goto out;

	sg_init_one(sg, header, hdrlen);
	err = crypto_hash_update(&desc, sg, hdrlen);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, &desc);
	if (err)
		goto out;
	err = crypto_hash_final(&desc, checksumdata);
	if (err)
		goto out;

	desc.tfm = hmac_md5;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	err = crypto_hash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	sg_init_one(sg, checksumdata, crypto_hash_digestsize(md5));
	err = crypto_hash_digest(&desc, sg, crypto_hash_digestsize(md5),
				 checksumdata);
	if (err)
		goto out;

	memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
	cksumout->len = kctx->gk5e->cksumlength;
out:
	crypto_free_hash(md5);
	crypto_free_hash(hmac_md5);
	return err ? GSS_S_FAILURE : 0;
}
/*
 * Checksum the plaintext data and hdrlen bytes of the token header.
 * The checksum is performed over the first 8 bytes of the
 * gss token header and then over the data body.
 */
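/*
 * In outline (a sketch of the code below, not an additional code path):
 *
 *	hash_init()
 *	hash_update(header[0..hdrlen))
 *	hash_update(each fragment of body past body_offset)
 *	hash_final() -> checksumdata
 *
 * followed by per-type post-processing: for CKSUMTYPE_RSA_MD5 the
 * digest is encrypted with kctx->seq and the trailing cksumlength
 * bytes are kept; for CKSUMTYPE_HMAC_SHA1_DES3 the leading cksumlength
 * bytes of the digest are copied out directly.
 */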
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
	      struct xdr_buf *body, int body_offset, u8 *cksumkey,
	      unsigned int usage, struct xdr_netobj *cksumout)
{
	struct hash_desc desc;
	struct scatterlist sg[1];
	int err;
	u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	unsigned int checksumlen;

	if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
		return make_checksum_hmac_md5(kctx, header, hdrlen,
					      body, body_offset,
					      cksumkey, usage, cksumout);

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return GSS_S_FAILURE;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	checksumlen = crypto_hash_digestsize(desc.tfm);

	if (cksumkey != NULL) {
		err = crypto_hash_setkey(desc.tfm, cksumkey,
					 kctx->gk5e->keylength);
		if (err)
			goto out;
	}

	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	sg_init_one(sg, header, hdrlen);
	err = crypto_hash_update(&desc, sg, hdrlen);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, &desc);
	if (err)
		goto out;
	err = crypto_hash_final(&desc, checksumdata);
	if (err)
		goto out;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_RSA_MD5:
		err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
					  checksumdata, checksumlen);
		if (err)
			goto out;
		memcpy(cksumout->data,
		       checksumdata + checksumlen - kctx->gk5e->cksumlength,
		       kctx->gk5e->cksumlength);
		break;
	case CKSUMTYPE_HMAC_SHA1_DES3:
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
	cksumout->len = kctx->gk5e->cksumlength;
out:
	crypto_free_hash(desc.tfm);
	return err ? GSS_S_FAILURE : 0;
}
/*
 * Checksum the plaintext data and hdrlen bytes of the token header.
 * Per RFC 4121, sec. 4.2.4, the checksum is performed over the data
 * body first, then over the first 16 octets of the MIC token.
 * Inclusion of the header data in the calculation of the
 * checksum is optional.
 */
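/*
 * For the AES enctypes handled below, the underlying HMAC-SHA1 digest
 * is 20 bytes while gk5e->cksumlength is 12 (HMAC-SHA1-96), so the
 * final memcpy() deliberately keeps only the first 12 octets.  Also
 * note the ordering difference from make_checksum(): here the body is
 * hashed first and the (optional) header afterwards, per RFC 4121.
 */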
u32
make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
		 struct xdr_buf *body, int body_offset, u8 *cksumkey,
		 unsigned int usage, struct xdr_netobj *cksumout)
{
	struct hash_desc desc;
	struct scatterlist sg[1];
	int err;
	u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	unsigned int checksumlen;

	if (kctx->gk5e->keyed_cksum == 0) {
		dprintk("%s: expected keyed hash for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}
	if (cksumkey == NULL) {
		dprintk("%s: no key supplied for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
				     CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return GSS_S_FAILURE;
	checksumlen = crypto_hash_digestsize(desc.tfm);
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, &desc);
	if (err)
		goto out;
	if (header != NULL) {
		sg_init_one(sg, header, hdrlen);
		err = crypto_hash_update(&desc, sg, hdrlen);
		if (err)
			goto out;
	}
	err = crypto_hash_final(&desc, checksumdata);
	if (err)
		goto out;

	cksumout->len = kctx->gk5e->cksumlength;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_HMAC_SHA1_96_AES128:
	case CKSUMTYPE_HMAC_SHA1_96_AES256:
		/* note that this truncates the hash */
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
out:
	crypto_free_hash(desc.tfm);
	return err ? GSS_S_FAILURE : 0;
}
struct encryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct blkcipher_desc desc;
	int pos;
	struct xdr_buf *outbuf;
	struct page **pages;
	struct scatterlist infrags[4];
	struct scatterlist outfrags[4];
	int fragno;
	int fraglen;
};

static int
encryptor(struct scatterlist *sg, void *data)
{
	struct encryptor_desc *desc = data;
	struct xdr_buf *outbuf = desc->outbuf;
	struct page *in_page;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;
	int page_pos;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);

	page_pos = desc->pos - outbuf->head[0].iov_len;
	if (page_pos >= 0 && page_pos < outbuf->page_len) {
		/* pages are not in place: */
		int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
		in_page = desc->pages[i];
	} else {
		in_page = sg_page(sg);
	}
	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
		    sg->offset);
	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;
	desc->pos += sg->length;

	fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->infrags[desc->fragno - 1]);
	sg_mark_end(&desc->outfrags[desc->fragno - 1]);

	ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
					  desc->infrags, thislen);
	if (ret)
		return ret;

	sg_init_table(desc->infrags, 4);
	sg_init_table(desc->outfrags, 4);

	if (fraglen) {
		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->infrags[0] = desc->outfrags[0];
		sg_assign_page(&desc->infrags[0], in_page);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}
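/*
 * Partial-block example: with a 16-byte block size, if the fragments
 * seen so far total 20 bytes, the code above ciphers the first 16
 * bytes (thislen after rounding down) and carries the remaining 4
 * bytes over as infrags[0]/outfrags[0] for the next call, so the
 * blkcipher only ever sees whole blocks.
 */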
int
gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
		    int offset, struct page **pages)
{
	int ret;
	struct encryptor_desc desc;

	BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.desc.tfm = tfm;
	desc.desc.info = desc.iv;
	desc.desc.flags = 0;
	desc.pos = offset;
	desc.outbuf = buf;
	desc.pages = pages;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.infrags, 4);
	sg_init_table(desc.outfrags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
	return ret;
}
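/*
 * Illustrative caller (hypothetical; tfm/buf/offset/pages stand in for
 * whatever the wrap path supplies): encrypt everything past a token
 * header in place, reading the plaintext page data from "pages":
 *
 *	if (gss_encrypt_xdr_buf(tfm, buf, offset, pages))
 *		return GSS_S_FAILURE;
 *
 * The region past "offset" must already be padded out to the cipher
 * block size, or the BUG_ON() above fires.
 */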
struct decryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct blkcipher_desc desc;
	struct scatterlist frags[4];
	int fragno;
	int fraglen;
};

static int
decryptor(struct scatterlist *sg, void *data)
{
	struct decryptor_desc *desc = data;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);
	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;

	fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->frags[desc->fragno - 1]);

	ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
					  desc->frags, thislen);
	if (ret)
		return ret;

	sg_init_table(desc->frags, 4);

	if (fraglen) {
		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

int
gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
		    int offset)
{
	struct decryptor_desc desc;

	/* XXXJBF: */
	BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.desc.tfm = tfm;
	desc.desc.info = desc.iv;
	desc.desc.flags = 0;
	desc.fragno = 0;
	desc.fraglen = 0;
	sg_init_table(desc.frags, 4);
	return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
}
/*
 * This function makes the assumption that it was ultimately called
 * from gss_wrap().
 *
 * The client auth_gss code moves any existing tail data into a
 * separate page before calling gss_wrap.
 * The server svcauth_gss code ensures that both the head and the
 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
 *
 * Even with that guarantee, this function may be called more than
 * once in the processing of gss_wrap().  The best we can do is
 * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
 * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
 * At run-time we can verify that a single invocation of this
 * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
 */
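/*
 * Shift sketch: with "base" pointing just past the krb5 token header
 * and shiftlen == conflen, the head iovec goes from
 *
 *	[ token header | payload ... ]
 * to
 *	[ token header | <conflen bytes of slack> | payload ... ]
 *
 * and head[0].iov_len/buf->len grow by shiftlen.  The slack is then
 * filled by the caller (gss_krb5_aes_encrypt() writes the confounder
 * there).
 */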
int
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
{
	u8 *p;

	if (shiftlen == 0)
		return 0;

	BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
	BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);

	p = buf->head[0].iov_base + base;

	memmove(p + shiftlen, p, buf->head[0].iov_len - base);

	buf->head[0].iov_len += shiftlen;
	buf->len += shiftlen;

	return 0;
}
static u32
gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf,
		   u32 offset, u8 *iv, struct page **pages, int encrypt)
{
	u32 ret;
	struct scatterlist sg[1];
	struct blkcipher_desc desc = { .tfm = cipher, .info = iv };
	u8 data[GSS_KRB5_MAX_BLOCKSIZE * 2];
	struct page **save_pages;
	u32 len = buf->len - offset;

	if (len > ARRAY_SIZE(data)) {
		WARN_ON(1);
		return -ENOMEM;
	}

	/*
	 * For encryption, we want to read from the cleartext
	 * page cache pages, and write the encrypted data to
	 * the supplied xdr_buf pages.
	 */
	save_pages = buf->pages;
	if (encrypt)
		buf->pages = pages;

	ret = read_bytes_from_xdr_buf(buf, offset, data, len);
	buf->pages = save_pages;
	if (ret)
		goto out;

	sg_init_one(sg, data, len);

	if (encrypt)
		ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
	else
		ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len);

	if (ret)
		goto out;

	ret = write_bytes_to_xdr_buf(buf, offset, data, len);

out:
	return ret;
}
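/*
 * How the AES paths below use this helper: all but the last two cipher
 * blocks are run through the auxiliary CBC tfm via encryptor()/
 * decryptor() (the "cbcbytes" region), and this function then handles
 * the final one or two blocks.  The "cipher" handle is expected to
 * implement ciphertext stealing (e.g. a cts(cbc(aes)) tfm), and "iv"
 * is the chaining value carried forward from the CBC pass, so the
 * combination is equivalent to one CTS pass over the whole region.
 */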
u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	u32 err;
	struct xdr_netobj hmac;
	u8 *cksumkey;
	u8 *ecptr;
	struct crypto_blkcipher *cipher, *aux_cipher;
	int blocksize;
	struct page **save_pages;
	int nblocks, nbytes;
	struct encryptor_desc desc;
	u32 cbcbytes;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksumkey = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	} else {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksumkey = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	}
	blocksize = crypto_blkcipher_blocksize(cipher);

	/* hide the gss token header and insert the confounder */
	offset += GSS_KRB5_TOK_HDR_LEN;
	if (xdr_extend_head(buf, offset, kctx->gk5e->conflen))
		return GSS_S_FAILURE;
	gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen);
	offset -= GSS_KRB5_TOK_HDR_LEN;

	if (buf->tail[0].iov_base != NULL) {
		ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
	} else {
		buf->tail[0].iov_base = buf->head[0].iov_base
					+ buf->head[0].iov_len;
		buf->tail[0].iov_len = 0;
		ecptr = buf->tail[0].iov_base;
	}

	/* copy plaintext gss token header after filler (if any) */
	memcpy(ecptr, buf->head[0].iov_base + offset, GSS_KRB5_TOK_HDR_LEN);
	buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
	buf->len += GSS_KRB5_TOK_HDR_LEN;

	/* Do the HMAC */
	hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
	hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;

	/*
	 * When we are called, pages points to the real page cache
	 * data -- which we can't go and encrypt!  buf->pages points
	 * to scratch pages which we are going to send off to the
	 * client/server.  Swap in the plaintext pages to calculate
	 * the hmac.
	 */
	save_pages = buf->pages;
	buf->pages = pages;

	err = make_checksum_v2(kctx, NULL, 0, buf,
			       offset + GSS_KRB5_TOK_HDR_LEN,
			       cksumkey, usage, &hmac);
	buf->pages = save_pages;
	if (err)
		return GSS_S_FAILURE;

	nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
	nblocks = (nbytes + blocksize - 1) / blocksize;
	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
		desc.fragno = 0;
		desc.fraglen = 0;
		desc.pages = pages;
		desc.outbuf = buf;
		desc.desc.info = desc.iv;
		desc.desc.flags = 0;
		desc.desc.tfm = aux_cipher;

		sg_init_table(desc.infrags, 4);
		sg_init_table(desc.outfrags, 4);

		err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
				      cbcbytes, encryptor, &desc);
		if (err)
			goto out_err;
	}

	/* Make sure IV carries forward from any CBC results. */
	err = gss_krb5_cts_crypt(cipher, buf,
				 offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
				 desc.iv, pages, 1);
	if (err) {
		err = GSS_S_FAILURE;
		goto out_err;
	}

	/* Now update buf to account for HMAC */
	buf->tail[0].iov_len += kctx->gk5e->cksumlength;
	buf->len += kctx->gk5e->cksumlength;

out_err:
	if (err)
		err = GSS_S_FAILURE;
	return err;
}
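/*
 * Resulting wrap token layout (as assembled above):
 *
 *	[ GSS token header | E(confounder | plaintext | header copy) | HMAC ]
 *
 * where the HMAC is computed over the plaintext form of the encrypted
 * region and appended, unencrypted, at the tail.
 */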
u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
		     u32 *headskip, u32 *tailskip)
{
	struct xdr_buf subbuf;
	u32 ret = 0;
	u8 *cksum_key;
	struct crypto_blkcipher *cipher, *aux_cipher;
	struct xdr_netobj our_hmac_obj;
	u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	int nblocks, blocksize, cbcbytes;
	struct decryptor_desc desc;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksum_key = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	} else {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksum_key = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	}
	blocksize = crypto_blkcipher_blocksize(cipher);

	/* create a segment skipping the header and leaving out the checksum */
	xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
			   (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
			    kctx->gk5e->cksumlength));

	nblocks = (subbuf.len + blocksize - 1) / blocksize;

	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		desc.fragno = 0;
		desc.fraglen = 0;
		desc.desc.info = desc.iv;
		desc.desc.flags = 0;
		desc.desc.tfm = aux_cipher;

		sg_init_table(desc.frags, 4);

		ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
		if (ret)
			goto out_err;
	}

	/* Make sure IV carries forward from any CBC results. */
	ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
	if (ret)
		goto out_err;

	/* Calculate our hmac over the plaintext data */
	our_hmac_obj.len = sizeof(our_hmac);
	our_hmac_obj.data = our_hmac;

	ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
			       cksum_key, usage, &our_hmac_obj);
	if (ret)
		goto out_err;

	/* Get the packet's hmac value */
	ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
				      pkt_hmac, kctx->gk5e->cksumlength);
	if (ret)
		goto out_err;

	if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
		ret = GSS_S_BAD_SIG;
		goto out_err;
	}
	*headskip = kctx->gk5e->conflen;
	*tailskip = kctx->gk5e->cksumlength;
out_err:
	if (ret && ret != GSS_S_BAD_SIG)
		ret = GSS_S_FAILURE;
	return ret;
}
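/*
 * On success the caller is handed *headskip (= conflen) and
 * *tailskip (= cksumlength): the confounder at the front of the
 * decrypted data and the HMAC at the very end of the token are not
 * part of the plaintext and must be skipped when unwrapping.
 */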
/*
 * Compute Kseq given the initial session key and the checksum.
 * Set the key of the given cipher.
 */
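/*
 * In formula form (mirroring the code below):
 *
 *	Kseq  = HMAC(Ksess, 0x00 0x00 0x00 0x00)
 *	Kseq' = HMAC(Kseq, cksum[0..7])
 *
 * and Kseq' is set as the key of the given (arcfour) cipher.
 */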
int
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
		       unsigned char *cksum)
{
	struct crypto_hash *hmac;
	struct hash_desc desc;
	struct scatterlist sg[1];
	u8 Kseq[GSS_KRB5_MAX_KEYLEN];
	u32 zeroconstant = 0;
	int err;

	dprintk("%s: entered\n", __func__);

	hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc.tfm = hmac;
	desc.flags = 0;

	err = crypto_hash_init(&desc);
	if (err)
		goto out_err;

	/* Compute intermediate Kseq from session key */
	err = crypto_hash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	sg_init_one(sg, &zeroconstant, 4);
	err = crypto_hash_digest(&desc, sg, 4, Kseq);
	if (err)
		goto out_err;

	/* Compute final Kseq from the checksum and intermediate Kseq */
	err = crypto_hash_setkey(hmac, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	sg_set_buf(sg, cksum, 8);

	err = crypto_hash_digest(&desc, sg, 8, Kseq);
	if (err)
		goto out_err;

	err = crypto_blkcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	crypto_free_hash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}
/*
 * Compute Kcrypt given the initial session key and the plaintext seqnum.
 * Set the key of cipher kctx->enc.
 */
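/*
 * In formula form (mirroring the code below):
 *
 *	K1      = Ksess with every byte XORed with 0xf0
 *	Kcrypt  = HMAC(K1, 0x00 0x00 0x00 0x00)
 *	Kcrypt' = HMAC(Kcrypt, seqnum as 4 bytes, most significant first)
 *
 * and Kcrypt' is set as the key of the given cipher.
 */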
int
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
		       s32 seqnum)
{
	struct crypto_hash *hmac;
	struct hash_desc desc;
	struct scatterlist sg[1];
	u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
	u8 zeroconstant[4] = {0};
	u8 seqnumarray[4];
	int err, i;

	dprintk("%s: entered, seqnum %u\n", __func__, seqnum);

	hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc.tfm = hmac;
	desc.flags = 0;

	err = crypto_hash_init(&desc);
	if (err)
		goto out_err;

	/* Compute intermediate Kcrypt from session key */
	for (i = 0; i < kctx->gk5e->keylength; i++)
		Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;

	err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	sg_init_one(sg, zeroconstant, 4);
	err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
	if (err)
		goto out_err;

	/* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
	err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff);
	seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff);
	seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
	seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);

	sg_set_buf(sg, seqnumarray, 4);

	err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
	if (err)
		goto out_err;

	err = crypto_blkcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	crypto_free_hash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}