geode-aes.c

/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <linux/io.h>
#include <linux/delay.h>

#include "geode-aes.h"

/* Static structures */

static void __iomem *_iobase;
static spinlock_t lock;

/* Write a 128 bit field (either a writable key or IV) */
static inline void
_writefield(u32 offset, void *value)
{
	int i;

	for (i = 0; i < 4; i++)
		iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
}

/* Read a 128 bit field (either a writable key or IV) */
static inline void
_readfield(u32 offset, void *value)
{
	int i;

	for (i = 0; i < 4; i++)
		((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}

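/*
 * Program the engine with the physical addresses of source and
 * destination and the length, start the operation, then poll until the
 * completion interrupt is pending or the timeout counter expires.
 * Returns 0 on success and 1 on timeout.
 */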
static int
do_crypt(void *src, void *dst, int len, u32 flags)
{
	u32 status;
	u32 counter = AES_OP_TIMEOUT;

	iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
	iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
	iowrite32(len, _iobase + AES_LENA_REG);

	/* Start the operation */
	iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);

	do {
		status = ioread32(_iobase + AES_INTR_REG);
		cpu_relax();
	} while (!(status & AES_INTRA_PENDING) && --counter);

	/* Clear the event */
	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
	return counter ? 0 : 1;
}

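/*
 * Run one hardware operation described by @op: take the global lock,
 * load the IV (CBC mode) and the key (unless the hidden key is in use),
 * perform the transfer, and read the updated IV back for CBC chaining.
 */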
static unsigned int
geode_aes_crypt(struct geode_aes_op *op)
{
	u32 flags = 0;
	unsigned long iflags;
	int ret;

	if (op->len == 0)
		return 0;

	/* Always set the coherent flags, so that an in-place operation
	 * (source == destination) is handled safely.
	 */
	flags |= (AES_CTRL_DCA | AES_CTRL_SCA);

	if (op->dir == AES_DIR_ENCRYPT)
		flags |= AES_CTRL_ENCRYPT;

	/* Start the critical section */
	spin_lock_irqsave(&lock, iflags);

	if (op->mode == AES_MODE_CBC) {
		flags |= AES_CTRL_CBC;
		_writefield(AES_WRITEIV0_REG, op->iv);
	}

	if (!(op->flags & AES_FLAGS_HIDDENKEY)) {
		flags |= AES_CTRL_WRKEY;
		_writefield(AES_WRITEKEY0_REG, op->key);
	}

	ret = do_crypt(op->src, op->dst, op->len, flags);
	BUG_ON(ret);

	if (op->mode == AES_MODE_CBC)
		_readfield(AES_WRITEIV0_REG, op->iv);

	spin_unlock_irqrestore(&lock, iflags);

	return op->len;
}

/* CRYPTO-API Functions */

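/*
 * The LX engine only supports 128-bit keys in hardware.  For 192- and
 * 256-bit keys, setkey hands the request to a software fallback tfm,
 * forwarding the request flags and propagating the result flags back.
 */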
static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
	int ret;

	op->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(op->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		/* not supported at all */
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * The requested key size is not supported by HW, do a fallback
	 */
	op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(op->fallback.cip, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

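/* Same logic as geode_setkey_cip(), but for the blkcipher fallback tfm. */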
static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
	int ret;

	op->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(op->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		/* not supported at all */
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * The requested key size is not supported by HW, do a fallback
	 */
	op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

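/*
 * Temporarily point desc->tfm at the software fallback so the request
 * is serviced there, then restore the original tfm.
 */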
static int fallback_blk_dec(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	int ret;
	struct crypto_blkcipher *tfm;
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = op->fallback.blk;

	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	int ret;
	struct crypto_blkcipher *tfm;
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = op->fallback.blk;

	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

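/*
 * Single-block ECB operations backing the plain "aes" cipher.  Keys
 * other than 128 bits are handled by the software fallback.
 */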
static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_encrypt_one(op->fallback.cip, out, in);
		return;
	}

	op->src = (void *) in;
	op->dst = (void *) out;
	op->mode = AES_MODE_ECB;
	op->flags = 0;
	op->len = AES_BLOCK_SIZE;
	op->dir = AES_DIR_ENCRYPT;

	geode_aes_crypt(op);
}

static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_decrypt_one(op->fallback.cip, out, in);
		return;
	}

	op->src = (void *) in;
	op->dst = (void *) out;
	op->mode = AES_MODE_ECB;
	op->flags = 0;
	op->len = AES_BLOCK_SIZE;
	op->dir = AES_DIR_DECRYPT;

	geode_aes_crypt(op);
}

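/*
 * Allocate the software fallback cipher.  The CRYPTO_ALG_NEED_FALLBACK
 * bit in the mask (with a zero type) excludes implementations that
 * themselves need a fallback, so this driver cannot select itself.
 */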
static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	op->fallback.cip = crypto_alloc_cipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(op->fallback.cip)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(op->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	crypto_free_cipher(op->fallback.cip);
	op->fallback.cip = NULL;
}

static struct crypto_alg geode_alg = {
	.cra_name = "aes",
	.cra_driver_name = "geode-aes",
	.cra_priority = 300,
	.cra_alignmask = 15,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_init = fallback_init_cip,
	.cra_exit = fallback_exit_cip,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct geode_aes_op),
	.cra_module = THIS_MODULE,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = geode_setkey_cip,
			.cia_encrypt = geode_encrypt,
			.cia_decrypt = geode_decrypt
		}
	}
};

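/*
 * Illustrative usage sketch (not part of this driver): kernel code
 * reaches this implementation through the generic crypto API by name,
 * never by calling geode_encrypt() directly.  key/out/in below are
 * hypothetical buffers:
 *
 *	struct crypto_cipher *tfm = crypto_alloc_cipher("aes", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_cipher_setkey(tfm, key, AES_KEYSIZE_128);
 *		crypto_cipher_encrypt_one(tfm, out, in);
 *		crypto_free_cipher(tfm);
 *	}
 *
 * With cra_priority 300, this hardware tfm is preferred over the
 * generic C implementation when both are available.
 */
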
static int
geode_cbc_decrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	op->iv = walk.iv;

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_CBC;
		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
		op->dir = AES_DIR_DECRYPT;

		ret = geode_aes_crypt(op);

		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int
geode_cbc_encrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	op->iv = walk.iv;

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_CBC;
		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
		op->dir = AES_DIR_ENCRYPT;

		ret = geode_aes_crypt(op);

		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	op->fallback.blk = crypto_alloc_blkcipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(op->fallback.blk)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(op->fallback.blk);
	}

	return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(op->fallback.blk);
	op->fallback.blk = NULL;
}

static struct crypto_alg geode_cbc_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cbc-aes-geode",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_init = fallback_init_blk,
	.cra_exit = fallback_exit_blk,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct geode_aes_op),
	.cra_alignmask = 15,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = geode_setkey_blk,
			.encrypt = geode_cbc_encrypt,
			.decrypt = geode_cbc_decrypt,
			.ivsize = AES_BLOCK_SIZE,
		}
	}
};

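/*
 * Illustrative usage sketch (not part of this driver), using the legacy
 * blkcipher API this driver targets; buf/key/iv/len are hypothetical:
 *
 *	struct crypto_blkcipher *tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
 *	struct blkcipher_desc desc = { .tfm = tfm };
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	crypto_blkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_blkcipher_set_iv(tfm, iv, AES_BLOCK_SIZE);
 *	crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
 *	crypto_free_blkcipher(tfm);
 *
 * len must be a multiple of AES_BLOCK_SIZE; keys other than 128 bits
 * are transparently routed to the software fallback.
 */
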
static int
geode_ecb_decrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_ECB;
		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
		op->dir = AES_DIR_DECRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int
geode_ecb_encrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_ECB;
		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
		op->dir = AES_DIR_ENCRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static struct crypto_alg geode_ecb_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "ecb-aes-geode",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_init = fallback_init_blk,
	.cra_exit = fallback_exit_blk,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct geode_aes_op),
	.cra_alignmask = 15,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = geode_setkey_blk,
			.encrypt = geode_ecb_encrypt,
			.decrypt = geode_ecb_decrypt,
		}
	}
};

static void geode_aes_remove(struct pci_dev *dev)
{
	crypto_unregister_alg(&geode_alg);
	crypto_unregister_alg(&geode_ecb_alg);
	crypto_unregister_alg(&geode_cbc_alg);

	pci_iounmap(dev, _iobase);
	_iobase = NULL;

	pci_release_regions(dev);
	pci_disable_device(dev);
}

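/*
 * Probe: enable the PCI device, map BAR 0, clear any stale interrupt
 * state, and register the three algorithms.  The error labels unwind
 * in strict reverse order of setup.
 */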
static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret;

	ret = pci_enable_device(dev);
	if (ret)
		return ret;

	ret = pci_request_regions(dev, "geode-aes");
	if (ret)
		goto eenable;

	_iobase = pci_iomap(dev, 0, 0);

	if (_iobase == NULL) {
		ret = -ENOMEM;
		goto erequest;
	}

	spin_lock_init(&lock);

	/* Clear any pending activity */
	iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

	ret = crypto_register_alg(&geode_alg);
	if (ret)
		goto eiomap;

	ret = crypto_register_alg(&geode_ecb_alg);
	if (ret)
		goto ealg;

	ret = crypto_register_alg(&geode_cbc_alg);
	if (ret)
		goto eecb;

	dev_notice(&dev->dev, "GEODE AES engine enabled.\n");
	return 0;

 eecb:
	crypto_unregister_alg(&geode_ecb_alg);

 ealg:
	crypto_unregister_alg(&geode_alg);

 eiomap:
	pci_iounmap(dev, _iobase);

 erequest:
	pci_release_regions(dev);

 eenable:
	pci_disable_device(dev);

	dev_err(&dev->dev, "GEODE AES initialization failed.\n");
	return ret;
}

static struct pci_device_id geode_aes_tbl[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, geode_aes_tbl);

static struct pci_driver geode_aes_driver = {
	.name = "Geode LX AES",
	.id_table = geode_aes_tbl,
	.probe = geode_aes_probe,
	.remove = geode_aes_remove,
};

module_pci_driver(geode_aes_driver);

MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");