core.c

/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>

#include "core.h"
#include "cipher.h"
#include "sha.h"

#define QCE_MAJOR_VERSION5	0x05
#define QCE_QUEUE_LENGTH	1

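/* Algorithm families handled by this engine: ablkcipher and ahash. */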
static const struct qce_algo_ops *qce_ops[] = {
        &ablkcipher_ops,
        &ahash_ops,
};

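/* Remove every algorithm family previously registered with the crypto API. */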
static void qce_unregister_algs(struct qce_device *qce)
{
        const struct qce_algo_ops *ops;
        int i;

        for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
                ops = qce_ops[i];
                ops->unregister_algs(qce);
        }
}

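/*
 * Register all algorithm families with the crypto API; stop at the first
 * failure and return its error code.
 */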
static int qce_register_algs(struct qce_device *qce)
{
        const struct qce_algo_ops *ops;
        int i, ret = -ENODEV;

        for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
                ops = qce_ops[i];
                ret = ops->register_algs(qce);
                if (ret)
                        break;
        }

        return ret;
}

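/* Dispatch an async request to the ops whose type matches the request's tfm. */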
static int qce_handle_request(struct crypto_async_request *async_req)
{
        int ret = -EINVAL, i;
        const struct qce_algo_ops *ops;
        u32 type = crypto_tfm_alg_type(async_req->tfm);

        for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
                ops = qce_ops[i];
                if (type != ops->type)
                        continue;
                ret = ops->async_req_handle(async_req);
                break;
        }

        return ret;
}

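/*
 * Enqueue a new request (if any) and, when the engine is idle, pull the next
 * request off the queue, signal the backlog and start processing it.
 */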
static int qce_handle_queue(struct qce_device *qce,
                            struct crypto_async_request *req)
{
        struct crypto_async_request *async_req, *backlog;
        unsigned long flags;
        int ret = 0, err;

        spin_lock_irqsave(&qce->lock, flags);

        if (req)
                ret = crypto_enqueue_request(&qce->queue, req);

        /* busy, do not dequeue request */
        if (qce->req) {
                spin_unlock_irqrestore(&qce->lock, flags);
                return ret;
        }

        backlog = crypto_get_backlog(&qce->queue);
        async_req = crypto_dequeue_request(&qce->queue);
        if (async_req)
                qce->req = async_req;

        spin_unlock_irqrestore(&qce->lock, flags);

        if (!async_req)
                return ret;

        if (backlog) {
                spin_lock_bh(&qce->lock);
                backlog->complete(backlog, -EINPROGRESS);
                spin_unlock_bh(&qce->lock);
        }

        err = qce_handle_request(async_req);
        if (err) {
                qce->result = err;
                tasklet_schedule(&qce->done_tasklet);
        }

        return ret;
}

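/* Completion tasklet: report the result to the caller and restart the queue. */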
static void qce_tasklet_req_done(unsigned long data)
{
        struct qce_device *qce = (struct qce_device *)data;
        struct crypto_async_request *req;
        unsigned long flags;

        spin_lock_irqsave(&qce->lock, flags);
        req = qce->req;
        qce->req = NULL;
        spin_unlock_irqrestore(&qce->lock, flags);

        if (req)
                req->complete(req, qce->result);

        qce_handle_queue(qce, NULL);
}

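/* Entry point used by the algorithm implementations to queue a request. */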
static int qce_async_request_enqueue(struct qce_device *qce,
                                     struct crypto_async_request *req)
{
        return qce_handle_queue(qce, req);
}

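/* Called by the algorithm implementations once a request has finished. */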
static void qce_async_request_done(struct qce_device *qce, int ret)
{
        qce->result = ret;
        tasklet_schedule(&qce->done_tasklet);
}

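/*
 * Read the crypto core version; only major version 5 with a non-zero minor
 * revision is supported.
 */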
static int qce_check_version(struct qce_device *qce)
{
        u32 major, minor, step;

        qce_get_version(qce, &major, &minor, &step);

        /*
         * the driver does not support v5 with minor 0 because it has special
         * alignment requirements.
         */
        if (major != QCE_MAJOR_VERSION5 || minor == 0)
                return -ENODEV;

        qce->burst_size = QCE_BAM_BURST_SIZE;
        qce->pipe_pair_id = 1;

        dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
                major, minor, step);

        return 0;
}

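/*
 * Probe: map registers, set the DMA mask, acquire and enable clocks, request
 * DMA channels, verify the hardware version and register the algorithms.
 */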
static int qce_crypto_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct qce_device *qce;
        struct resource *res;
        int ret;

        qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
        if (!qce)
                return -ENOMEM;

        qce->dev = dev;
        platform_set_drvdata(pdev, qce);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        qce->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(qce->base))
                return PTR_ERR(qce->base);

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        if (ret < 0)
                return ret;

        qce->core = devm_clk_get(qce->dev, "core");
        if (IS_ERR(qce->core))
                return PTR_ERR(qce->core);

        qce->iface = devm_clk_get(qce->dev, "iface");
        if (IS_ERR(qce->iface))
                return PTR_ERR(qce->iface);

        qce->bus = devm_clk_get(qce->dev, "bus");
        if (IS_ERR(qce->bus))
                return PTR_ERR(qce->bus);

        ret = clk_prepare_enable(qce->core);
        if (ret)
                return ret;

        ret = clk_prepare_enable(qce->iface);
        if (ret)
                goto err_clks_core;

        ret = clk_prepare_enable(qce->bus);
        if (ret)
                goto err_clks_iface;

        ret = qce_dma_request(qce->dev, &qce->dma);
        if (ret)
                goto err_clks;

        ret = qce_check_version(qce);
        if (ret)
                goto err_clks;

        spin_lock_init(&qce->lock);
        tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
                     (unsigned long)qce);
        crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);

        qce->async_req_enqueue = qce_async_request_enqueue;
        qce->async_req_done = qce_async_request_done;

        ret = qce_register_algs(qce);
        if (ret)
                goto err_dma;

        return 0;

err_dma:
        qce_dma_release(&qce->dma);
err_clks:
        clk_disable_unprepare(qce->bus);
err_clks_iface:
        clk_disable_unprepare(qce->iface);
err_clks_core:
        clk_disable_unprepare(qce->core);
        return ret;
}

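/* Tear down: kill the tasklet, unregister algorithms, release DMA and clocks. */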
static int qce_crypto_remove(struct platform_device *pdev)
{
        struct qce_device *qce = platform_get_drvdata(pdev);

        tasklet_kill(&qce->done_tasklet);
        qce_unregister_algs(qce);
        qce_dma_release(&qce->dma);
        clk_disable_unprepare(qce->bus);
        clk_disable_unprepare(qce->iface);
        clk_disable_unprepare(qce->core);
        return 0;
}

static const struct of_device_id qce_crypto_of_match[] = {
        { .compatible = "qcom,crypto-v5.1", },
        {}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);

static struct platform_driver qce_crypto_driver = {
        .probe = qce_crypto_probe,
        .remove = qce_crypto_remove,
        .driver = {
                .name = KBUILD_MODNAME,
                .of_match_table = qce_crypto_of_match,
        },
};
module_platform_driver(qce_crypto_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm crypto engine driver");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");