ccp-platform.c
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2014 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
  12. #include <linux/module.h>
  13. #include <linux/kernel.h>
  14. #include <linux/device.h>
  15. #include <linux/platform_device.h>
  16. #include <linux/ioport.h>
  17. #include <linux/dma-mapping.h>
  18. #include <linux/kthread.h>
  19. #include <linux/sched.h>
  20. #include <linux/interrupt.h>
  21. #include <linux/spinlock.h>
  22. #include <linux/delay.h>
  23. #include <linux/ccp.h>
  24. #include <linux/of.h>
  25. #include <linux/of_address.h>
  26. #include <linux/acpi.h>
  27. #include "ccp-dev.h"
  28. struct ccp_platform {
  29. int coherent;
  30. };
  31. static int ccp_get_irq(struct ccp_device *ccp)
  32. {
  33. struct device *dev = ccp->dev;
  34. struct platform_device *pdev = container_of(dev,
  35. struct platform_device, dev);
  36. int ret;
  37. ret = platform_get_irq(pdev, 0);
  38. if (ret < 0)
  39. return ret;
  40. ccp->irq = ret;
  41. ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
  42. if (ret) {
  43. dev_notice(dev, "unable to allocate IRQ (%d)\n", ret);
  44. return ret;
  45. }
  46. return 0;
  47. }
  48. static int ccp_get_irqs(struct ccp_device *ccp)
  49. {
  50. struct device *dev = ccp->dev;
  51. int ret;
  52. ret = ccp_get_irq(ccp);
  53. if (!ret)
  54. return 0;
  55. /* Couldn't get an interrupt */
  56. dev_notice(dev, "could not enable interrupts (%d)\n", ret);
  57. return ret;
  58. }
  59. static void ccp_free_irqs(struct ccp_device *ccp)
  60. {
  61. struct device *dev = ccp->dev;
  62. free_irq(ccp->irq, dev);
  63. }
  64. static struct resource *ccp_find_mmio_area(struct ccp_device *ccp)
  65. {
  66. struct device *dev = ccp->dev;
  67. struct platform_device *pdev = container_of(dev,
  68. struct platform_device, dev);
  69. struct resource *ior;
  70. ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  71. if (ior && (resource_size(ior) >= 0x800))
  72. return ior;
  73. return NULL;
  74. }
  75. static int ccp_platform_probe(struct platform_device *pdev)
  76. {
  77. struct ccp_device *ccp;
  78. struct ccp_platform *ccp_platform;
  79. struct device *dev = &pdev->dev;
  80. enum dev_dma_attr attr;
  81. struct resource *ior;
  82. int ret;
  83. ret = -ENOMEM;
  84. ccp = ccp_alloc_struct(dev);
  85. if (!ccp)
  86. goto e_err;
  87. ccp_platform = devm_kzalloc(dev, sizeof(*ccp_platform), GFP_KERNEL);
  88. if (!ccp_platform)
  89. goto e_err;
  90. ccp->dev_specific = ccp_platform;
  91. ccp->get_irq = ccp_get_irqs;
  92. ccp->free_irq = ccp_free_irqs;
  93. ior = ccp_find_mmio_area(ccp);
  94. ccp->io_map = devm_ioremap_resource(dev, ior);
  95. if (IS_ERR(ccp->io_map)) {
  96. ret = PTR_ERR(ccp->io_map);
  97. goto e_err;
  98. }
  99. ccp->io_regs = ccp->io_map;
  100. attr = device_get_dma_attr(dev);
  101. if (attr == DEV_DMA_NOT_SUPPORTED) {
  102. dev_err(dev, "DMA is not supported");
  103. goto e_err;
  104. }
  105. ccp_platform->coherent = (attr == DEV_DMA_COHERENT);
  106. if (ccp_platform->coherent)
  107. ccp->axcache = CACHE_WB_NO_ALLOC;
  108. else
  109. ccp->axcache = CACHE_NONE;
  110. ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
  111. if (ret) {
  112. dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
  113. goto e_err;
  114. }
  115. dev_set_drvdata(dev, ccp);
  116. ret = ccp_init(ccp);
  117. if (ret)
  118. goto e_err;
  119. dev_notice(dev, "enabled\n");
  120. return 0;
  121. e_err:
  122. dev_notice(dev, "initialization failed\n");
  123. return ret;
  124. }
  125. static int ccp_platform_remove(struct platform_device *pdev)
  126. {
  127. struct device *dev = &pdev->dev;
  128. struct ccp_device *ccp = dev_get_drvdata(dev);
  129. ccp_destroy(ccp);
  130. dev_notice(dev, "disabled\n");
  131. return 0;
  132. }
  133. #ifdef CONFIG_PM
  134. static int ccp_platform_suspend(struct platform_device *pdev,
  135. pm_message_t state)
  136. {
  137. struct device *dev = &pdev->dev;
  138. struct ccp_device *ccp = dev_get_drvdata(dev);
  139. unsigned long flags;
  140. unsigned int i;
  141. spin_lock_irqsave(&ccp->cmd_lock, flags);
  142. ccp->suspending = 1;
  143. /* Wake all the queue kthreads to prepare for suspend */
  144. for (i = 0; i < ccp->cmd_q_count; i++)
  145. wake_up_process(ccp->cmd_q[i].kthread);
  146. spin_unlock_irqrestore(&ccp->cmd_lock, flags);
  147. /* Wait for all queue kthreads to say they're done */
  148. while (!ccp_queues_suspended(ccp))
  149. wait_event_interruptible(ccp->suspend_queue,
  150. ccp_queues_suspended(ccp));
  151. return 0;
  152. }
  153. static int ccp_platform_resume(struct platform_device *pdev)
  154. {
  155. struct device *dev = &pdev->dev;
  156. struct ccp_device *ccp = dev_get_drvdata(dev);
  157. unsigned long flags;
  158. unsigned int i;
  159. spin_lock_irqsave(&ccp->cmd_lock, flags);
  160. ccp->suspending = 0;
  161. /* Wake up all the kthreads */
  162. for (i = 0; i < ccp->cmd_q_count; i++) {
  163. ccp->cmd_q[i].suspended = 0;
  164. wake_up_process(ccp->cmd_q[i].kthread);
  165. }
  166. spin_unlock_irqrestore(&ccp->cmd_lock, flags);
  167. return 0;
  168. }
  169. #endif
#ifdef CONFIG_ACPI
/* ACPI IDs this driver binds to */
static const struct acpi_device_id ccp_acpi_match[] = {
	{ "AMDI0C00", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
#endif
#ifdef CONFIG_OF
/* Device-tree compatibles this driver binds to (AMD Seattle) */
static const struct of_device_id ccp_of_match[] = {
	{ .compatible = "amd,ccp-seattle-v1a" },
	{ },
};
MODULE_DEVICE_TABLE(of, ccp_of_match);
#endif
/* Platform driver glue: probe/remove plus legacy suspend/resume hooks. */
static struct platform_driver ccp_platform_driver = {
	.driver = {
		.name = "ccp",
#ifdef CONFIG_ACPI
		.acpi_match_table = ccp_acpi_match,
#endif
#ifdef CONFIG_OF
		.of_match_table = ccp_of_match,
#endif
	},
	.probe = ccp_platform_probe,
	.remove = ccp_platform_remove,
#ifdef CONFIG_PM
	/* Legacy (non-dev_pm_ops) power-management callbacks */
	.suspend = ccp_platform_suspend,
	.resume = ccp_platform_resume,
#endif
};
/* Register the platform driver; called from the CCP module init path. */
int ccp_platform_init(void)
{
	return platform_driver_register(&ccp_platform_driver);
}
/* Unregister the platform driver; called from the CCP module exit path. */
void ccp_platform_exit(void)
{
	platform_driver_unregister(&ccp_platform_driver);
}