/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

#define IO_BAR				2
#define IO_OFFSET			0x20000

#define MSIX_VECTORS			2
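
/* PCI-layer private data: the MSI-X vectors requested for this device */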
struct ccp_msix {
        u32 vector;
        char name[16];
};

struct ccp_pci {
        int msix_count;
        struct ccp_msix msix[MSIX_VECTORS];
};
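
/*
 * Try to enable MSI-X for the device. pci_enable_msix_range() may
 * grant anywhere from 1 to MSIX_VECTORS vectors; the granted count
 * is saved in ccp_pci->msix_count and the shared ccp_irq_handler()
 * (from ccp-dev.h) is attached to each vector under a "ccp-<n>" name.
 */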
static int ccp_get_msix_irqs(struct ccp_device *ccp)
{
        struct ccp_pci *ccp_pci = ccp->dev_specific;
        struct device *dev = ccp->dev;
        struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
        struct msix_entry msix_entry[MSIX_VECTORS];
        unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1;
        int v, ret;

        for (v = 0; v < ARRAY_SIZE(msix_entry); v++)
                msix_entry[v].entry = v;

        ret = pci_enable_msix_range(pdev, msix_entry, 1, v);
        if (ret < 0)
                return ret;

        ccp_pci->msix_count = ret;
        for (v = 0; v < ccp_pci->msix_count; v++) {
                /* Set the interrupt names and request the irqs */
                snprintf(ccp_pci->msix[v].name, name_len, "ccp-%u", v);
                ccp_pci->msix[v].vector = msix_entry[v].vector;
                ret = request_irq(ccp_pci->msix[v].vector, ccp_irq_handler,
                                  0, ccp_pci->msix[v].name, dev);
                if (ret) {
                        dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n",
                                   ret);
                        goto e_irq;
                }
        }

        return 0;

e_irq:
        while (v--)
                free_irq(ccp_pci->msix[v].vector, dev);

        pci_disable_msix(pdev);

        ccp_pci->msix_count = 0;

        return ret;
}
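
/*
 * Fall back to a single MSI interrupt. The assigned vector is kept
 * in ccp->irq so that ccp_free_irqs() can release it later.
 */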
static int ccp_get_msi_irq(struct ccp_device *ccp)
{
        struct device *dev = ccp->dev;
        struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
        int ret;

        ret = pci_enable_msi(pdev);
        if (ret)
                return ret;

        ccp->irq = pdev->irq;
        ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
        if (ret) {
                dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
                goto e_msi;
        }

        return 0;

e_msi:
        pci_disable_msi(pdev);

        return ret;
}
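
/*
 * Interrupt setup entry point, installed as ccp->get_irq: prefer
 * MSI-X and fall back to MSI. Legacy INTx is not attempted; if both
 * fail, the error is returned to the caller.
 */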
static int ccp_get_irqs(struct ccp_device *ccp)
{
        struct device *dev = ccp->dev;
        int ret;

        ret = ccp_get_msix_irqs(ccp);
        if (!ret)
                return 0;

        /* Couldn't get MSI-X vectors, try MSI */
        dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
        ret = ccp_get_msi_irq(ccp);
        if (!ret)
                return 0;

        /* Couldn't get MSI interrupt */
        dev_notice(dev, "could not enable MSI (%d)\n", ret);

        return ret;
}
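
/*
 * Undo ccp_get_irqs(): a nonzero msix_count means MSI-X was in use,
 * otherwise the single MSI vector saved in ccp->irq is freed.
 */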
static void ccp_free_irqs(struct ccp_device *ccp)
{
        struct ccp_pci *ccp_pci = ccp->dev_specific;
        struct device *dev = ccp->dev;
        struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);

        if (ccp_pci->msix_count) {
                while (ccp_pci->msix_count--)
                        free_irq(ccp_pci->msix[ccp_pci->msix_count].vector,
                                 dev);
                pci_disable_msix(pdev);
        } else {
                free_irq(ccp->irq, dev);
                pci_disable_msi(pdev);
        }
}
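
/*
 * Sanity-check the register BAR: IO_BAR must be a memory resource
 * large enough to hold the CCP register block, which this check
 * requires to extend at least 0x800 bytes past IO_OFFSET.
 */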
static int ccp_find_mmio_area(struct ccp_device *ccp)
{
        struct device *dev = ccp->dev;
        struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
        resource_size_t io_len;
        unsigned long io_flags;

        io_flags = pci_resource_flags(pdev, IO_BAR);
        io_len = pci_resource_len(pdev, IO_BAR);
        if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800)))
                return IO_BAR;

        return -EIO;
}
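
/*
 * Probe: allocate per-device state, claim and enable the PCI device,
 * map the register BAR, set the DMA mask (48-bit preferred, 32-bit
 * fallback) and hand off to ccp_init() for the remaining device setup.
 */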
static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct ccp_device *ccp;
        struct ccp_pci *ccp_pci;
        struct device *dev = &pdev->dev;
        unsigned int bar;
        int ret;

        ret = -ENOMEM;
        ccp = ccp_alloc_struct(dev);
        if (!ccp)
                goto e_err;

        ccp_pci = devm_kzalloc(dev, sizeof(*ccp_pci), GFP_KERNEL);
        if (!ccp_pci)
                goto e_err;
        ccp->dev_specific = ccp_pci;
        ccp->get_irq = ccp_get_irqs;
        ccp->free_irq = ccp_free_irqs;

        ret = pci_request_regions(pdev, "ccp");
        if (ret) {
                dev_err(dev, "pci_request_regions failed (%d)\n", ret);
                goto e_err;
        }

        ret = pci_enable_device(pdev);
        if (ret) {
                dev_err(dev, "pci_enable_device failed (%d)\n", ret);
                goto e_regions;
        }

        pci_set_master(pdev);

        ret = ccp_find_mmio_area(ccp);
        if (ret < 0)
                goto e_device;
        bar = ret;

        ret = -EIO;
        ccp->io_map = pci_iomap(pdev, bar, 0);
        if (!ccp->io_map) {
                dev_err(dev, "pci_iomap failed\n");
                goto e_device;
        }
        ccp->io_regs = ccp->io_map + IO_OFFSET;

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
        if (ret) {
                ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
                if (ret) {
                        dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
                                ret);
                        goto e_iomap;
                }
        }

        dev_set_drvdata(dev, ccp);

        ret = ccp_init(ccp);
        if (ret)
                goto e_iomap;

        dev_notice(dev, "enabled\n");

        return 0;

e_iomap:
        pci_iounmap(pdev, ccp->io_map);

e_device:
        pci_disable_device(pdev);

e_regions:
        pci_release_regions(pdev);

e_err:
        dev_notice(dev, "initialization failed\n");
        return ret;
}
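
/*
 * Remove: tear down in the reverse order of probe. The devm-allocated
 * ccp_pci structure is released automatically with the device.
 */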
static void ccp_pci_remove(struct pci_dev *pdev)
{
        struct device *dev = &pdev->dev;
        struct ccp_device *ccp = dev_get_drvdata(dev);

        if (!ccp)
                return;

        ccp_destroy(ccp);

        pci_iounmap(pdev, ccp->io_map);

        pci_disable_device(pdev);

        pci_release_regions(pdev);

        dev_notice(dev, "disabled\n");
}

#ifdef CONFIG_PM
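/*
 * Legacy PM suspend hook: flag the device as suspending, wake each
 * command-queue kthread so it can observe the flag, then wait until
 * ccp_queues_suspended() (from ccp-dev.h) reports that every queue
 * has quiesced.
 */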
static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct device *dev = &pdev->dev;
        struct ccp_device *ccp = dev_get_drvdata(dev);
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        ccp->suspending = 1;

        /* Wake all the queue kthreads to prepare for suspend */
        for (i = 0; i < ccp->cmd_q_count; i++)
                wake_up_process(ccp->cmd_q[i].kthread);

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* Wait for all queue kthreads to say they're done */
        while (!ccp_queues_suspended(ccp))
                wait_event_interruptible(ccp->suspend_queue,
                                         ccp_queues_suspended(ccp));

        return 0;
}
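
/*
 * Legacy PM resume hook: clear the suspend flags and wake each
 * command-queue kthread so it resumes processing.
 */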
static int ccp_pci_resume(struct pci_dev *pdev)
{
        struct device *dev = &pdev->dev;
        struct ccp_device *ccp = dev_get_drvdata(dev);
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        ccp->suspending = 0;

        /* Wake up all the kthreads */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                ccp->cmd_q[i].suspended = 0;
                wake_up_process(ccp->cmd_q[i].kthread);
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        return 0;
}
#endif

static const struct pci_device_id ccp_pci_table[] = {
        { PCI_VDEVICE(AMD, 0x1537), },
        /* Last entry must be zero */
        { 0, }
};
MODULE_DEVICE_TABLE(pci, ccp_pci_table);

static struct pci_driver ccp_pci_driver = {
        .name = "ccp",
        .id_table = ccp_pci_table,
        .probe = ccp_pci_probe,
        .remove = ccp_pci_remove,
#ifdef CONFIG_PM
        .suspend = ccp_pci_suspend,
        .resume = ccp_pci_resume,
#endif
};

int ccp_pci_init(void)
{
        return pci_register_driver(&ccp_pci_driver);
}

void ccp_pci_exit(void)
{
        pci_unregister_driver(&ccp_pci_driver);
}